### Eclipse Workspace Patch 1.0
#P apache-trunk
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java	(revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java	(working copy)
@@ -324,6 +324,11 @@
           + " is disabled. Hence not assigning region" + hri.getEncodedName());
       return false;
     }
+    if (assignmentManager.isRegionInMerging(hri)) {
+      LOG.info("Region " + hri.getRegionNameAsString()
+          + " is merging. Hence not assigning it");
+      return false;
+    }
     return true;
   }
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(working copy)
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.net.InetAddress;
@@ -131,6 +132,8 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
@@ -269,6 +272,9 @@
   // Cluster status zk tracker and local setter
   private ClusterStatusTracker clusterStatusTracker;
 
+  // manager of region merge transactions
+  private RegionMergeManager regionMergeManager;
+
   // buffer for "fatal error" notices from region servers
   // in the cluster. This is only used for assisting
   // operations/debugging.
@@ -504,6 +510,7 @@
     if (this.catalogTracker != null) this.catalogTracker.stop();
     if (this.serverManager != null) this.serverManager.stop();
     if (this.assignmentManager != null) this.assignmentManager.stop();
+    if (this.regionMergeManager != null) this.regionMergeManager.stop();
     if (this.fileSystemManager != null) this.fileSystemManager.stop();
     this.zooKeeper.close();
   }
@@ -551,6 +558,9 @@
         this.catalogTracker, this.balancer, this.executorService, this.metricsMaster);
     zooKeeper.registerListenerFirst(assignmentManager);
 
+    this.regionMergeManager = new RegionMergeManager(this, this.zooKeeper);
+    this.regionMergeManager.start();
+
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
       this.serverManager);
     this.regionServerTracker.start();
@@ -1472,6 +1482,30 @@
   }
 
   @Override
+  public MergeRegionResponse mergeRegions(RpcController controller,
+      MergeRegionRequest req) throws ServiceException {
+    MergeRegionResponse mrr = MergeRegionResponse.newBuilder().build();
+    try {
+      LOG.info("Received request to merge regions "
+          + req.getRegionAName().toString(HConstants.UTF8_ENCODING) + " and "
+          + req.getRegionBName().toString(HConstants.UTF8_ENCODING)
+          + " in table "
+          + req.getTableName().toString(HConstants.UTF8_ENCODING) + " on "
+          + this.serverName);
+      RegionMergeManager.createMergeRequest(this.zooKeeper, req.getTableName()
+          .toString(HConstants.UTF8_ENCODING),
+          req.getRegionAName().toString(HConstants.UTF8_ENCODING), req
+              .getRegionBName().toString(HConstants.UTF8_ENCODING), req
+              .getForce());
+    } catch (KeeperException ke) {
+      throw new ServiceException(ke);
+    } catch (UnsupportedEncodingException e) {
+      throw new ServiceException(e);
+    }
+    return mrr;
+  }
+
+  @Override
   public void createTable(HTableDescriptor hTableDescriptor,
     byte [][] splitKeys)
   throws IOException {
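For illustration only, a minimal client-side sketch of driving the new mergeRegions RPC through the generated builder. The table name and encoded region names are placeholders, masterAdmin is assumed to be a MasterAdminService.BlockingInterface stub, and error handling is elided:

    // Build the request exactly as HMaster.mergeRegions() above decodes it
    // (all three payloads are UTF-8 byte strings, per HConstants.UTF8_ENCODING).
    MergeRegionRequest request = MergeRegionRequest.newBuilder()
        .setTableName(ByteString.copyFromUtf8("TestTable"))
        .setRegionAName(ByteString.copyFromUtf8("30c4d0a47703214845d0676d0c7b36dc"))
        .setRegionBName(ByteString.copyFromUtf8("9812cf4d2e8b49d71a29e0fd22d4a4f6"))
        .setForce(false)  // non-adjacent regions are merged only when forced
        .build();
    masterAdmin.mergeRegions(null, request);  // returns an empty MergeRegionResponse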
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/OnlineMerge.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/OnlineMerge.java	(revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/OnlineMerge.java	(revision 0)
@@ -0,0 +1,238 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.master.RegionMergeManager.MergeTransactionData;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Utility that can merge any two regions in the same table: adjacent,
+ * overlapping or disjoint. Unlike {@link Merge}, it does not require
+ * shutting down the cluster or disabling the table.
+ */
+public class OnlineMerge extends Configured implements Tool {
+  static final Log LOG = LogFactory.getLog(OnlineMerge.class);
+
+  private String tableName; // Name of table
+  private String region1EncodedName; // Encoded name of region 1
+  private String region2EncodedName; // Encoded name of region 2
+  private HRegionInfo region1Info; // Info of region 1
+  private HRegionInfo region2Info; // Info of region 2
+  private HRegionInfo mergedRegionInfo; // Info of merged region
+  private HTableDescriptor htd;
+  private boolean isForce = false;
+  private boolean sync = true;
+  private CatalogTracker catalogTracker;
+
+  /** default constructor */
+  public OnlineMerge() {
+    super();
+  }
+
+  /**
+   * @param conf configuration
+   */
+  public OnlineMerge(Configuration conf) {
+    setConf(conf);
+  }
+
+  private int parseArgs(String[] args) throws IOException {
+    GenericOptionsParser parser = new GenericOptionsParser(getConf(), args);
+
+    String[] remainingArgs = parser.getRemainingArgs();
+    if (remainingArgs.length != 3) {
+      usage();
+      return -1;
+    }
+    tableName = remainingArgs[0];
+    region1EncodedName = remainingArgs[1];
+    region2EncodedName = remainingArgs[2];
+    int status = 0;
+    if (region1EncodedName.equals(region2EncodedName)) {
+      LOG.error("Can't merge a region with itself");
+      status = -1;
+    }
+    return status;
+  }
+
+  private void usage() {
+    System.err.println("Usage: bin/hbase org.apache.hadoop.hbase.util.OnlineMerge"
+        + " [-force] [-async] [-show]"
+        + " <tableName> <region1EncodedName> <region2EncodedName>\n");
+  }
+
+  @SuppressWarnings("deprecation")
+  public int run(String[] args) throws Exception {
+    if (parseArgs(args) != 0) {
+      return -1;
+    }
+    HBaseAdmin hbaseAdmin = new HBaseAdmin(getConf());
+    HConnection connection = HConnectionManager.getConnection(getConf());
+    ZooKeeperWatcher zkw = connection.getZooKeeperWatcher();
+    try {
+      if (sync) {
+        this.htd = connection.getHTableDescriptor(Bytes.toBytes(tableName));
+        if (htd == null) {
+          System.err.println("Couldn't get table descriptor");
+          return -1;
+        }
+        this.catalogTracker = new CatalogTracker(zkw, getConf(), connection);
+        this.catalogTracker.start();
+        this.initRegionInfoByEncodedName(catalogTracker);
+        if (this.mergedRegionInfo == null) {
+          System.err.println("Couldn't get region info by encoded name");
+          return -1;
+        }
+      }
+      String transactionName = MergeTransactionData.generateTransactionName(
+          tableName, region1EncodedName, region2EncodedName, isForce);
+      System.out.println("Send merge request " + transactionName);
+      hbaseAdmin.mergeRegions(Bytes.toBytes(tableName), Bytes.toBytes(region1EncodedName),
+          Bytes.toBytes(region2EncodedName), isForce);
+      if (sync) {
+        String transactionNode = ZKUtil.joinZNode(zkw.mergeZNode, transactionName);
+        System.out.print("Waiting for completion...");
+        while (true) {
+          boolean existed = ZKUtil.watchAndCheckExists(zkw, transactionNode);
+          if (!existed) {
+            break;
+          } else {
+            System.out.print(".");
+            Thread.sleep(1000);
+          }
+        }
+        System.out.println("");
+        Pair<HRegionInfo, ServerName> mergedRegionLocation = MetaReader
+            .getRegion(catalogTracker, mergedRegionInfo.getRegionName());
+        if (mergedRegionLocation != null) {
+          System.out.println("Merge completed, new region="
+              + mergedRegionInfo.getRegionNameAsString()
+              + ", assigned on the server " + mergedRegionLocation.getSecond());
+        } else {
+          System.err.println("Merge is cancelled, please see the logs on the master");
+        }
+      }
+    } finally {
+      if (this.catalogTracker != null) {
+        this.catalogTracker.stop();
+      }
+      connection.close();
+    }
+    return 0;
+  }
+
+  private void setForce(boolean force) {
+    this.isForce = force;
+  }
+
+  private void setSync(boolean sync) {
+    this.sync = sync;
+  }
+
+  private void initRegionInfoByEncodedName(CatalogTracker ct)
+      throws IOException {
+    if (tableName != null && (region1Info == null || region2Info == null)) {
+      List<HRegionInfo> tableRegions = MetaReader.getTableRegions(ct,
+          Bytes.toBytes(tableName));
+      for (HRegionInfo regionInfo : tableRegions) {
+        if (regionInfo.getEncodedName().equals(region1EncodedName)) {
+          region1Info = regionInfo;
+        } else if (regionInfo.getEncodedName().equals(region2EncodedName)) {
+          region2Info = regionInfo;
+        }
+        if (region1Info != null && region2Info != null) {
+          this.mergedRegionInfo = HRegionInfo.generateFixedRegionByMerge(
+              this.htd, this.region1Info, this.region2Info);
+          break;
+        }
+      }
+    }
+  }
+
+  static void showMergeRequests() throws Exception {
+    HConnection connection = HConnectionManager.getConnection(HBaseConfiguration.create());
+    try {
+      ZooKeeperWatcher zkw = connection.getZooKeeperWatcher();
+      List<String> mergingRequests = ZKUtil.listChildrenNoWatch(zkw,
+          zkw.mergeZNode);
+      System.out.println("Handling merge requests:");
+      for (String mergeRequest : mergingRequests) {
+        System.out.println(mergeRequest);
+      }
+    } finally {
+      connection.close();
+    }
+  }
+
+  public static void main(String[] args) {
+    int status;
+    OnlineMerge onlineMerge = new OnlineMerge();
+    try {
+      while (true) {
+        if (args.length >= 1 && "-force".equals(args[0])) {
+          String[] newArgs = new String[args.length - 1];
+          for (int i = 1; i < args.length; i++) {
+            newArgs[i - 1] = args[i];
+          }
+          args = newArgs;
+          onlineMerge.setForce(true);
+        } else if (args.length >= 1 && "-async".equals(args[0])) {
+          String[] newArgs = new String[args.length - 1];
+          for (int i = 1; i < args.length; i++) {
+            newArgs[i - 1] = args[i];
+          }
+          args = newArgs;
+          onlineMerge.setSync(false);
+        } else if (args.length >= 1 && "-show".equals(args[0])) {
+          showMergeRequests();
+          System.exit(0);
+        } else {
+          break;
+        }
+      }
+      status = ToolRunner.run(HBaseConfiguration.create(), onlineMerge, args);
+    } catch (Exception e) {
+      LOG.error("exiting due to error", e);
+      status = -1;
+    }
+    System.exit(status);
+  }
+}
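For review convenience, a minimal programmatic invocation of the tool above; the table name and encoded region names are hypothetical. Note that the -force/-async/-show flags are stripped in main() before ToolRunner is invoked, so the programmatic path below exercises only the default synchronous, non-forced merge:

    // Equivalent to:
    //   bin/hbase org.apache.hadoop.hbase.util.OnlineMerge \
    //       TestTable 30c4d0a47703214845d0676d0c7b36dc 9812cf4d2e8b49d71a29e0fd22d4a4f6
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new OnlineMerge(conf), new String[] {
        "TestTable", "30c4d0a47703214845d0676d0c7b36dc",
        "9812cf4d2e8b49d71a29e0fd22d4a4f6" });
    // rc == 0 once the transaction node under zkw.mergeZNode has been deleted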
name"); + return -1; + } + } + String transactionName = MergeTransactionData.generateTransactionName( + tableName, region1EncodedName, region2EncodedName, isForce); + System.out.println("Send merge request " + transactionName); + hbaseAdmin.mergeRegions(Bytes.toBytes(tableName),Bytes.toBytes(region1EncodedName), + Bytes.toBytes(region2EncodedName),isForce); + if (sync) { + String transactionNode = ZKUtil.joinZNode(zkw.mergeZNode, transactionName); + System.out.print("Waiting for completion..."); + while (true) { + boolean existed = ZKUtil.watchAndCheckExists(zkw, transactionNode); + if (!existed) { + break; + } else { + System.out.print("."); + Thread.sleep(1000); + } + } + System.out.println(""); + Pair mergedRegionLocation = MetaReader + .getRegion(catalogTracker, mergedRegionInfo.getRegionName()); + if (mergedRegionLocation != null) { + System.out.println("Complete merging, new region =" + + mergedRegionInfo.getRegionNameAsString() + + ", assigned on the server " + mergedRegionLocation.getSecond()); + } else { + System.err.println("Merge is cancelled, please see the logs on the master"); + } + } + } finally { + if (this.catalogTracker != null) { + this.catalogTracker.stop(); + } + connection.close(); + } + return 0; + } + + private void setForce(boolean force) { + this.isForce = force; + } + + private void setSync(boolean sync) { + this.sync = sync; + } + + private void initRegionInfoByEncodedName(CatalogTracker ct) + throws IOException { + if (tableName != null && (region1Info == null || region2Info == null)) { + List tableRegions = MetaReader.getTableRegions(ct, + Bytes.toBytes(tableName)); + for (HRegionInfo regionInfo : tableRegions) { + if (regionInfo.getEncodedName().equals(region1EncodedName)) { + region1Info = regionInfo; + } else if (regionInfo.getEncodedName().equals(region2EncodedName)) { + region2Info = regionInfo; + } + if (region1Info != null && region2Info != null) { + this.mergedRegionInfo = HRegionInfo.generateFixedRegionByMerge( + this.htd, this.region1Info, this.region2Info); + break; + } + } + } + } + + static void showMergeRequests() throws Exception { + HConnection connection = HConnectionManager.getConnection(HBaseConfiguration.create()); + try { + ZooKeeperWatcher zkw = connection.getZooKeeperWatcher(); + List mergingRequests = ZKUtil.listChildrenNoWatch(zkw, + zkw.mergeZNode); + System.out.println("Handling merge requests:"); + for (String mergeRequest : mergingRequests) { + System.out.println(mergeRequest); + } + } finally { + connection.close(); + } + } + + public static void main(String[] args) { + int status; + OnlineMerge onlineMerge = new OnlineMerge(); + try { + while (true) { + if (args.length >= 1 && "-force".equals(args[0])) { + String[] newArgs = new String[args.length - 1]; + for (int i = 1; i < args.length; i++) { + newArgs[i - 1] = args[i]; + } + args = newArgs; + onlineMerge.setForce(true); + } else if (args.length >= 1 && "-async".equals(args[0])) { + String[] newArgs = new String[args.length - 1]; + for (int i = 1; i < args.length; i++) { + newArgs[i - 1] = args[i]; + } + args = newArgs; + onlineMerge.setSync(false); + } else if (args.length >= 1 && "-show".equals(args[0])) { + showMergeRequests(); + System.exit(0); + } else { + break; + } + } + status = ToolRunner.run(HBaseConfiguration.create(), onlineMerge, args); + } catch (Exception e) { + LOG.error("exiting due to error", e); + status = -1; + } + System.exit(status); + } +} Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java 
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java	(revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java	(working copy)
@@ -92,6 +92,11 @@
   @Override
   public void process() {
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
+    // Check if this region is merging or not
+    if (this.assignmentManager.isRegionInMerging(this.regionInfo)) {
+      assignmentManager.offlineMergingRegion(regionInfo);
+      return;
+    }
     // Check if this table is being disabled or not
     if (this.assignmentManager.getZKTable().
         isDisablingOrDisabledTable(this.regionInfo.getTableNameAsString())) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionMergeManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionMergeManager.java	(revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionMergeManager.java	(revision 0)
@@ -0,0 +1,766 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.master.MergeTransaction.JournalState;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.ZKTable;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * Receives and handles merge requests via ZK, manages merge transactions and
+ * the list of merging regions, and periodically resubmits failed merges
+ */
+public class RegionMergeManager extends ZooKeeperListener {
+  private static final Log LOG = LogFactory.getLog(RegionMergeManager.class);
+
+  private MasterServices master;
+  private final AssignmentManager assignmentManager;
+  private final CatalogJanitor catalogJanitor;
+  private final Configuration conf;
+
+  // A set used to store the regions which are merging; a merging region
+  // is disabled, isn't online anywhere and won't be assigned any more
+  private final ConcurrentSkipListSet<String> mergingRegions;
+  // Set of regions which are already split; only add to this set if the
+  // region is in mergingRegions
+  private final ConcurrentSkipListSet<String> splitRegions;
+
+  // Map of merge transactions in processing where the key is the transaction name
+  private final ConcurrentHashMap<String, MergeTransactionData> processingMergeTransaction;
+
+  // Queue of merge transactions which are waiting for resubmission
+  private final BlockingQueue<MergeTransactionData> resubmissionQueue;
+
+  /** Resubmit thread schedule pool */
+  private ScheduledExecutorService resubmitSchedulePool;
+  // Running interval of the resubmit thread, 60s by default
+  private final int resubmitThreadPeriod;
+  static final int DEFAULT_RESUBMIT_PERIOD = 60;
+
+  // Used for wait/notify signalling
+  private final Object present_object = new Object();
+
+  private final int timePerSleep = 10;
+  // 120s
+  private static final int DEFAULT_TIME_OUT = 120000;
+  private final int timeout;
+  /**
+   * If it is the first time we called {@link RegionMergeManager#fetchMergeRequest()},
+   * we should make sure old merge transactions are handled first
+   */
+  private boolean isFirstFetchRequest = true;
+
+  // Thread pool used to run merge requests
+  private ExecutorService mergesPool;
+  private final int mergeThreads;
+
+  /**
+   * Constructs a new merge manager.
+   * @param master
+   * @param watcher
+   * @throws KeeperException
+   * @throws IOException
+   */
+  public RegionMergeManager(final MasterServices master, ZooKeeperWatcher watcher)
+      throws KeeperException, IOException {
+    super(watcher);
+    this.master = master;
+    this.conf = master.getConfiguration();
+    this.timeout = conf.getInt("hbase.merge.wait.timeout.millis", DEFAULT_TIME_OUT);
+    this.catalogJanitor = new CatalogJanitor(master, master);
+    this.assignmentManager = master.getAssignmentManager();
+    this.mergingRegions = new ConcurrentSkipListSet<String>();
+    this.splitRegions = new ConcurrentSkipListSet<String>();
+    this.processingMergeTransaction =
+        new ConcurrentHashMap<String, MergeTransactionData>();
+    this.resubmissionQueue = new LinkedBlockingQueue<MergeTransactionData>();
+    this.mergeThreads = conf.getInt("hbase.merge.thread.count", 1);
+    this.resubmitThreadPeriod = conf.getInt("hbase.merge.resubmit.period",
+        DEFAULT_RESUBMIT_PERIOD);
+    this.assignmentManager.setMergeManager(this);
+  }
+
+  /**
+   * Submit the merge request to the pool
+   * @param transactionData
+   * @param initialState
+   */
+  private void requestMerge(MergeTransactionData transactionData,
+      JournalState initialState) {
+    try {
+      this.mergesPool.execute(new MergeRequest(this, transactionData,
+          initialState, master));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Merge requested for " + transactionData + ", initialState="
+            + initialState);
+      }
+    } catch (RejectedExecutionException ree) {
+      LOG.warn("Could not execute merge for " + transactionData, ree);
+    }
+  }
+
+  /**
+   * Handle the merge transaction data from ZK
+   * @param transactionData
+   * @throws KeeperException
+   */
+  private synchronized void handleMergeTransactionData(
+      MergeTransactionData transactionData) throws KeeperException {
+    String regionA = transactionData.getRegionA();
+    String regionB = transactionData.getRegionB();
+    if (mergingRegions.contains(regionA) || mergingRegions.contains(regionB)) {
+      LOG.warn("Merge request " + transactionData + " tries to merge region "
+          + (mergingRegions.contains(regionA) ? regionA : regionB)
+          + " which is already merging");
+      finishMergeTransaction(transactionData, false);
+    } else if (regionA.equals(regionB)) {
+      LOG.warn("Invalid merge request: " + transactionData
+          + ", shouldn't merge a region with itself");
+      finishMergeTransaction(transactionData, false);
+    } else {
+      String transactionNode = ZKUtil.joinZNode(watcher.mergeZNode,
+          transactionData.getTransactionName());
+      byte[] data = ZKUtil.getData(watcher, transactionNode);
+      JournalState initialState = MergeTransaction
+          .parseJournalStateFromBytes(data);
+      mergingRegions.add(regionA);
+      mergingRegions.add(regionB);
+      requestMerge(transactionData, initialState);
+    }
+  }
+
+  HRegionInfo getRegionInfoByEncodedName(String tableName,
+      String regionEncodedName) throws IOException {
+    RegionState regionState = this.assignmentManager.getRegionStates()
+        .getRegionState(regionEncodedName);
+    if (regionState != null) {
+      return regionState.getRegion();
+    }
+    List<HRegionInfo> tableRegions = MetaReader.getTableRegions(
+        this.master.getCatalogTracker(), Bytes.toBytes(tableName));
+    for (HRegionInfo regionInfo : tableRegions) {
+      if (regionInfo.getEncodedName().equals(regionEncodedName)) {
+        return regionInfo;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Starts the thread pool and the tracking of merge requests.
+   *
+   * @throws KeeperException
+   * @throws IOException
+   */
+  public void start() throws KeeperException, IOException {
+    this.mergesPool = Executors.newFixedThreadPool(
+        mergeThreads,
+        new ThreadFactoryBuilder()
+            .setNameFormat("MergeManager-Merge-Thread #%d").setDaemon(true)
+            .build());
+
+    this.resubmitSchedulePool = Executors.newScheduledThreadPool(
+        1,
+        new ThreadFactoryBuilder()
+            .setNameFormat("MergeManager-Resubmit-Thread #%d").setDaemon(true)
+            .build());
+    resubmitSchedulePool.scheduleAtFixedRate(new ResubmitThread(this,
+        this.resubmissionQueue), resubmitThreadPeriod, resubmitThreadPeriod,
+        TimeUnit.SECONDS);
+    watcher.registerListener(this);
+    fetchMergeRequest();
+  }
+
+  /**
+   * Shut down the thread pools
+   */
+  public void stop() {
+    if (mergesPool != null) {
+      this.mergesPool.shutdown();
+      waitFor(mergesPool, "Region Merge Thread");
+    }
+    if (this.resubmitSchedulePool != null) {
+      this.resubmitSchedulePool.shutdown();
+    }
+  }
+
+  private void waitFor(ExecutorService t, String name) {
+    boolean done = false;
+    while (!done) {
+      try {
+        LOG.info("Waiting for " + name + " to finish...");
+        done = t.awaitTermination(10, TimeUnit.SECONDS);
+        if (!done) {
+          t.shutdownNow();
+        }
+      } catch (InterruptedException ie) {
+        LOG.warn("Interrupted waiting for " + name + " to finish...");
+      }
+    }
+  }
+
+  /**
+   * Thread used to resubmit failed merge transactions
+   */
+  private static class ResubmitThread extends Thread {
+    RegionMergeManager mergeManager;
+    BlockingQueue<MergeTransactionData> resubmitQueue;
+
+    public ResubmitThread(RegionMergeManager mergeManager,
+        BlockingQueue<MergeTransactionData> resubmitQueue) {
+      super("MergeManager.ResubmitThread");
+      this.mergeManager = mergeManager;
+      this.resubmitQueue = resubmitQueue;
+      setDaemon(true);
+    }
+
+    @Override
+    public void run() {
+      MergeTransactionData transactionData = null;
+      try {
+        while ((transactionData = resubmitQueue.poll()) != null) {
+          String transactionNode = ZKUtil.joinZNode(
+              mergeManager.watcher.mergeZNode,
+              transactionData.getTransactionName());
+          byte[] data = ZKUtil.getData(mergeManager.watcher, transactionNode);
+          JournalState initialState = MergeTransaction
+              .parseJournalStateFromBytes(data);
+          mergeManager.requestMerge(transactionData, initialState);
+        }
+      } catch (KeeperException e) {
+        LOG.warn("Failed to resubmit transaction " + transactionData, e);
+        if (transactionData != null) {
+          mergeManager.addToResubmitQueue(transactionData);
+        }
+      }
+    }
+  }
+
+  /**
+   * Fetch merge requests via ZK
+   * @throws KeeperException
+   */
+  private synchronized void fetchMergeRequest() throws KeeperException {
+    List<String> mergeTransactions = ZKUtil.listChildrenAndWatchForNewChildren(
+        watcher, watcher.mergeZNode);
+    if (isFirstFetchRequest) {
+      isFirstFetchRequest = false;
+      List<String> oldMergeTransactions = new ArrayList<String>();
+      List<String> newMergeTransactions = new ArrayList<String>();
+      for (String transactionName : mergeTransactions) {
+        String transactionNode = ZKUtil.joinZNode(watcher.mergeZNode,
+            transactionName);
+        byte[] data = ZKUtil.getData(watcher, transactionNode);
+        JournalState initialState = MergeTransaction
+            .parseJournalStateFromBytes(data);
+        if (initialState != JournalState.CREATE_MERGE) {
+          oldMergeTransactions.add(transactionName);
+        } else {
+          newMergeTransactions.add(transactionName);
+        }
+      }
+      // Handle in-flight (old) transactions before brand-new requests
+      mergeTransactions.clear();
+      mergeTransactions.addAll(oldMergeTransactions);
+      mergeTransactions.addAll(newMergeTransactions);
+    }
+    for (String transactionName : mergeTransactions) {
+      if (!MergeTransactionData.isValidTransactionName(transactionName)) {
+        LOG.warn("Invalid merge transaction name=" + transactionName
+            + ", cleaning it up");
+        ZKUtil.deleteNodeRecursively(watcher,
+            ZKUtil.joinZNode(watcher.mergeZNode, transactionName));
+      } else if (this.processingMergeTransaction.containsKey(transactionName)) {
+        continue;
+      } else {
+        MergeTransactionData transactionData = new MergeTransactionData(
+            transactionName);
+        this.processingMergeTransaction.put(transactionName, transactionData);
+        handleMergeTransactionData(transactionData);
+      }
+    }
+  }
+
+  /**
+   * Finish the merge transaction: delete the transaction ZK node, and remove
+   * the regions from the merging sets if the request was actually submitted
+   * @param transactionData
+   * @param successfulSubmission true if it is called by the MergeRequest thread
+   * @throws KeeperException
+   */
+  void finishMergeTransaction(MergeTransactionData transactionData,
+      boolean successfulSubmission) throws KeeperException {
+    ZKUtil.deleteNodeRecursively(watcher,
+        ZKUtil.joinZNode(watcher.mergeZNode,
+            transactionData.getTransactionName()));
+    this.processingMergeTransaction
+        .remove(transactionData.getTransactionName());
+    if (successfulSubmission) {
+      String regionA = transactionData.getRegionA();
+      String regionB = transactionData.getRegionB();
+      this.mergingRegions.remove(regionA);
+      this.mergingRegions.remove(regionB);
+      this.splitRegions.remove(regionA);
+      this.splitRegions.remove(regionB);
+    }
+  }
+
+  /**
+   * Add to the resubmission queue
+   * @param transactionData
+   */
+  void addToResubmitQueue(MergeTransactionData transactionData) {
+    this.resubmissionQueue.add(transactionData);
+  }
+
+  /**
+   * Used in test
+   * @return resubmission queue size
+   */
+  int resubmitQueusSize() {
+    return this.resubmissionQueue.size();
+  }
+
+  /**
+   * Used in test
+   * @throws InterruptedException
+   */
+  void triggerResubmissionForTest() throws InterruptedException {
+    ResubmitThread resubmitThread = new ResubmitThread(this,
+        this.resubmissionQueue);
+    resubmitThread.start();
+    resubmitThread.join();
+  }
+
+  /**
+   * Assign the merging region if it is offline, called when cancelling the
+   * merge
+   * @param regionInfo
+   */
+  void assignMergingRegionIfOffline(HRegionInfo regionInfo) {
+    String tableName = regionInfo.getTableNameAsString();
+    ZKTable zkTable = assignmentManager.getZKTable();
+    if (zkTable.isTablePresent(tableName)
+        && !zkTable.isDisablingOrDisabledTable(tableName)) {
+      if (isRegionOffline(regionInfo) && !isRegionSplit(regionInfo)) {
+        assignmentManager.assign(regionInfo, true);
+      }
+    }
+    this.mergingRegions.remove(regionInfo.getEncodedName());
+  }
+
+  /**
+   * Assign the merged region if it is offline, called after adding the merged
+   * region to META
+   * @param mergedRegionInfo
+   * @throws InterruptedException
+   */
+  void assignMergedRegionIfOffline(HRegionInfo mergedRegionInfo)
+      throws InterruptedException {
+    String tableName = mergedRegionInfo.getTableNameAsString();
+    ZKTable zkTable = assignmentManager.getZKTable();
+    waitUntilTableNotInEnabling(zkTable, tableName);
+    if (zkTable.isTablePresent(tableName)
+        && !zkTable.isDisablingOrDisabledTable(tableName)) {
+      if (isRegionOffline(mergedRegionInfo)) {
+        assignmentManager.assign(mergedRegionInfo, true);
+      }
+    }
+  }
+
+  /**
+   * Wait until two regions are offline
+   * @param regionA
+   * @param regionB
+   * @throws InterruptedException
+   */
+  void waitUntilRegionsOffline(HRegionInfo regionA, HRegionInfo regionB)
+      throws InterruptedException {
+    long expireTime = EnvironmentEdgeManager.currentTimeMillis() + this.timeout;
+    while (!this.master.isStopped()
+        && EnvironmentEdgeManager.currentTimeMillis() < expireTime) {
+      if (isRegionOffline(regionA) && isRegionOffline(regionB)) {
+        return;
+      } else {
+        synchronized (present_object) {
+          present_object.wait(timePerSleep);
+        }
+      }
+    }
+    throw new RuntimeException("Timeout waiting for region "
+        + (isRegionOffline(regionA) ? regionB.getEncodedName()
+            : regionA.getEncodedName()) + " to go offline");
+  }
+
+  /**
+   * Wait until the master is initialized
+   * @throws InterruptedException
+   */
+  void waitUntilMasterInitialized() throws InterruptedException {
+    while (!this.master.isStopped()) {
+      if (((HMaster) this.master).isInitialized()) {
+        return;
+      }
+      Thread.sleep(timePerSleep);
+    }
+    throw new RuntimeException("Master stopped while waiting for initialization");
+  }
+
+  /**
+   * Wait until no dead servers are in process
+   * @param masterServices
+   * @throws InterruptedException
+   */
+  void waitUntilNoDeadServerInProcess(MasterServices masterServices)
+      throws InterruptedException {
+    while (!this.master.isStopped()) {
+      if (!masterServices.getServerManager().areDeadServersInProgress()) {
+        return;
+      }
+      Thread.sleep(timePerSleep);
+    }
+    throw new RuntimeException(
+        "Master stopped while waiting for dead servers to be processed");
+  }
+
+  /**
+   * Wait until the specified table is not in enabling state
+   * @param zkTable
+   * @param tableName
+   * @throws InterruptedException
+   */
+  void waitUntilTableNotInEnabling(ZKTable zkTable, String tableName)
+      throws InterruptedException {
+    long expireTime = EnvironmentEdgeManager.currentTimeMillis() + this.timeout;
+    while (!this.master.isStopped()
+        && EnvironmentEdgeManager.currentTimeMillis() < expireTime) {
+      if (!zkTable.isEnablingTable(tableName)) {
+        return;
+      }
+      Thread.sleep(timePerSleep);
+    }
+    throw new RuntimeException("Timeout waiting for table " + tableName
+        + " to leave enabling state");
+  }
+
+  /**
+   * Check if the merging region is split
+   * @param regionInfo
+   * @return true if the merging region is split
+   */
+  boolean isRegionSplit(HRegionInfo regionInfo) {
+    if (regionInfo.isSplitParent()) {
+      return true;
+    }
+    if (this.splitRegions.contains(regionInfo.getEncodedName())) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Check if the merging region is offline
+   * @param regionInfo
+   * @return true if the merging region is offline
+   */
+  boolean isRegionOffline(HRegionInfo regionInfo) {
+    if (!this.assignmentManager.getRegionStates().isRegionInTransition(regionInfo)
+        && this.assignmentManager.getRegionStates()
+            .getRegionServerOfRegion(regionInfo) == null) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Record the transaction's JournalState on ZK
+   * @param transactionName
+   * @param state
+   * @throws NoNodeException
+   * @throws KeeperException
+   */
+  void setJournalStateOnZK(String transactionName, JournalState state)
+      throws NoNodeException, KeeperException {
+    String transactionNode = ZKUtil.joinZNode(watcher.mergeZNode,
+        transactionName);
+    ZKUtil.setData(watcher, transactionNode,
+        MergeTransaction.getBytesFromJournalState(state));
+  }
+
+  /**
+   * Clean up the split parent of a merging region in the prepare step
+   * @param parent
+   * @param rowContent
+   * @return true if cleaned successfully
+   * @throws IOException
+   */
+  boolean cleanParent(final HRegionInfo parent, Result rowContent)
+      throws IOException {
+    return this.catalogJanitor.cleanParent(parent, rowContent);
+  }
+
+  /**
+   * Clean references for the merged region
+   * @param mergedRegion
+   * @param regionA
+   * @param regionB
+   * @return true if cleaned successfully
+   * @throws IOException
+   */
+  boolean cleanMergeRegion(final HRegionInfo mergedRegion,
+      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
+    return this.catalogJanitor.cleanMergeRegion(mergedRegion, regionA, regionB);
+  }
+
+  /**
+   * Get split parent regions for the specified table
+   * @param tableName
+   * @return map of split-parent region infos
+   */
+  Map<HRegionInfo, Result> getSplitParents(final byte[] tableName)
+      throws IOException {
+    return this.catalogJanitor.getMergedRegionsAndSplitParents(tableName)
+        .getThird();
+  }
+
+  /**
+   * Check whether a region is merging; if true it shouldn't be assigned
+   * @param regionInfo
+   * @return true if the region is merging
+   */
+  boolean isRegionInMerging(final HRegionInfo regionInfo) {
+    return this.mergingRegions.contains(regionInfo.getEncodedName());
+  }
+
+  /**
+   * Check if any regions are merging
+   * @return true if any region is merging
+   */
+  boolean isAnyRegionInMerging() {
+    return !this.mergingRegions.isEmpty();
+  }
+
+  public Set<String> getMergingRegions() {
+    Set<String> clone = new HashSet<String>(this.mergingRegions.size());
+    clone.addAll(this.mergingRegions);
+    return clone;
+  }
+
+  /**
+   * Only used for test
+   * @param regionName encoded name
+   */
+  void addToMergingRegions(String regionName) {
+    mergingRegions.add(regionName);
+  }
+
+  /**
+   * Get the number of merging regions for the specified table
+   * @param tableName
+   * @return number of merging regions of the given table
+   */
+  public int getNumOfMergingRegionsForTable(String tableName) {
+    int num = 0;
+    for (MergeTransactionData transactionData : processingMergeTransaction
+        .values()) {
+      if (tableName.equals(transactionData.getTableName())) {
+        num += 2;
+      }
+    }
+    return num;
+  }
+
+  /**
+   * AssignmentManager notifies MergeManager that a region is offline
+   * @param regionInfo
+   */
+  void notifyRegionOffline(final HRegionInfo regionInfo) {
+    if (this.mergingRegions.contains(regionInfo.getEncodedName())) {
+      synchronized (present_object) {
+        present_object.notify();
+      }
+    }
+  }
+
+  /**
+   * AssignmentManager notifies MergeManager that a region is split
+   * @param regionInfo
+   */
+  void notifyRegionSplit(final HRegionInfo regionInfo) {
+    if (this.mergingRegions.contains(regionInfo.getEncodedName())) {
+      this.splitRegions.add(regionInfo.getEncodedName());
+      synchronized (present_object) {
+        present_object.notifyAll();
+      }
+    }
+  }
+
+  // ZooKeeper events
+
+  /**
+   * New transaction node has been created or deleted.
+   * <p>
+   * When this happens we must:
+   * <ol>
+   * <li>Find the new merge request and handle it</li>
+   * </ol>
+   */
+  @Override
+  public void nodeChildrenChanged(String path) {
+    if (path.equals(watcher.mergeZNode)) {
+      try {
+        LOG.debug("Merge request arrived or completed");
+        fetchMergeRequest();
+      } catch (KeeperException e) {
+        LOG.error("Unexpected ZK exception handling merge requests", e);
+      } catch (Throwable t) {
+        LOG.error("Unhandled exception: " + t.getMessage(), t);
+      }
+    }
+  }
+
+  public static class MergeTransactionData {
+    public static final String SEPARATOR = "#";
+    private final String transactionName;
+    private final String tableName;
+    private final String regionA;
+    private final String regionB;
+    private final boolean isForce;
+
+    MergeTransactionData(String transactionName) {
+      this.transactionName = transactionName;
+      String[] elements = transactionName.split(SEPARATOR);
+      assert elements.length == 4;
+      this.tableName = elements[0];
+      this.regionA = elements[1];
+      this.regionB = elements[2];
+      this.isForce = Boolean.parseBoolean(elements[3]);
+    }
+
+    String getTransactionName() {
+      return this.transactionName;
+    }
+
+    String getTableName() {
+      return this.tableName;
+    }
+
+    String getRegionA() {
+      return this.regionA;
+    }
+
+    String getRegionB() {
+      return this.regionB;
+    }
+
+    boolean isForce() {
+      return this.isForce;
+    }
+
+    @Override
+    public String toString() {
+      return "table=" + tableName + ", regionA=" + regionA + ", regionB="
+          + regionB + ", force=" + isForce;
+    }
+
+    /**
+     * A valid transaction name is
+     * tableName + "#" + regionAEncodedName + "#" + regionBEncodedName + "#" +
+     * "true"/"false"
+     * @param transactionName
+     * @return true if it is valid
+     */
+    public static boolean isValidTransactionName(String transactionName) {
+      String[] elements = transactionName.split(SEPARATOR);
+      if (elements.length != 4) {
+        return false;
+      }
+      if (elements[1].length() != elements[2].length()) {
+        return false;
+      }
+      if (elements[1].equals(elements[2])) {
+        return false;
+      }
+      if (!elements[3].equals("true") && !elements[3].equals("false")) {
+        return false;
+      }
+      return true;
+    }
+
+    public static String generateTransactionName(String tableName,
+        String regionAEncodedName, String regionBEncodedName, boolean force) {
+      return tableName + SEPARATOR + regionAEncodedName + SEPARATOR
+          + regionBEncodedName + SEPARATOR + force;
+    }
+  }
+
+  /**
+   * Create a merge request via ZK. Set force to true when merging two
+   * non-adjacent regions; otherwise such a request will be skipped
+   * @param zkw
+   * @param tableName
+   * @param regionAEncodedName
+   * @param regionBEncodedName
+   * @param force if it is false, only merge two adjacent regions
+   * @throws KeeperException
+   */
+  static void createMergeRequest(ZooKeeperWatcher zkw, String tableName,
+      String regionAEncodedName, String regionBEncodedName, boolean force)
+      throws KeeperException {
+    String transactionName = MergeTransactionData.generateTransactionName(
+        tableName, regionAEncodedName, regionBEncodedName, force);
+    if (MergeTransactionData.isValidTransactionName(transactionName)) {
+      String transactionNode = ZKUtil
+          .joinZNode(zkw.mergeZNode, transactionName);
+      ZKUtil.createAndNoWatch(zkw, transactionNode,
+          MergeTransaction.getBytesFromJournalState(JournalState.CREATE_MERGE));
+    } else {
+      throw new IllegalArgumentException("Unknown merge parameters, table="
+          + tableName + ", regionA=" + regionAEncodedName + ", regionB="
+          + regionBEncodedName);
+    }
+  }
+}
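For clarity, a short hypothetical round trip through the transaction-name scheme above; the encoded names are placeholders of equal length, as isValidTransactionName() requires, and since the MergeTransactionData constructor is package-private this would run from the same package (e.g. a test):

    // generateTransactionName() yields "TestTable#<regionA>#<regionB>#false"
    String name = MergeTransactionData.generateTransactionName("TestTable",
        "30c4d0a47703214845d0676d0c7b36dc",
        "9812cf4d2e8b49d71a29e0fd22d4a4f6", false);
    // The master-side listener later rebuilds the fields by splitting on "#"
    MergeTransactionData data = new MergeTransactionData(name);
    assert "TestTable".equals(data.getTableName()) && !data.isForce();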
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java	(revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java	(working copy)
@@ -1113,5 +1113,43 @@
     }
     return hris;
   }
+
+  /**
+   * Generate the fixed region info resulting from a merge
+   * @param tabledesc
+   * @param a merging region A
+   * @param b merging region B
+   * @return the merged region info
+   * @throws IOException
+   */
+  public static HRegionInfo generateFixedRegionByMerge(
+      final HTableDescriptor tabledesc, final HRegionInfo a, final HRegionInfo b)
+      throws IOException {
+    if (!a.getTableNameAsString().equals(b.getTableNameAsString())) {
+      throw new IllegalArgumentException("Regions do not belong to the same table");
+    }
+    // Presume both are of same region type -- i.e. both user or catalog
+    // table regions. This way can use comparator.
+    // Start key of the merged region: empty if either start key is empty,
+    // otherwise the smaller of the two.
+    final byte[] startKey = (a.getComparator().matchingRows(a.getStartKey(), 0,
+        a.getStartKey().length, HConstants.EMPTY_BYTE_ARRAY, 0,
+        HConstants.EMPTY_BYTE_ARRAY.length) || b.getComparator().matchingRows(
+        b.getStartKey(), 0, b.getStartKey().length, HConstants.EMPTY_BYTE_ARRAY, 0,
+        HConstants.EMPTY_BYTE_ARRAY.length)) ? HConstants.EMPTY_BYTE_ARRAY :
+        (a.getComparator().compareRows(a.getStartKey(), 0, a.getStartKey().length,
+            b.getStartKey(), 0, b.getStartKey().length) <= 0 ? a.getStartKey()
+            : b.getStartKey());
+    // End key of the merged region: empty if either end key is empty,
+    // otherwise the larger of the two.
+    final byte[] endKey = (a.getComparator().matchingRows(a.getEndKey(), 0,
+        a.getEndKey().length, HConstants.EMPTY_BYTE_ARRAY, 0,
+        HConstants.EMPTY_BYTE_ARRAY.length) || a.getComparator().matchingRows(
+        b.getEndKey(), 0, b.getEndKey().length, HConstants.EMPTY_BYTE_ARRAY, 0,
+        HConstants.EMPTY_BYTE_ARRAY.length)) ? HConstants.EMPTY_BYTE_ARRAY :
+        (a.getComparator().compareRows(a.getEndKey(), 0, a.getEndKey().length,
+            b.getEndKey(), 0, b.getEndKey().length) <= 0 ? b.getEndKey()
+            : a.getEndKey());
+    // Merged region is sorted between the two merging regions in META
+    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc.getName(), startKey,
+        endKey, false, Math.max(a.getRegionId(), b.getRegionId()) + 1);
+    return newRegionInfo;
+  }
 }
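A small illustration of the start/end key rule above, assuming an HTableDescriptor htd for the (hypothetical) table is at hand:

    // Two disjoint regions: ["a","b") and ["d",""); an empty end key means
    // the region extends to the end of the table.
    HRegionInfo r1 = new HRegionInfo(Bytes.toBytes("TestTable"),
        Bytes.toBytes("a"), Bytes.toBytes("b"));
    HRegionInfo r2 = new HRegionInfo(Bytes.toBytes("TestTable"),
        Bytes.toBytes("d"), HConstants.EMPTY_BYTE_ARRAY);
    HRegionInfo merged = HRegionInfo.generateFixedRegionByMerge(htd, r1, r2);
    // merged covers ["a",""): the smaller start key wins, the empty end key
    // wins, and regionId = max(r1,r2) + 1 distinguishes it from both parents.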
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java	(working copy)
@@ -103,6 +103,8 @@
   public String splitLogZNode;
   // znode containing the state of the load balancer
   public String balancerZNode;
+  // znode used for merge
+  public String mergeZNode;
 
   // Certain ZooKeeper nodes need to be world-readable
   public static final ArrayList<ACL> CREATOR_ALL_AND_WORLD_READABLE =
@@ -166,6 +168,7 @@
       ZKUtil.createAndFailSilent(this, tableZNode);
       ZKUtil.createAndFailSilent(this, splitLogZNode);
       ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode);
+      ZKUtil.createAndFailSilent(this, mergeZNode);
     } catch (KeeperException e) {
       throw new ZooKeeperConnectionException(
           prefix("Unexpected KeeperException creating base node"), e);
@@ -215,6 +218,8 @@
         conf.get("zookeeper.znode.splitlog", HConstants.SPLIT_LOGDIR_NAME));
     balancerZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.balancer", "balancer"));
+    mergeZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.merge", "merge"));
   }
 
   /**
Index: hbase-protocol/src/main/protobuf/MasterAdmin.proto
===================================================================
--- hbase-protocol/src/main/protobuf/MasterAdmin.proto	(revision 1443294)
+++ hbase-protocol/src/main/protobuf/MasterAdmin.proto	(working copy)
@@ -63,6 +63,16 @@
 message MoveRegionResponse {
 }
 
+message MergeRegionRequest {
+  required bytes tableName = 1;
+  required bytes regionAName = 2;
+  required bytes regionBName = 3;
+  optional bool force = 4 [default = false];
+}
+
+message MergeRegionResponse {
+}
+
 message AssignRegionRequest {
   required RegionSpecifier region = 1;
 }
@@ -193,6 +203,10 @@
   /** Move the region region to the destination server. */
   rpc moveRegion(MoveRegionRequest)
     returns(MoveRegionResponse);
+
+  /** Merge two regions */
+  rpc mergeRegions(MergeRegionRequest)
+    returns(MergeRegionResponse);
 
   /** Assign a region to a server chosen at random.
*/ rpc assignRegion(AssignRegionRequest) Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java =================================================================== --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (revision 1443294) +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (working copy) @@ -3404,6 +3404,908 @@ // @@protoc_insertion_point(class_scope:MoveRegionResponse) } + public interface MergeRegionRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes tableName = 1; + boolean hasTableName(); + com.google.protobuf.ByteString getTableName(); + + // required bytes regionAName = 2; + boolean hasRegionAName(); + com.google.protobuf.ByteString getRegionAName(); + + // required bytes regionBName = 3; + boolean hasRegionBName(); + com.google.protobuf.ByteString getRegionBName(); + + // optional bool force = 4 [default = false]; + boolean hasForce(); + boolean getForce(); + } + public static final class MergeRegionRequest extends + com.google.protobuf.GeneratedMessage + implements MergeRegionRequestOrBuilder { + // Use MergeRegionRequest.newBuilder() to construct. + private MergeRegionRequest(Builder builder) { + super(builder); + } + private MergeRegionRequest(boolean noInit) {} + + private static final MergeRegionRequest defaultInstance; + public static MergeRegionRequest getDefaultInstance() { + return defaultInstance; + } + + public MergeRegionRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionRequest_fieldAccessorTable; + } + + private int bitField0_; + // required bytes tableName = 1; + public static final int TABLENAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString tableName_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + + // required bytes regionAName = 2; + public static final int REGIONANAME_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString regionAName_; + public boolean hasRegionAName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getRegionAName() { + return regionAName_; + } + + // required bytes regionBName = 3; + public static final int REGIONBNAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString regionBName_; + public boolean hasRegionBName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getRegionBName() { + return regionBName_; + } + + // optional bool force = 4 [default = false]; + public static final int FORCE_FIELD_NUMBER = 4; + private boolean force_; + public boolean hasForce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getForce() { + return force_; + } + + private void initFields() { + tableName_ = com.google.protobuf.ByteString.EMPTY; + regionAName_ = com.google.protobuf.ByteString.EMPTY; + regionBName_ = com.google.protobuf.ByteString.EMPTY; + force_ = false; + } + private 
byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionAName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionBName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, regionAName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, regionBName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(4, force_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, regionAName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, regionBName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, force_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasRegionAName() == other.hasRegionAName()); + if (hasRegionAName()) { + result = result && getRegionAName() + .equals(other.getRegionAName()); + } + result = result && (hasRegionBName() == other.hasRegionBName()); + if (hasRegionBName()) { + result = result && getRegionBName() + .equals(other.getRegionBName()); + } + result = result && (hasForce() == other.hasForce()); + if (hasForce()) { + result = result && (getForce() + == other.getForce()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLENAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasRegionAName()) { + hash = (37 * hash) + REGIONANAME_FIELD_NUMBER; + hash = (53 * hash) + 
getRegionAName().hashCode(); + } + if (hasRegionBName()) { + hash = (37 * hash) + REGIONBNAME_FIELD_NUMBER; + hash = (53 * hash) + getRegionBName().hashCode(); + } + if (hasForce()) { + hash = (37 * hash) + FORCE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getForce()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tableName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + regionAName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + regionBName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + force_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 
0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = tableName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.regionAName_ = regionAName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.regionBName_ = regionBName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.force_ = force_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + setTableName(other.getTableName()); + } + if (other.hasRegionAName()) { + setRegionAName(other.getRegionAName()); + } + if (other.hasRegionBName()) { + setRegionBName(other.getRegionBName()); + } + if (other.hasForce()) { + setForce(other.getForce()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!hasRegionAName()) { + + return false; + } + if (!hasRegionBName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + regionAName_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + regionBName_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + force_ = input.readBool(); + break; + } + } + } + } + + private int bitField0_; + + // required bytes tableName = 1; + private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getTableName() { + return tableName_; + } + public Builder setTableName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + + // required bytes regionAName = 2; + private com.google.protobuf.ByteString regionAName_ = com.google.protobuf.ByteString.EMPTY; + 
public boolean hasRegionAName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getRegionAName() { + return regionAName_; + } + public Builder setRegionAName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + regionAName_ = value; + onChanged(); + return this; + } + public Builder clearRegionAName() { + bitField0_ = (bitField0_ & ~0x00000002); + regionAName_ = getDefaultInstance().getRegionAName(); + onChanged(); + return this; + } + + // required bytes regionBName = 3; + private com.google.protobuf.ByteString regionBName_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasRegionBName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getRegionBName() { + return regionBName_; + } + public Builder setRegionBName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + regionBName_ = value; + onChanged(); + return this; + } + public Builder clearRegionBName() { + bitField0_ = (bitField0_ & ~0x00000004); + regionBName_ = getDefaultInstance().getRegionBName(); + onChanged(); + return this; + } + + // optional bool force = 4 [default = false]; + private boolean force_ ; + public boolean hasForce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public boolean getForce() { + return force_; + } + public Builder setForce(boolean value) { + bitField0_ |= 0x00000008; + force_ = value; + onChanged(); + return this; + } + public Builder clearForce() { + bitField0_ = (bitField0_ & ~0x00000008); + force_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:MergeRegionRequest) + } + + static { + defaultInstance = new MergeRegionRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MergeRegionRequest) + } + + public interface MergeRegionResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class MergeRegionResponse extends + com.google.protobuf.GeneratedMessage + implements MergeRegionResponseOrBuilder { + // Use MergeRegionResponse.newBuilder() to construct. 
+ private MergeRegionResponse(Builder builder) { + super(builder); + } + private MergeRegionResponse(boolean noInit) {} + + private static final MergeRegionResponse defaultInstance; + public static MergeRegionResponse getDefaultInstance() { + return defaultInstance; + } + + public MergeRegionResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_MergeRegionResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } 
+ } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:MergeRegionResponse) + } + + static { + defaultInstance = new MergeRegionResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MergeRegionResponse) + } + public interface AssignRegionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -14389,6 +15291,11 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request, 
com.google.protobuf.RpcCallback done); + public abstract void mergeRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request, + com.google.protobuf.RpcCallback done); + public abstract void assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request, @@ -14507,6 +15414,14 @@ } @java.lang.Override + public void mergeRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request, + com.google.protobuf.RpcCallback done) { + impl.mergeRegions(controller, request, done); + } + + @java.lang.Override public void assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request, @@ -14665,36 +15580,38 @@ case 3: return impl.moveRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest)request); case 4: + return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest)request); + case 5: return impl.assignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest)request); - case 5: + case 6: return impl.unassignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest)request); - case 6: + case 7: return impl.offlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest)request); - case 7: + case 8: return impl.deleteTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest)request); - case 8: + case 9: return impl.enableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest)request); - case 9: + case 10: return impl.disableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest)request); - case 10: + case 11: return impl.modifyTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest)request); - case 11: + case 12: return impl.createTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest)request); - case 12: + case 13: return impl.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest)request); - case 13: + case 14: return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest)request); - case 14: + case 15: return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest)request); - case 15: + case 16: return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest)request); - case 16: + case 17: return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request); - case 17: + case 18: return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request); - case 18: + case 19: return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request); - case 19: + case 20: return impl.execMasterService(controller, 
(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -14719,36 +15636,38 @@ case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest.getDefaultInstance(); case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.getDefaultInstance(); + case 5: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.getDefaultInstance(); - case 5: + case 6: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.getDefaultInstance(); - case 6: + case 7: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.getDefaultInstance(); - case 7: + case 8: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.getDefaultInstance(); - case 8: + case 9: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.getDefaultInstance(); - case 9: + case 10: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.getDefaultInstance(); - case 10: + case 11: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.getDefaultInstance(); - case 11: + case 12: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.getDefaultInstance(); - case 12: + case 13: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.getDefaultInstance(); - case 13: + case 14: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.getDefaultInstance(); - case 14: + case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance(); - case 15: + case 16: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance(); - case 16: + case 17: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance(); - case 17: + case 18: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance(); - case 18: + case 19: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); - case 19: + case 20: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -14773,36 +15692,38 @@ case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance(); case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance(); + case 5: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance(); - case 5: + case 6: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance(); - case 6: + case 7: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance(); - case 7: + case 8: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance(); - case 8: + case 9: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance(); - case 9: + case 10: return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance(); - case 10: + case 11: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(); - case 11: + case 12: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance(); - case 12: + case 13: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance(); - case 13: + case 14: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance(); - case 14: + case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(); - case 15: + case 16: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(); - case 16: + case 17: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(); - case 17: + case 18: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(); - case 18: + case 19: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); - case 19: + case 20: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -14832,6 +15753,11 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request, com.google.protobuf.RpcCallback done); + public abstract void mergeRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request, + com.google.protobuf.RpcCallback done); + public abstract void assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request, @@ -14955,81 +15881,86 @@ done)); return; case 4: + this.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: this.assignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 5: + case 6: this.unassignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 6: + case 7: this.offlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 7: + case 8: this.deleteTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 8: + case 9: this.enableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 9: + case 10: this.disableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 10: + case 11: 
this.modifyTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.createTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 16: + case 17: this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 17: + case 18: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 18: + case 19: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 19: + case 20: this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -15057,36 +15988,38 @@ case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest.getDefaultInstance(); case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.getDefaultInstance(); + case 5: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.getDefaultInstance(); - case 5: + case 6: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.getDefaultInstance(); - case 6: + case 7: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.getDefaultInstance(); - case 7: + case 8: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.getDefaultInstance(); - case 8: + case 9: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.getDefaultInstance(); - case 9: + case 10: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.getDefaultInstance(); - case 10: + case 11: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.getDefaultInstance(); - case 11: + case 12: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.getDefaultInstance(); - case 12: + case 13: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.getDefaultInstance(); - case 13: + case 14: return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.getDefaultInstance(); - case 14: + case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance(); - case 15: + case 16: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance(); - case 16: + case 17: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance(); - case 17: + case 18: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance(); - case 18: + case 19: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); - case 19: + case 20: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -15111,36 +16044,38 @@ case 3: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance(); case 4: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance(); + case 5: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance(); - case 5: + case 6: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance(); - case 6: + case 7: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance(); - case 7: + case 8: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance(); - case 8: + case 9: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance(); - case 9: + case 10: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance(); - case 10: + case 11: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(); - case 11: + case 12: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance(); - case 12: + case 13: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance(); - case 13: + case 14: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance(); - case 14: + case 15: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(); - case 15: + case 16: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(); - case 16: + case 17: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(); - case 17: + case 18: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(); - case 18: + case 19: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); - case 19: + case 20: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -15223,12 +16158,27 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance())); 
} + public void mergeRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance())); + } + public void assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance(), @@ -15243,7 +16193,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance(), @@ -15258,7 +16208,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance(), @@ -15273,7 +16223,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance(), @@ -15288,7 +16238,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance(), @@ -15303,7 +16253,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance(), @@ -15318,7 +16268,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(), @@ -15333,7 +16283,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - 
getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance(), @@ -15348,7 +16298,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance(), @@ -15363,7 +16313,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance(), @@ -15378,7 +16328,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(), @@ -15393,7 +16343,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(), @@ -15408,7 +16358,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(), @@ -15423,7 +16373,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -15438,7 +16388,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -15453,7 +16403,7 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -15490,6 +16440,11 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse mergeRegions( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request) @@ -15626,12 +16581,24 @@ } + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse mergeRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse assignRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance()); @@ -15643,7 +16610,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance()); @@ -15655,7 +16622,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance()); @@ -15667,7 +16634,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance()); @@ -15679,7 +16646,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance()); @@ -15691,7 +16658,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance()); @@ -15703,7 +16670,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance()); @@ -15715,7 +16682,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance()); @@ -15727,7 +16694,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance()); @@ -15739,7 +16706,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance()); @@ -15751,7 +16718,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance()); @@ -15763,7 +16730,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance()); @@ -15775,7 +16742,7 @@ 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(16), + getDescriptor().getMethods().get(17), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance()); @@ -15787,7 +16754,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(17), + getDescriptor().getMethods().get(18), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -15799,7 +16766,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(18), + getDescriptor().getMethods().get(19), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -15811,7 +16778,7 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(19), + getDescriptor().getMethods().get(20), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -15861,6 +16828,16 @@ com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_MoveRegionResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_MergeRegionRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MergeRegionRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MergeRegionResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MergeRegionResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_AssignRegionRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -16030,70 +17007,75 @@ "hema\"\026\n\024ModifyColumnResponse\"Z\n\021MoveRegi" + "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif", "ier\022#\n\016destServerName\030\002 \001(\0132\013.ServerName" + - "\"\024\n\022MoveRegionResponse\"7\n\023AssignRegionRe" + - "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"" + - "\026\n\024AssignRegionResponse\"O\n\025UnassignRegio" + - "nRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" + - "er\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegi" + - "onResponse\"8\n\024OfflineRegionRequest\022 \n\006re" + - "gion\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineR" + - "egionResponse\"J\n\022CreateTableRequest\022!\n\013t" + - "ableSchema\030\001 
\002(\0132\014.TableSchema\022\021\n\tsplitK", - "eys\030\002 \003(\014\"\025\n\023CreateTableResponse\"\'\n\022Dele" + - "teTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023Del" + - "eteTableResponse\"\'\n\022EnableTableRequest\022\021" + - "\n\ttableName\030\001 \002(\014\"\025\n\023EnableTableResponse" + - "\"(\n\023DisableTableRequest\022\021\n\ttableName\030\001 \002" + - "(\014\"\026\n\024DisableTableResponse\"J\n\022ModifyTabl" + - "eRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013tableSche" + - "ma\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableRes" + - "ponse\"\021\n\017ShutdownRequest\"\022\n\020ShutdownResp" + - "onse\"\023\n\021StopMasterRequest\"\024\n\022StopMasterR", - "esponse\"\020\n\016BalanceRequest\"&\n\017BalanceResp" + - "onse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalancer" + - "RunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronou" + - "s\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022\030\n" + - "\020prevBalanceValue\030\001 \001(\010\"\024\n\022CatalogScanRe" + - "quest\")\n\023CatalogScanResponse\022\022\n\nscanResu" + - "lt\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022" + - "\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalogJanitorR" + - "esponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036IsCatalogJ" + - "anitorEnabledRequest\"0\n\037IsCatalogJanitor", - "EnabledResponse\022\r\n\005value\030\001 \002(\0102\201\n\n\022Maste" + - "rAdminService\0222\n\taddColumn\022\021.AddColumnRe" + - "quest\032\022.AddColumnResponse\022;\n\014deleteColum" + - "n\022\024.DeleteColumnRequest\032\025.DeleteColumnRe" + - "sponse\022;\n\014modifyColumn\022\024.ModifyColumnReq" + - "uest\032\025.ModifyColumnResponse\0225\n\nmoveRegio" + - "n\022\022.MoveRegionRequest\032\023.MoveRegionRespon" + - "se\022;\n\014assignRegion\022\024.AssignRegionRequest" + - "\032\025.AssignRegionResponse\022A\n\016unassignRegio" + - "n\022\026.UnassignRegionRequest\032\027.UnassignRegi", - "onResponse\022>\n\rofflineRegion\022\025.OfflineReg" + - "ionRequest\032\026.OfflineRegionResponse\0228\n\013de" + - "leteTable\022\023.DeleteTableRequest\032\024.DeleteT" + - "ableResponse\0228\n\013enableTable\022\023.EnableTabl" + - "eRequest\032\024.EnableTableResponse\022;\n\014disabl" + - "eTable\022\024.DisableTableRequest\032\025.DisableTa" + - "bleResponse\0228\n\013modifyTable\022\023.ModifyTable" + - "Request\032\024.ModifyTableResponse\0228\n\013createT" + - "able\022\023.CreateTableRequest\032\024.CreateTableR" + - "esponse\022/\n\010shutdown\022\020.ShutdownRequest\032\021.", - "ShutdownResponse\0225\n\nstopMaster\022\022.StopMas" + - "terRequest\032\023.StopMasterResponse\022,\n\007balan" + - "ce\022\017.BalanceRequest\032\020.BalanceResponse\022M\n" + - "\022setBalancerRunning\022\032.SetBalancerRunning" + - "Request\032\033.SetBalancerRunningResponse\022;\n\016" + - "runCatalogScan\022\023.CatalogScanRequest\032\024.Ca" + - "talogScanResponse\022S\n\024enableCatalogJanito" + - "r\022\034.EnableCatalogJanitorRequest\032\035.Enable" + - "CatalogJanitorResponse\022\\\n\027isCatalogJanit" + - "orEnabled\022\037.IsCatalogJanitorEnabledReque", - "st\032 .IsCatalogJanitorEnabledResponse\022L\n\021" + - "execMasterService\022\032.CoprocessorServiceRe" + - "quest\032\033.CoprocessorServiceResponseBG\n*or" + - "g.apache.hadoop.hbase.protobuf.generated" + - "B\021MasterAdminProtosH\001\210\001\001\240\001\001" + "\"\024\n\022MoveRegionResponse\"g\n\022MergeRegionReq" + + "uest\022\021\n\ttableName\030\001 
\002(\014\022\023\n\013regionAName\030\002" + + " \002(\014\022\023\n\013regionBName\030\003 \002(\014\022\024\n\005force\030\004 \001(\010" + + ":\005false\"\025\n\023MergeRegionResponse\"7\n\023Assign" + + "RegionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" + + "ecifier\"\026\n\024AssignRegionResponse\"O\n\025Unass" + + "ignRegionRequest\022 \n\006region\030\001 \002(\0132\020.Regio" + + "nSpecifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unas" + + "signRegionResponse\"8\n\024OfflineRegionReque", + "st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025" + + "OfflineRegionResponse\"J\n\022CreateTableRequ" + + "est\022!\n\013tableSchema\030\001 \002(\0132\014.TableSchema\022\021" + + "\n\tsplitKeys\030\002 \003(\014\"\025\n\023CreateTableResponse" + + "\"\'\n\022DeleteTableRequest\022\021\n\ttableName\030\001 \002(" + + "\014\"\025\n\023DeleteTableResponse\"\'\n\022EnableTableR" + + "equest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023EnableTable" + + "Response\"(\n\023DisableTableRequest\022\021\n\ttable" + + "Name\030\001 \002(\014\"\026\n\024DisableTableResponse\"J\n\022Mo" + + "difyTableRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013t", + "ableSchema\030\002 \002(\0132\014.TableSchema\"\025\n\023Modify" + + "TableResponse\"\021\n\017ShutdownRequest\"\022\n\020Shut" + + "downResponse\"\023\n\021StopMasterRequest\"\024\n\022Sto" + + "pMasterResponse\"\020\n\016BalanceRequest\"&\n\017Bal" + + "anceResponse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031Set" + + "BalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013sy" + + "nchronous\030\002 \001(\010\"6\n\032SetBalancerRunningRes" + + "ponse\022\030\n\020prevBalanceValue\030\001 \001(\010\"\024\n\022Catal" + + "ogScanRequest\")\n\023CatalogScanResponse\022\022\n\n" + + "scanResult\030\001 \001(\005\"-\n\033EnableCatalogJanitor", + "Request\022\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalog" + + "JanitorResponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036Is" + + "CatalogJanitorEnabledRequest\"0\n\037IsCatalo" + + "gJanitorEnabledResponse\022\r\n\005value\030\001 \002(\0102\274" + + "\n\n\022MasterAdminService\0222\n\taddColumn\022\021.Add" + + "ColumnRequest\032\022.AddColumnResponse\022;\n\014del" + + "eteColumn\022\024.DeleteColumnRequest\032\025.Delete" + + "ColumnResponse\022;\n\014modifyColumn\022\024.ModifyC" + + "olumnRequest\032\025.ModifyColumnResponse\0225\n\nm" + + "oveRegion\022\022.MoveRegionRequest\032\023.MoveRegi", + "onResponse\0229\n\014mergeRegions\022\023.MergeRegion" + + "Request\032\024.MergeRegionResponse\022;\n\014assignR" + + "egion\022\024.AssignRegionRequest\032\025.AssignRegi" + + "onResponse\022A\n\016unassignRegion\022\026.UnassignR" + + "egionRequest\032\027.UnassignRegionResponse\022>\n" + + "\rofflineRegion\022\025.OfflineRegionRequest\032\026." + + "OfflineRegionResponse\0228\n\013deleteTable\022\023.D" + + "eleteTableRequest\032\024.DeleteTableResponse\022" + + "8\n\013enableTable\022\023.EnableTableRequest\032\024.En" + + "ableTableResponse\022;\n\014disableTable\022\024.Disa", + "bleTableRequest\032\025.DisableTableResponse\0228" + + "\n\013modifyTable\022\023.ModifyTableRequest\032\024.Mod" + + "ifyTableResponse\0228\n\013createTable\022\023.Create" + + "TableRequest\032\024.CreateTableResponse\022/\n\010sh" + + "utdown\022\020.ShutdownRequest\032\021.ShutdownRespo" + + "nse\0225\n\nstopMaster\022\022.StopMasterRequest\032\023." 
+ + "StopMasterResponse\022,\n\007balance\022\017.BalanceR" + + "equest\032\020.BalanceResponse\022M\n\022setBalancerR" + + "unning\022\032.SetBalancerRunningRequest\032\033.Set" + + "BalancerRunningResponse\022;\n\016runCatalogSca", + "n\022\023.CatalogScanRequest\032\024.CatalogScanResp" + + "onse\022S\n\024enableCatalogJanitor\022\034.EnableCat" + + "alogJanitorRequest\032\035.EnableCatalogJanito" + + "rResponse\022\\\n\027isCatalogJanitorEnabled\022\037.I" + + "sCatalogJanitorEnabledRequest\032 .IsCatalo" + + "gJanitorEnabledResponse\022L\n\021execMasterSer" + + "vice\022\032.CoprocessorServiceRequest\032\033.Copro" + + "cessorServiceResponseBG\n*org.apache.hado" + + "op.hbase.protobuf.generatedB\021MasterAdmin" + + "ProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -16164,8 +17146,24 @@ new java.lang.String[] { }, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.Builder.class); + internal_static_MergeRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_MergeRegionRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MergeRegionRequest_descriptor, + new java.lang.String[] { "TableName", "RegionAName", "RegionBName", "Force", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest.Builder.class); + internal_static_MergeRegionResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_MergeRegionResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MergeRegionResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionResponse.Builder.class); internal_static_AssignRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(10); internal_static_AssignRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_AssignRegionRequest_descriptor, @@ -16173,7 +17171,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.Builder.class); internal_static_AssignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_AssignRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_AssignRegionResponse_descriptor, @@ -16181,7 +17179,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.Builder.class); internal_static_UnassignRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_UnassignRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UnassignRegionRequest_descriptor, @@ -16189,7 
+17187,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.Builder.class); internal_static_UnassignRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_UnassignRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UnassignRegionResponse_descriptor, @@ -16197,7 +17195,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.Builder.class); internal_static_OfflineRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_OfflineRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_OfflineRegionRequest_descriptor, @@ -16205,7 +17203,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.Builder.class); internal_static_OfflineRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_OfflineRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_OfflineRegionResponse_descriptor, @@ -16213,7 +17211,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.Builder.class); internal_static_CreateTableRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_CreateTableRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CreateTableRequest_descriptor, @@ -16221,7 +17219,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.Builder.class); internal_static_CreateTableResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_CreateTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CreateTableResponse_descriptor, @@ -16229,7 +17227,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.Builder.class); internal_static_DeleteTableRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_DeleteTableRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteTableRequest_descriptor, @@ -16237,7 +17235,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.Builder.class); internal_static_DeleteTableResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_DeleteTableResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteTableResponse_descriptor, @@ -16245,7 +17243,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.Builder.class); internal_static_EnableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_EnableTableRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableTableRequest_descriptor, @@ -16253,7 +17251,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.Builder.class); internal_static_EnableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_EnableTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableTableResponse_descriptor, @@ -16261,7 +17259,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.Builder.class); internal_static_DisableTableRequest_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_DisableTableRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DisableTableRequest_descriptor, @@ -16269,7 +17267,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.Builder.class); internal_static_DisableTableResponse_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_DisableTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DisableTableResponse_descriptor, @@ -16277,7 +17275,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.Builder.class); internal_static_ModifyTableRequest_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_ModifyTableRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ModifyTableRequest_descriptor, @@ -16285,7 +17283,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.Builder.class); internal_static_ModifyTableResponse_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_ModifyTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ModifyTableResponse_descriptor, @@ -16293,7 +17291,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.Builder.class); internal_static_ShutdownRequest_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); 
internal_static_ShutdownRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ShutdownRequest_descriptor, @@ -16301,7 +17299,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.Builder.class); internal_static_ShutdownResponse_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_ShutdownResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ShutdownResponse_descriptor, @@ -16309,7 +17307,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.Builder.class); internal_static_StopMasterRequest_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_StopMasterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopMasterRequest_descriptor, @@ -16317,7 +17315,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.Builder.class); internal_static_StopMasterResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_StopMasterResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopMasterResponse_descriptor, @@ -16325,7 +17323,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.Builder.class); internal_static_BalanceRequest_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(30); internal_static_BalanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BalanceRequest_descriptor, @@ -16333,7 +17331,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.Builder.class); internal_static_BalanceResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(31); internal_static_BalanceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BalanceResponse_descriptor, @@ -16341,7 +17339,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.Builder.class); internal_static_SetBalancerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(32); internal_static_SetBalancerRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetBalancerRunningRequest_descriptor, @@ -16349,7 +17347,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.Builder.class); internal_static_SetBalancerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(33); 
internal_static_SetBalancerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetBalancerRunningResponse_descriptor, @@ -16357,7 +17355,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.Builder.class); internal_static_CatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(34); internal_static_CatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CatalogScanRequest_descriptor, @@ -16365,7 +17363,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.Builder.class); internal_static_CatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(35); internal_static_CatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CatalogScanResponse_descriptor, @@ -16373,7 +17371,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.Builder.class); internal_static_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(36); internal_static_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorRequest_descriptor, @@ -16381,7 +17379,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.Builder.class); internal_static_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(37); internal_static_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorResponse_descriptor, @@ -16389,7 +17387,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.Builder.class); internal_static_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(38); internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsCatalogJanitorEnabledRequest_descriptor, @@ -16397,7 +17395,7 @@ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.Builder.class); internal_static_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(39); internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsCatalogJanitorEnabledResponse_descriptor, Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeRequest.java 
=================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeRequest.java (revision 0) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeRequest.java (revision 0) @@ -0,0 +1,238 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.master.RegionMergeManager.MergeTransactionData; +import org.apache.hadoop.hbase.master.MergeTransaction.JournalState; +import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.zookeeper.KeeperException; + +import com.google.common.base.Preconditions; + +/** + * Handles processing region merges. Put in a queue, owned by MergeManager. 
+ */
+class MergeRequest implements Runnable {
+  static final Log LOG = LogFactory.getLog(MergeRequest.class);
+
+  private final MergeTransactionData transactionData;
+  private JournalState initialState;
+  private final RegionMergeManager mergeManager;
+  private final MasterServices masterServices;
+
+  private static Class<? extends MergeTransaction> mergeTransactionClass;
+
+  MergeRequest(final RegionMergeManager mergeManager,
+      final MergeTransactionData transactionData, JournalState initialState,
+      MasterServices masterServices) {
+    Preconditions.checkNotNull(masterServices);
+    this.transactionData = transactionData;
+    this.initialState = initialState;
+    this.mergeManager = mergeManager;
+    this.masterServices = masterServices;
+  }
+
+  @Override
+  public void run() {
+    if (this.masterServices.isStopped()) {
+      LOG.debug("Skipping merge because server is stopped");
+      return;
+    }
+    MonitoredTask status = TaskMonitor.get().createStatus(
+        "Merging " + transactionData + " with initialState=" + initialState);
+    try {
+      mergeManager.waitUntilMasterInitialized();
+      HRegionInfo regionInfoA = this.mergeManager.getRegionInfoByEncodedName(
+          transactionData.getTableName(), transactionData.getRegionA());
+      HRegionInfo regionInfoB = this.mergeManager.getRegionInfoByEncodedName(
+          transactionData.getTableName(), transactionData.getRegionB());
+      if (regionInfoA == null || regionInfoB == null) {
+        if (!masterServices.getAssignmentManager().getZKTable()
+            .isTablePresent(transactionData.getTableName())) {
+          String msg = "Skip merging " + transactionData
+              + ", because table is not present now";
+          LOG.info(msg);
+          status.abort(msg);
+          skipRequest(regionInfoA, regionInfoB);
+          return;
+        } else if (this.initialState == JournalState.CREATE_MERGE) {
+          String msg = "Skip merging " + transactionData
+              + ", because the regioninfo could not be found, initialState="
+              + initialState;
+          LOG.info(msg);
+          status.abort(msg);
+          skipRequest(regionInfoA, regionInfoB);
+          return;
+        } else if (this.initialState != JournalState.COMPLETE_MERGING) {
+          String msg = "Resubmit merging, because the regioninfo could not"
+              + " be found, and initialState=" + initialState;
+          LOG.warn(msg);
+          status.abort(msg);
+          mergeManager.addToResubmitQueue(transactionData);
+          return;
+        }
+      } else {
+        if (!transactionData.isForce() && !areAdjacent(regionInfoA, regionInfoB)) {
+          String msg = "Skip merging " + transactionData
+              + ", because the two regions are not adjacent, regionA="
+              + regionInfoA.getRegionNameAsString() + ", regionB="
+              + regionInfoB.getRegionNameAsString();
+          LOG.info(msg);
+          status.abort(msg);
+          skipRequest(regionInfoA, regionInfoB);
+          return;
+        }
+      }
+      status.setStatus("Starting merging");
+      LOG.info("Start merging " + transactionData + ", initialState="
+          + initialState);
+      MergeTransaction mt = null;
+      int triesNumber = masterServices.getConfiguration().getInt(
+          "hbase.merge.number.tries", 5);
+      for (int i = 0; i < triesNumber; i++) {
+        try {
+          mt = createMergeTransaction(masterServices.getConfiguration(),
+              masterServices, mergeManager, transactionData, regionInfoA,
+              regionInfoB, initialState, status);
+          mt.execute(this.masterServices);
+          break;
+        } catch (IOException e) {
+          if (i == triesNumber - 1 || masterServices.isStopped()) {
+            throw e;
+          }
+          LOG.warn("Failed executing merge, " + transactionData + ", try="
+              + (i + 1), e);
+          if (mt != null) {
+            this.initialState = mt.getCurrentState();
+          }
+        }
+      }
+      HRegionInfo mergedRegion = mt.getMergedRegion();
+      String msg = "Complete merging " + transactionData
+          + ", successful=" + mt.isSuccessful()
+          + (mergedRegion == null ? "" : ", merged region="
+              + mergedRegion.getRegionNameAsString());
+      LOG.info(msg);
+      status.markComplete(msg);
+    } catch (InterruptedException e) {
+      String msg = "Merging " + transactionData + " interrupted by user";
+      LOG.warn(msg, e);
+      status.abort(msg);
+      Thread.currentThread().interrupt();
+    } catch (IOException e) {
+      String msg = "IOException in merging " + transactionData;
+      LOG.warn(msg, e);
+      status.abort(msg);
+      if (masterServices.getMasterFileSystem().checkFileSystem()) {
+        this.mergeManager.addToResubmitQueue(transactionData);
+      }
+    } catch (KeeperException e) {
+      String msg = "KeeperException in merging " + transactionData;
+      LOG.error(msg, e);
+      status.abort(msg);
+      masterServices.abort(msg, e);
+    } catch (Throwable e) {
+      String msg = "Unknown Exception in merging " + transactionData;
+      LOG.error(msg, e);
+      status.abort(msg);
+      this.mergeManager.addToResubmitQueue(transactionData);
+    } finally {
+      status.cleanup();
+    }
+  }
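+
+  // Added commentary on the failure handling above (a reading of the catch
+  // blocks, not new behavior): an IOException is resubmitted only while the
+  // filesystem is still healthy; a KeeperException aborts the master, since
+  // the journal state on ZK is then unknown; any other exception is
+  // resubmitted and retried from the last journaled state.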
"" : ", merged region=" + + mergedRegion.getRegionNameAsString()); + LOG.info(msg); + status.markComplete(msg); + } catch (InterruptedException e) { + String msg = "Merging" + transactionData + "interrupted by user "; + LOG.warn(msg, e); + status.abort(msg); + Thread.currentThread().interrupt(); + } catch (IOException e) { + String msg = "IOException in merging " + transactionData; + LOG.warn(msg, e); + status.abort(msg); + if (masterServices.getMasterFileSystem().checkFileSystem()) { + this.mergeManager.addToResubmitQueue(transactionData); + } + } catch (KeeperException e) { + String msg = "KeeperException in merging " + transactionData; + LOG.error(msg, e); + status.abort(msg); + masterServices.abort(msg, e); + } catch (Throwable e) { + String msg = "Unknown Exception in merging " + transactionData; + LOG.error(msg, e); + status.abort(msg); + this.mergeManager.addToResubmitQueue(transactionData); + } finally { + status.cleanup(); + } + } + + private void skipRequest(HRegionInfo regionInfoA, HRegionInfo regionInfoB) + throws InterruptedException, KeeperException { + if(regionInfoA!=null) this.mergeManager.assignMergingRegionIfOffline(regionInfoA); + if(regionInfoB!=null) this.mergeManager.assignMergingRegionIfOffline(regionInfoB); + this.mergeManager.finishMergeTransaction(transactionData, true); + } + + /** + * Check whether two regions are adjacent + * @param regionA + * @param regionB + * @return true if two regions are adjacent + */ + public static boolean areAdjacent(HRegionInfo regionA, HRegionInfo regionB) { + HRegionInfo a = regionA; + HRegionInfo b = regionB; + if (Bytes.compareTo(a.getStartKey(), b.getStartKey()) > 0) { + a = regionB; + b = regionA; + } + if (Bytes.compareTo(a.getEndKey(), b.getStartKey()) == 0) { + return true; + } + return false; + } + + /** + * Utility for constructing an instance of MergeTransaction class. + * @param conf + * @param masterServices + * @param mergeManager + * @param transactionData + * @param mergeA + * @param mergeB + * @param initialState + * @return instance of MergeTransaction class + */ + static MergeTransaction createMergeTransaction(final Configuration conf, + final MasterServices masterServices, final RegionMergeManager mergeManager, + final MergeTransactionData transactionData, final HRegionInfo mergeA, + final HRegionInfo mergeB, final JournalState initialState, + final MonitoredTask status) { + if (mergeTransactionClass == null) { + mergeTransactionClass = conf.getClass( + "hbase.master.merge.transaction.impl", MergeTransaction.class, + MergeTransaction.class); + } + try { + Constructor c = mergeTransactionClass + .getConstructor(MasterServices.class, RegionMergeManager.class, + MergeTransactionData.class, HRegionInfo.class, HRegionInfo.class, + JournalState.class, MonitoredTask.class); + return c.newInstance(masterServices, mergeManager, transactionData, + mergeA, mergeB, initialState, status); + } catch (InvocationTargetException ite) { + Throwable target = ite.getTargetException() != null ? ite + .getTargetException() : ite; + if (target.getCause() != null) + target = target.getCause(); + throw new RuntimeException("Failed construction of MergeTransaction: " + + mergeTransactionClass.toString(), target); + } catch (Exception e) { + throw new RuntimeException("Failed construction of MergeTransaction: " + + mergeTransactionClass.toString() + + ((e.getCause() != null) ? 
e.getCause().getMessage() : ""), e); + } + } + +} Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java =================================================================== --- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1443294) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (working copy) @@ -353,6 +353,15 @@ /** The upper-half split region column qualifier */ public static final byte [] SPLITB_QUALIFIER = toBytes("splitB"); + /** The lower-half merge region column qualifier */ + public static final byte[] MERGEA_QUALIFIER = toBytes("mergeA"); + + /** The upper-half merge region column qualifier */ + public static final byte[] MERGEB_QUALIFIER = toBytes("mergeB"); + + /** The merged region column qualifier */ + public static final byte[] MERGED_QUALIFIER = toBytes("merged"); + /** * The meta table version column qualifier. * We keep current version of the meta table in this column in -ROOT- Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java (revision 1443294) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java (working copy) @@ -117,6 +117,10 @@ + regionInfo.getRegionNameAsString() + " but " + "this table is disabled, triggering close of region"); assignmentManager.unassign(regionInfo); + } else if (this.assignmentManager.isRegionInMerging(regionInfo)) { + LOG.debug("Opened region " + regionInfo.getRegionNameAsString() + + " but " + "this region is in merging, triggering close of region"); + assignmentManager.unassign(regionInfo); } } } Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeTransaction.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeTransaction.java (revision 0) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MergeTransaction.java (revision 0) @@ -0,0 +1,565 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaEditor; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.RegionMergeManager.MergeTransactionData; +import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; + +/** + * Executes region merge as a "transaction". Call {@link #prepare} to check + * whether the transaction is available, {@link #offlineRegion} to offline + * merging regions, {@link #executeMerging} to merge files on fs, + * {@link #completeMerging} and {@link #cancelMerging} to remove useless data. + * + *
+ * <p>
+ * This class is not thread safe. Caller needs to ensure the merge is run by
+ * one thread only.
+ */
+public class MergeTransaction {
+  private static final Log LOG = LogFactory.getLog(MergeTransaction.class);
+
+  private final HTableDescriptor htd;
+  protected final HRegionInfo regionA;
+  protected final HRegionInfo regionB;
+  // Region C merged from regionA and regionB
+  protected HRegionInfo mergedRegion;
+  private final MergeTransactionData transactionData;
+  private final RegionMergeManager mergeManager;
+
+  // Which state this merge transaction starts from
+  private final JournalState initialState;
+  // Current state of this merge transaction
+  protected JournalState currentState;
+
+  private boolean successful = false;
+
+  private final Path rootDir;
+  private final FileSystem fs;
+
+  private final MonitoredTask status;
+
+  /**
+   * State of the transaction journal. Each enum is a state in the merge
+   * transaction. Used to redo after master restart
+   */
+  enum JournalState {
+    /**
+     * Initial state when requesting a merge
+     */
+    CREATE_MERGE,
+    /**
+     * Enter the offline region state after {@link #prepare()}
+     */
+    OFFLINE_REGION,
+    /**
+     * Enter the state executing merge after {@link #offlineRegion()}
+     */
+    EXECUTE_MERGING,
+    /**
+     * Enter the state completing merging after the region is merged
+     */
+    COMPLETE_MERGING,
+    /**
+     * Enter the state canceling merging if the merge transaction is unavailable
+     */
+    CANCEL_MERGING,
+    /**
+     * State representing the transaction being over
+     */
+    END
+  }
+
+  public MergeTransaction(final MasterServices masterServices,
+      final RegionMergeManager mergeManager,
+      final MergeTransactionData transactionData, final HRegionInfo mergeA,
+      final HRegionInfo mergeB, final JournalState initialState,
+      final MonitoredTask status) throws FileNotFoundException, IOException {
+    this.rootDir = masterServices.getMasterFileSystem().getRootDir();
+    this.fs = masterServices.getMasterFileSystem().getFileSystem();
+    this.mergeManager = mergeManager;
+    this.transactionData = transactionData;
+    this.htd = masterServices.getTableDescriptors().get(
+        transactionData.getTableName());
+    // Make sure regionA < regionB
+    if (mergeA == null || mergeA.compareTo(mergeB) < 0) {
+      this.regionA = mergeA;
+      this.regionB = mergeB;
+    } else {
+      this.regionA = mergeB;
+      this.regionB = mergeA;
+    }
+    this.initialState = initialState;
+    this.currentState = initialState;
+    this.status = status;
+  }
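+
+  // A rough walk-through of the journal (added commentary, not behavior): a
+  // healthy merge moves CREATE_MERGE -> OFFLINE_REGION -> EXECUTE_MERGING ->
+  // COMPLETE_MERGING -> END, persisting the intermediate steps on ZK via
+  // setJournalStateOnZK() so that a restarted master can resume from the
+  // last recorded state; a merge found unavailable in prepare() or
+  // offlineRegion() short-circuits through CANCEL_MERGING instead.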
+
+  /**
+   * Does checks on the merge inputs.
+   *
+   * Sets the state to OFFLINE_REGION if the merge is available, else sets the
+   * state to CANCEL_MERGING if it is not (e.g. a merging region is a parent
+   * or has references).
+   * @param masterServices
+   * @throws IOException
+   * @throws KeeperException
+   * @throws NoNodeException
+   */
+  void prepare(MasterServices masterServices) throws IOException,
+      NoNodeException, KeeperException {
+    status.setStatus("Preparing...");
+    boolean available = true;
+    if (regionA.isSplit() || regionA.isOffline()) {
+      available = false;
+      LOG.debug("Region " + regionA.getRegionNameAsString()
+          + " is split or offline");
+    } else if (regionB.isSplit() || regionB.isOffline()) {
+      available = false;
+      LOG.debug("Region " + regionB.getRegionNameAsString()
+          + " is split or offline");
+    } else if (HRegion.containsReferenceFile(fs, rootDir, regionA, htd)) {
+      available = false;
+      LOG.debug("Region " + regionA.getRegionNameAsString()
+          + " has references, canceling the merge");
+    } else if (HRegion.containsReferenceFile(fs, rootDir, regionB, htd)) {
+      available = false;
+      LOG.debug("Region " + regionB.getRegionNameAsString()
+          + " has references, canceling the merge");
+    } else if (!cleanReferenceIfMergedRegion(masterServices, regionA)) {
+      available = false;
+      LOG.debug("Region " + regionA.getRegionNameAsString()
+          + " was merged before, but couldn't clean its references");
+    } else if (!cleanReferenceIfMergedRegion(masterServices, regionB)) {
+      available = false;
+      LOG.debug("Region " + regionB.getRegionNameAsString()
+          + " was merged before, but couldn't clean its references");
+    } else if (!ensureNoParentRegion()) {
+      available = false;
+      LOG.debug("A parent region exists and can't be cleared, canceling the merge");
+    }
+    if (available) {
+      this.currentState = JournalState.OFFLINE_REGION;
+    } else {
+      this.currentState = JournalState.CANCEL_MERGING;
+    }
+    setJournalStateOnZK();
+  }
+
+  /**
+   * Run the transaction according to the current state
+   * @param masterServices
+   * @throws IOException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  public void execute(MasterServices masterServices) throws IOException,
+      KeeperException, InterruptedException {
+    while (!masterServices.isStopped()) {
+      if (this.currentState.equals(JournalState.END)) {
+        break;
+      }
+      switch (this.currentState) {
+      case CREATE_MERGE:
+        prepare(masterServices);
+        break;
+      case OFFLINE_REGION:
+        offlineRegion(masterServices);
+        break;
+      case EXECUTE_MERGING:
+        executeMerging(masterServices);
+        break;
+      case COMPLETE_MERGING:
+        completeMerging(masterServices);
+        break;
+      case CANCEL_MERGING:
+        cancelMerging(masterServices);
+        break;
+      default:
+        throw new RuntimeException("Unhandled merge state: " + currentState);
+      }
+    }
+  }
+
+  /**
+   * Offline the two merging regions
+   * @param masterServices
+   * @throws KeeperException
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  void offlineRegion(MasterServices masterServices) throws KeeperException,
+      InterruptedException, IOException {
+    status.setStatus("Offline region...");
+    masterServices.getAssignmentManager().unassign(this.regionA, true);
+    masterServices.getAssignmentManager().unassign(this.regionB, true);
+    mergeManager.waitUntilRegionsOffline(this.regionA, this.regionB);
+    mergeManager.waitUntilNoDeadServerInProcess(masterServices);
+    boolean conditionA = false;
+    if (mergeManager.isRegionSplit(this.regionA)
+        || mergeManager.isRegionSplit(this.regionB)) {
+      LOG.info("Canceling merge because region "
+          + (mergeManager.isRegionSplit(this.regionA) ? regionA
+              .getEncodedName() : regionB.getEncodedName()) + " has been split");
+      this.currentState = JournalState.CANCEL_MERGING;
+    } else if ((conditionA = containsEditsLogFile(this.regionA))
+        || containsEditsLogFile(this.regionB)) {
+      LOG.info("Canceling merge because region "
+          + (conditionA ? regionA.getEncodedName() : regionB.getEncodedName())
+          + " has an edits log file");
+      this.currentState = JournalState.CANCEL_MERGING;
+    } else {
+      this.currentState = JournalState.EXECUTE_MERGING;
+    }
+    setJournalStateOnZK();
+  }
+
+  /**
+   * Merge the files in HDFS, add the merged region to META and assign it
+   * @param masterServices
+   * @throws NoNodeException
+   * @throws KeeperException
+   * @throws IOException
+   */
+  void executeMerging(MasterServices masterServices) throws NoNodeException,
+      KeeperException, IOException {
+    status.setStatus("Merging regions in fs...");
+    this.mergedRegion = HRegionInfo.generateFixedRegionByMerge(htd, regionA,
+        regionB);
+    Pair<HRegionInfo, ServerName> mergedRegionInMeta = null;
+    boolean isMergedRegionSplit = false;
+    if (this.initialState == JournalState.EXECUTE_MERGING) {
+      // It is a redo from this step
+      mergedRegionInMeta = MetaReader.getRegion(
+          masterServices.getCatalogTracker(), mergedRegion.getRegionName());
+      if (mergedRegionInMeta == null) {
+        isMergedRegionSplit = checkRegionSplit(
+            masterServices.getCatalogTracker(), mergedRegion, regionA);
+        LOG.debug("Redo merging " + transactionData + " from " + initialState
+            + " and check whether merged region "
+            + mergedRegion.getRegionNameAsString() + " is split="
+            + isMergedRegionSplit);
+      }
+    }
+    if (!isMergedRegionSplit && mergedRegionInMeta == null) {
+      this.mergedRegion = HRegion.merge(masterServices.getConfiguration(), fs,
+          rootDir, htd, regionA, regionB);
+      offlineMergingRegionInMeta(masterServices.getCatalogTracker(),
+          regionA, mergedRegion, regionB, true);
+      offlineMergingRegionInMeta(masterServices.getCatalogTracker(),
+          regionB, mergedRegion, regionA, false);
+      MetaEditor.addMergedRegionToMeta(masterServices.getCatalogTracker(),
+          this.mergedRegion, regionA, regionB);
+    }
+    status.setStatus("Assigning merged region "
+        + mergedRegion.getRegionNameAsString());
+    if (!isMergedRegionSplit
+        && (mergedRegionInMeta == null || mergedRegionInMeta.getSecond() == null)) {
+      if (mergeManager.isRegionOffline(mergedRegion)) {
+        masterServices.getAssignmentManager().assign(this.mergedRegion, true);
+        if (masterServices.isStopped()) {
+          // This assign may be skipped, so we shouldn't set state on zk.
+          throw new IOException("Server is stopped...");
+        }
+      }
+    }
+    this.currentState = JournalState.COMPLETE_MERGING;
+    setJournalStateOnZK();
+  }
+
+  /**
+   * Offline a merging region in META. Factored into a method so tests can
+   * override it.
+   * @param catalogTracker
+   * @param merging
+   * @param merged
+   * @param anotherMerging
+   * @param left
+   * @throws IOException
+   */
+  protected void offlineMergingRegionInMeta(CatalogTracker catalogTracker,
+      HRegionInfo merging, HRegionInfo merged, HRegionInfo anotherMerging,
+      boolean left) throws IOException {
+    MetaEditor.offlineMergingRegionInMeta(catalogTracker, merging, merged,
+        anotherMerging, left);
+  }
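+
+  // Added commentary on the intended META bookkeeping (inferred from
+  // TestMergeTransaction#checkQualifiersInMetaAfterMerging): while a merge of
+  // A and B into C is in flight, A's row carries a "mergeB" pointer to B,
+  // B's row carries a "mergeA" pointer to A, both rows are marked offline,
+  // and C's row carries both "mergeA" and "mergeB", so an interrupted merge
+  // can be recognized and cleaned up from any of the three rows.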
+
+  /**
+   * Delete a merging region from META. Factored into a method so tests can
+   * override it.
+   * @param catalogTracker
+   * @param regionInfo
+   * @throws IOException
+   */
+  protected void deleteMergingRegionInMeta(CatalogTracker catalogTracker,
+      HRegionInfo regionInfo) throws IOException {
+    MetaEditor.deleteRegion(catalogTracker, regionInfo);
+  }
+
+  /**
+   * Delete the merging regions from META after completing the merge
+   * successfully
+   * @param masterServices
+   * @throws KeeperException
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  void completeMerging(MasterServices masterServices) throws KeeperException,
+      IOException, InterruptedException {
+    status.setStatus("Completing merging... ");
+    // Remove regions from .META.
+    if (this.regionA != null) {
+      deleteMergingRegionInMeta(masterServices.getCatalogTracker(),
+          this.regionA);
+    }
+    if (this.regionB != null) {
+      deleteMergingRegionInMeta(masterServices.getCatalogTracker(),
+          this.regionB);
+    }
+    this.mergeManager.waitUntilNoDeadServerInProcess(masterServices);
+    this.mergeManager.finishMergeTransaction(transactionData, true);
+    this.currentState = JournalState.END;
+    this.successful = true;
+  }
+
+  /**
+   * The merge is unavailable, cancel it; assign a merging region if it is
+   * offline because of the merge
+   * @param masterServices
+   * @throws NoNodeException
+   * @throws KeeperException
+   * @throws InterruptedException
+   */
+  void cancelMerging(MasterServices masterServices) throws NoNodeException,
+      KeeperException, InterruptedException {
+    status.setStatus("Canceling merging... ");
+    this.mergeManager.assignMergingRegionIfOffline(this.regionA);
+    this.mergeManager.assignMergingRegionIfOffline(this.regionB);
+    this.mergeManager.finishMergeTransaction(transactionData, true);
+    this.currentState = JournalState.END;
+  }
+
+  protected void setJournalStateOnZK() throws NoNodeException,
+      KeeperException {
+    LOG.debug("Setting state " + currentState + " on zk for merging request "
+        + transactionData);
+    mergeManager.setJournalStateOnZK(transactionData.getTransactionName(),
+        this.currentState);
+  }
+
+  /**
+   * Check whether a region has an edits log file
+   * @param region
+   * @return true if the region contains an edits log file
+   * @throws IOException
+   */
+  private boolean containsEditsLogFile(final HRegionInfo region)
+      throws IOException {
+    Path tableDir = new Path(rootDir, region.getTableNameAsString());
+    Path regiondir = HRegion.getRegionDir(tableDir, region.getEncodedName());
+    Set<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
+    return files != null && !files.isEmpty();
+  }
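+
+  // Added note: a pending split-edits file means the region still has WAL
+  // edits that have not been replayed into its store files, so merging the
+  // store files at this point could silently drop data; presumably that is
+  // why offlineRegion() cancels the merge in this case.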
+
+  /**
+   * Between offlining the regions and starting the merge, make sure there is
+   * no parent region of the two regions in .META. If one exists, fixing up
+   * daughters would bring the daughter regions (one of which we have merged)
+   * online again when the master restarts, so we clear the parent region in
+   * this function to prevent that case
+   * @return true if there is no parent region of the merging regions
+   * @throws IOException
+   */
+  private boolean ensureNoParentRegion() throws IOException {
+    Map<HRegionInfo, Result> splitParents = mergeManager
+        .getSplitParents(regionA.getTableName());
+    for (Map.Entry<HRegionInfo, Result> splitParent : splitParents.entrySet()) {
+      // check whether its range contains a merging region
+      if (doRegionsOverlap(regionA, splitParent.getKey())
+          || doRegionsOverlap(regionB, splitParent.getKey())) {
+        LOG.warn("Region " + regionA.getEncodedName() + " or region "
+            + regionB.getEncodedName() + " has parent region "
+            + splitParent.getKey().getRegionNameAsString()
+            + ", try to clean it");
+        if (!this.mergeManager.cleanParent(splitParent.getKey(),
+            splitParent.getValue())) {
+          LOG.warn("Failed cleaning parent region "
+              + splitParent.getKey().getRegionNameAsString()
+              + ", canceling merge");
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  /**
+   * If the specified region was merged before, make sure its references have
+   * been cleaned
+   * @param masterServices
+   * @param checkRegion
+   * @return true if the region was not merged before, or its references have
+   *         been cleaned
+   * @throws IOException
+   */
+  private boolean cleanReferenceIfMergedRegion(MasterServices masterServices,
+      HRegionInfo checkRegion) throws IOException {
+    Result result = MetaReader.getRegionResult(
+        masterServices.getCatalogTracker(), checkRegion.getRegionName());
+    assert !(result == null || result.isEmpty());
+    HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
+        HConstants.MERGEA_QUALIFIER);
+    HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
+        HConstants.MERGEB_QUALIFIER);
+    if (mergeA == null && mergeB == null) {
+      return true;
+    }
+    assert !(mergeA == null || mergeB == null);
+    return this.mergeManager.cleanMergeRegion(checkRegion, mergeA, mergeB);
+  }
+
+  /**
+   * Check whether the region has been split, according to META
+   * @param catalogTracker
+   * @param checkRegion
+   * @param excludeRegion
+   * @return true if the region has been split
+   * @throws IOException
+   */
+  static boolean checkRegionSplit(final CatalogTracker catalogTracker,
+      final HRegionInfo checkRegion, final HRegionInfo excludeRegion)
+      throws IOException {
+    final AtomicBoolean isSplit = new AtomicBoolean(false);
+    byte[] startRow = checkRegion.getRegionName();
+    final byte[] startKey = checkRegion.getStartKey();
+    final byte[] endKey = checkRegion.getEndKey();
+    final byte[] tableName = checkRegion.getTableName();
+    MetaReader.fullScan(catalogTracker, new MetaReader.Visitor() {
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r == null || r.isEmpty()) return true;
+        HRegionInfo info = HRegionInfo.getHRegionInfo(r);
+        if (info == null) return true; // Keep scanning
+        // other table, stop scanning
+        if (Bytes.compareTo(info.getTableName(), tableName) > 0) return false;
+        if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
+          if (Bytes.compareTo(info.getStartKey(), startKey) == 0) {
+            if (Bytes.compareTo(info.getEndKey(), endKey) == 0) {
+              // found itself, keep scanning
+              return true;
+            } else if (info.equals(excludeRegion)) {
+              return true;
+            }
+            isSplit.set(true);
+            return false;
+          } else if (Bytes.compareTo(info.getStartKey(), startKey) > 0) {
+            return false;
+          }
+        }
+        return true;
+      }
+    }, startRow);
+    return isSplit.get();
+  }
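+
+  // Worked example for checkRegionSplit (added commentary): suppose the
+  // merged region C spans [b, d) and a redo finds no row for C in META. If C
+  // had been split into [b, c) and [c, d), the scan starting at C's row sees
+  // [b, c): same start key, different end key, and not the excluded sibling
+  // region A, so isSplit flips to true and the redo knows not to re-create C.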
+
+  public boolean isSuccessful() {
+    return this.successful;
+  }
+
+  public HRegionInfo getMergedRegion() {
+    return this.mergedRegion;
+  }
+
+  public JournalState getCurrentState() {
+    return this.currentState;
+  }
+
+  /**
+   * Check whether there is overlap between region A and region B
+   * @param regionA
+   * @param regionB
+   * @return true if the regions' key ranges overlap
+   */
+  private static boolean doRegionsOverlap(HRegionInfo regionA,
+      HRegionInfo regionB) {
+    // if A's startKey < B's startKey, swap them
+    if (Bytes.compareTo(regionA.getStartKey(), regionB.getStartKey()) < 0) {
+      return doRegionsOverlap(regionB, regionA);
+    }
+    return Bytes.compareTo(regionA.getStartKey(), regionB.getEndKey()) < 0
+        || Bytes.equals(regionB.getEndKey(), HConstants.EMPTY_END_ROW);
+  }
+
+  /**
+   * Get the bytes for a specified JournalState
+   * @param state
+   * @return the bytes of the given journal state
+   */
+  static byte[] getBytesFromJournalState(JournalState state) {
+    return Bytes.toBytes(state.toString());
+  }
+
+  /**
+   * Get a JournalState from bytes. Throws a {@link RuntimeException} if the
+   * data does not represent a known state
+   * @param data
+   * @return the journal state encoded by the given bytes
+   */
+  static JournalState parseJournalStateFromBytes(byte[] data) {
+    String stateString = Bytes.toString(data);
+    for (JournalState state : JournalState.values()) {
+      if (state.toString().equals(stateString))
+        return state;
+    }
+    throw new RuntimeException("Unknown JournalState " + stateString);
+  }
+}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -102,6 +102,8 @@
   protected final Server server;
   private ServerManager serverManager;
+
+  private RegionMergeManager mergeManager;
   private CatalogTracker catalogTracker;
@@ -1026,6 +1028,11 @@
             LOG.info("Opened region " + regionNameStr
               + "but this table is disabled, triggering close of region");
             unassign(regionInfo);
+          } else if (isRegionInMerging(regionInfo)) {
+            LOG.debug("Opened region "
+              + regionInfo.getRegionNameAsString()
+              + " but this region is merging, triggering close of region");
+            unassign(regionInfo);
           }
         }
       }
@@ -1564,6 +1571,10 @@
       this.timeoutMonitor.setAllRegionServersOffline(true);
       return; // Should get reassigned later when RIT times out.
     }
+    if (this.server.isStopped()) {
+      LOG.debug("Server stopped; skipping assign of " + region);
+      return;
+    }
     if (setOfflineInZK && versionOfOfflineNode == -1) {
       // get the version of the znode after setting it to OFFLINE.
       // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
@@ -1588,10 +1599,6 @@
       if (setOfflineInZK && versionOfOfflineNode == -1) {
         return;
       }
-      if (this.server.isStopped()) {
-        LOG.debug("Server stopped; skipping assign of " + region);
-        return;
-      }
       try {
         LOG.info("Assigning region " + region.getRegionNameAsString()
           + " to " + plan.getDestination().toString());
@@ -2180,6 +2187,8 @@
     Map<HRegionInfo, ServerName> allRegions = MetaReader.fullScan(
       catalogTracker, disabledOrDisablingOrEnabling, true);
     if (allRegions == null || allRegions.isEmpty()) return;
+    // Remove the merging regions
+    removeMergingRegions(allRegions);
     // Determine what type of assignment to do on startup
     boolean retainAssignment = server.getConfiguration().
@@ -2754,7 +2763,11 @@
    */
   public void handleSplitReport(final ServerName sn, final HRegionInfo parent,
       final HRegionInfo a, final HRegionInfo b) {
+    if (this.mergeManager != null) {
+      this.mergeManager.notifyRegionSplit(parent);
+    }
     regionOffline(parent);
+
     regionOnline(a, sn);
     regionOnline(b, sn);
@@ -2836,4 +2849,90 @@
     }
     return true;
   }
+
+  void setMergeManager(RegionMergeManager mergeManager) {
+    assert this.mergeManager == null;
+    this.mergeManager = mergeManager;
+  }
+
+  public RegionMergeManager getMergeManager() {
+    return this.mergeManager;
+  }
+
+  public boolean isRegionInMerging(HRegionInfo regionInfo) {
+    return this.mergeManager != null
+        && this.mergeManager.isRegionInMerging(regionInfo);
+  }
+
+  /**
+   * Offline the specified region which is merging now
+   * @param regionInfo
+   */
+  public void offlineMergingRegion(HRegionInfo regionInfo) {
+    // Region is merging so should not be reassigned, just delete the CLOSED
+    // node
+    LOG.debug("Region is merging so deleting ZK node and removing from "
+        + "regions in transition, skipping assignment of region "
+        + regionInfo.getRegionNameAsString());
+    try {
+      if (!ZKAssign.deleteClosedNode(watcher, regionInfo.getEncodedName())) {
+        // Could also be in OFFLINE mode
+        ZKAssign.deleteOfflineNode(watcher, regionInfo.getEncodedName());
+      }
+    } catch (KeeperException.NoNodeException nne) {
+      LOG.debug("Tried to delete closed node for " + regionInfo + " but it "
+          + "does not exist so just offlining");
+    } catch (KeeperException e) {
+      this.server.abort("Error deleting CLOSED node in ZK for region "
+          + regionInfo.getEncodedName(), e);
+    }
+    regionOffline(regionInfo);
+    if (this.mergeManager != null) {
+      this.mergeManager.notifyRegionOffline(regionInfo);
+    }
+  }
+
+  /**
+   * Remove the merging regions from the specified map of regions
+   * @param regions
+   */
+  private void removeMergingRegions(Map<HRegionInfo, ServerName> regions) {
+    if (this.mergeManager != null && this.mergeManager.isAnyRegionInMerging()) {
+      Iterator<HRegionInfo> iterator = regions.keySet().iterator();
+      while (iterator.hasNext()) {
+        if (this.mergeManager.isRegionInMerging(iterator.next())) {
+          iterator.remove();
+        }
+      }
+    }
+  }
+
+  /**
+   * Get the merging regions from the specified list of regions
+   * @param regions
+   * @return list of regions which are merging
+   */
+  public List<HRegionInfo> getMergingRegions(List<HRegionInfo> regions) {
+    List<HRegionInfo> regionsInMerging = new ArrayList<HRegionInfo>();
+    if (this.mergeManager != null && this.mergeManager.isAnyRegionInMerging()) {
+      for (HRegionInfo region : regions) {
+        if (this.mergeManager.isRegionInMerging(region)) {
+          regionsInMerging.add(region);
+        }
+      }
+    }
+    return regionsInMerging;
+  }
+
+  /**
+   * Get the number of merging regions for the specified table
+   * @param tableName
+   * @return number of merging regions of the given table
+   */
+  public int getNumOfMergingRegionsForTable(String tableName) {
+    if (this.mergeManager != null) {
+      return mergeManager.getNumOfMergingRegionsForTable(tableName);
+    }
+    return 0;
+  }
 }
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTransaction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTransaction.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMergeTransaction.java (revision 0)
@@ -0,0 +1,826 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; +import java.util.NavigableMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.master.MergeTransaction.JournalState; +import org.apache.hadoop.hbase.master.RegionMergeManager.MergeTransactionData; +import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.JVMClusterUtil; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.base.Joiner; + +/** + * Test the {@link MergeTransaction} class against two merging regions. 
+ */
+@Category(LargeTests.class)
+public class TestMergeTransaction {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Log LOG = LogFactory.getLog(TestMergeTransaction.class);
+  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
+  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+  private static final int slaves = 3;
+
+  private static byte[] ROW = Bytes.toBytes("testRow");
+  private static final int INITIAL_REGION_NUM = 10;
+  private static final int ROWSIZE = 200;
+  private static byte[][] ROWS = makeN(ROW, ROWSIZE);
+
+  private static int waitTime = 120 * 1000;
+
+  private enum ExceptionLocation {
+    NONE,
+    BEFORE_OFFLINE,
+    BEFORE_EXECUTE,
+    BEFORE_COMPLETE,
+    BEFORE_CANCEL,
+    SET_ZK
+  }
+
+  // Throw an exception at a certain step, simulating redo
+  private static ExceptionLocation exceptionThrownLocation = ExceptionLocation.NONE;
+
+  private static TestingMergeTransaction currentMergeTransaction;
+
+  private static HTable checkedTableForConsistentView = null;
+  private static ExecutorService metaEntriesConsistencyCheckPool = Executors
+      .newFixedThreadPool(4);
+  private static List<Future<String>> metaEntriesConsistencyCheckResults =
+      new ArrayList<Future<String>>();
+
+  // If true, we will check the merged and merging regions' qualifiers in META
+  // after executeMerging(). This check doesn't depend on ZK, so we don't set
+  // journal state on ZK while it is true.
+  private static boolean checkQualifierInMeta = false;
+
+  private static boolean forceEnterCancelState = false;
+
+  private static AtomicInteger successfullyCompletedCount = new AtomicInteger(0);
+
+  @BeforeClass
+  public static void beforeAllTests() throws Exception {
+    TEST_UTIL.getConfiguration().setClass(
+        "hbase.master.merge.transaction.impl", TestingMergeTransaction.class,
+        MergeTransaction.class);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.hregion.memstore.mslab.enabled", false);
+    TEST_UTIL.getConfiguration().setInt("hbase.merge.number.tries", 1);
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.bulk.assignment.perregionserver.rpc.waittime", 3000);
+    TEST_UTIL.getConfiguration().setInt(
+        "hbase.regionserver.rpc.startup.waittime", 10000);
+    // Start a cluster of three regionservers.
+    TEST_UTIL.startMiniCluster(slaves);
+  }
+
+  @AfterClass
+  public static void afterAllTests() throws Exception {
+    metaEntriesConsistencyCheckPool.shutdownNow();
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @After
+  public void after() {
+    currentMergeTransaction = null;
+    checkedTableForConsistentView = null;
+    forceEnterCancelState = false;
+    checkQualifierInMeta = false;
+  }
+
+  @Test
+  public void testWholesomeMerge() throws Exception {
+    LOG.info("Starting testWholesomeMerge");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testWholesomeMerge");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    // Merge 1st and 2nd region
+    mergeAndVerify(master, tablename, 0, 1, INITIAL_REGION_NUM - 1);
+
+    // Merge 2nd and 3rd region
+    mergeAndVerify(master, tablename, 1, 2, INITIAL_REGION_NUM - 2);
+
+    verifyRowCount(table, ROWSIZE);
+
+    table.close();
+  }
+
+  @Test
+  public void testCheckQualifiersWhenMerging() throws Exception {
+    LOG.info("Starting testCheckQualifiersWhenMerging");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testCheckQualifiersWhenMerging");
+    HTable table = createTableAndLoadData(master, tablename);
+    List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
+        .getTableRegionsAndLocations(master.getCatalogTracker(),
+            Bytes.toString(tablename));
+
+    assertTrue(tableRegions.size() > 2);
+
+    HRegionInfo mergeA = tableRegions.get(0).getFirst();
+    HRegionInfo mergeB = tableRegions.get(1).getFirst();
+    RegionMergeManager regionMergeManager = master.assignmentManager
+        .getMergeManager();
+    MergeTransactionData transactionData = new MergeTransactionData(
+        MergeTransactionData.generateTransactionName(Bytes.toString(tablename),
+            mergeA.getEncodedName(), mergeB.getEncodedName(), false));
+    TestingMergeTransaction mergeTransaction = new TestingMergeTransaction(
+        master, regionMergeManager, transactionData, mergeA, mergeB,
+        JournalState.CREATE_MERGE, TaskMonitor.get().createStatus("Test"));
+
+    checkQualifierInMeta = true;
+    regionMergeManager.addToMergingRegions(mergeA.getEncodedName());
+    regionMergeManager.addToMergingRegions(mergeB.getEncodedName());
+    mergeTransaction.execute(master);
+    checkQualifierInMeta = false;
+    table.close();
+  }
+
+  @Test
+  public void testConsistentViewInMETAWhenMerging() throws Exception {
+    LOG.info("Starting testConsistentViewInMETAWhenMerging");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testConsistentViewInMETAWhenMerging");
+    HTable table = createTableAndLoadData(master, tablename);
+    checkedTableForConsistentView = table;
+    // Merge 1st and 2nd region
+    mergeAndVerify(master, tablename, 0, 1, INITIAL_REGION_NUM - 1);
+    List<String> errorList = new ArrayList<String>();
+    for (Future<String> checkResult : metaEntriesConsistencyCheckResults) {
+      String errorMsg = checkResult.get();
+      if (errorMsg != null) {
+        errorList.add(errorMsg);
+      }
+    }
+    metaEntriesConsistencyCheckResults.clear();
+    if (!errorList.isEmpty()) {
+      fail(errorList.get(0));
+    }
+    verifyRowCount(table, ROWSIZE);
+    checkedTableForConsistentView = null;
+    table.close();
+  }
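+
+  // Added note: the assertions below verify, from all three META rows, the
+  // cross-linking of merging and merged regions after executeMerging(); the
+  // check is presumably driven from the TestingMergeTransaction subclass
+  // (defined later in this test) while the checkQualifierInMeta flag is set.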
+  private static void checkQualifiersInMetaAfterMerging(
+      HRegionInfo mergedRegion, HRegionInfo mergingRegionA,
+      HRegionInfo mergingRegionB) throws IOException {
+    Result resultOfMergedRegion = MetaReader.getRegionResult(TEST_UTIL
+        .getHBaseCluster().getMaster().getCatalogTracker(),
+        mergedRegion.getRegionName());
+    Result resultOfMergingRegionA = MetaReader.getRegionResult(TEST_UTIL
+        .getHBaseCluster().getMaster().getCatalogTracker(),
+        mergingRegionA.getRegionName());
+    Result resultOfMergingRegionB = MetaReader.getRegionResult(TEST_UTIL
+        .getHBaseCluster().getMaster().getCatalogTracker(),
+        mergingRegionB.getRegionName());
+
+    assertEquals(mergingRegionA, HRegionInfo.getHRegionInfo(
+        resultOfMergedRegion, HConstants.MERGEA_QUALIFIER));
+
+    assertEquals(mergingRegionB, HRegionInfo.getHRegionInfo(
+        resultOfMergedRegion, HConstants.MERGEB_QUALIFIER));
+
+    assertEquals(mergingRegionB, HRegionInfo.getHRegionInfo(
+        resultOfMergingRegionA, HConstants.MERGEB_QUALIFIER));
+
+    HRegionInfo regionAInMeta = HRegionInfo
+        .getHRegionInfo(resultOfMergingRegionA);
+    assertTrue(regionAInMeta != null && regionAInMeta.isOffline());
+
+    assertEquals(mergingRegionA, HRegionInfo.getHRegionInfo(
+        resultOfMergingRegionB, HConstants.MERGEA_QUALIFIER));
+
+    HRegionInfo regionBInMeta = HRegionInfo
+        .getHRegionInfo(resultOfMergingRegionB);
+    assertTrue(regionBInMeta != null && regionBInMeta.isOffline());
+  }
+
+  private static void startThreadForCheckingConsistentViewOfTableMetaEntries() {
+    final HTable table = checkedTableForConsistentView;
+    if (table != null) {
+      Future<String> checkResult = metaEntriesConsistencyCheckPool
+          .submit(new Callable<String>() {
+            @Override
+            public String call() throws Exception {
+              NavigableMap<HRegionInfo, ServerName> regionLocations = table
+                  .getRegionLocations();
+              HRegionInfo lastRegion = null;
+              for (HRegionInfo regionInfo : regionLocations.keySet()) {
+                if (lastRegion == null
+                    && !Bytes.equals(HConstants.EMPTY_END_ROW,
+                        regionInfo.getStartKey())) {
+                  String errorMsg = "Table "
+                      + table.getTableDescriptor().getNameAsString()
+                      + " does not start with an empty start row, first region="
+                      + regionInfo.getRegionNameAsString();
+                  return errorMsg;
+                } else if (lastRegion != null
+                    && !Bytes.equals(lastRegion.getEndKey(),
+                        regionInfo.getStartKey())) {
+                  String errorMsg = "Inconsistent view of meta entries when merging, "
+                      + "we could see the regions "
+                      + lastRegion.getRegionNameAsString()
+                      + " and "
+                      + regionInfo.getRegionNameAsString() + " at the same time";
+                  return errorMsg;
+                }
+                lastRegion = regionInfo;
+              }
+              if (!Bytes.equals(lastRegion.getEndKey(), HConstants.EMPTY_END_ROW)) {
+                String errorMsg = "Table "
+                    + table.getTableDescriptor().getNameAsString()
+                    + " does not end with an empty end row, last region="
+                    + lastRegion.getRegionNameAsString();
+                return errorMsg;
+              }
+              return null;
+            }
+          });
+      metaEntriesConsistencyCheckResults.add(checkResult);
+      try {
+        // Give the checker thread time to start scanning
+        Thread.sleep(500);
+      } catch (InterruptedException e) {
+        // Best effort only; proceed even if the wait is interrupted
+      }
+    }
+  }
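The redo tests that follow all share one fault-injection shape, driven by the ExceptionLocation hook in TestingMergeTransaction (defined later in this file) and the merge manager's resubmit queue. Annotated, the common steps are:

  // exceptionThrownLocation = ExceptionLocation.X;    // arm the fault
  // requestMergeRegion(master, tablename, 0, 1);       // transaction throws at X
  // waitAndVerifyResubmitQueueSize(master, 1);         // it parks on the resubmit queue
  // exceptionThrownLocation = ExceptionLocation.NONE;  // disarm
  // mergeManager.triggerResubmissionForTest();         // redo from the journaled state
  // waitAndVerifyRegionNum(master, tablename, INITIAL_REGION_NUM - 1);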
+  @Test
+  public void testRedoMergeWhenOfflineRegionEncountersException()
+      throws Exception {
+    LOG.info("Starting testRedoMergeWhenOfflineRegion");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testRedoMergeWhenOfflineRegion");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    // Throw exception when offlining the regions
+    exceptionThrownLocation = ExceptionLocation.BEFORE_OFFLINE;
+    requestMergeRegion(master, tablename, 0, 1);
+    waitAndVerifyResubmitQueueSize(master, 1);
+
+    exceptionThrownLocation = ExceptionLocation.SET_ZK;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyResubmitQueueSize(master, 1);
+
+    exceptionThrownLocation = ExceptionLocation.NONE;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyRegionNum(master, tablename, INITIAL_REGION_NUM - 1);
+    verifyRowCount(table, ROWSIZE);
+
+    table.close();
+  }
+
+  @Test
+  public void testRedoMergeWhenExecuteMergeEncountersException()
+      throws Exception {
+    LOG.info("Starting testRedoMergeWhenExecuteMerge");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testRedoMergeWhenExecuteMerge");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    // Throw exception when executing the merge
+    exceptionThrownLocation = ExceptionLocation.BEFORE_EXECUTE;
+    requestMergeRegion(master, tablename, 0, 1);
+    waitAndVerifyResubmitQueueSize(master, 1);
+
+    exceptionThrownLocation = ExceptionLocation.SET_ZK;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyResubmitQueueSize(master, 1);
+
+    exceptionThrownLocation = ExceptionLocation.NONE;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyRegionNum(master, tablename, INITIAL_REGION_NUM - 1);
+    verifyRowCount(table, ROWSIZE);
+    table.close();
+  }
+
+  @Test
+  public void testRedoMergeWhenCompleteMergeEncountersException()
+      throws Exception {
+    LOG.info("Starting testRedoMergeWhenCompleteMerge");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testRedoMergeWhenCompleteMerge");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    // Throw exception when completing the merge
+    exceptionThrownLocation = ExceptionLocation.BEFORE_COMPLETE;
+    requestMergeRegion(master, tablename, 0, 1);
+    waitAndVerifyResubmitQueueSize(master, 1);
+    exceptionThrownLocation = ExceptionLocation.NONE;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyRegionNum(master, tablename, INITIAL_REGION_NUM - 1);
+    verifyRowCount(table, ROWSIZE);
+
+    table.close();
+  }
+
+  @Test
+  public void testRedoMergeWhenCancelMergeEncountersException()
+      throws Exception {
+    LOG.info("Starting testRedoMergeWhenCancelMerge");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testRedoMergeWhenCancelMerge");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    // Throw exception when cancelling the merge
+    exceptionThrownLocation = ExceptionLocation.BEFORE_CANCEL;
+    requestMergeRegion(master, tablename, 0, 1);
+    waitAndVerifyResubmitQueueSize(master, 1);
+    exceptionThrownLocation = ExceptionLocation.NONE;
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitAndVerifyRegionNum(master, tablename, INITIAL_REGION_NUM);
+    verifyRowCount(table, ROWSIZE);
+
+    table.close();
+  }
+  @Test(timeout = 180000)
+  public void testConcurrentRegionMergingAndRegionSplitting()
+      throws Exception {
+    LOG.info("Starting testConcurrentRegionMergingAndRegionSplitting");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    final HMaster master = cluster.getMaster();
+    final byte[] tablename = Bytes
+        .toBytes("testConcurrentRegionMergingAndRegionSplitting");
+    final int numRegions = 100;
+    HTable table = createTableAndLoadData(master, tablename, numRegions);
+    final List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
+        .getTableRegionsAndLocations(master.getCatalogTracker(),
+            Bytes.toString(tablename));
+    assertEquals(numRegions, tableRegions.size());
+
+    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    SecureRandom random = new SecureRandom();
+    int successfulCountBeforeTest = successfullyCompletedCount.get();
+
+    final List<byte[]> splitPoints = new ArrayList<byte[]>();
+    for (int i = 0; i < numRegions; i++) {
+      HRegionInfo regionInfo = tableRegions.get(i).getFirst();
+      byte[] splitPoint = ROWS[random.nextInt(ROWSIZE / numRegions)
+          + i * ROWSIZE / numRegions];
+      assertTrue(regionInfo.getRegionNameAsString() + " doesn't contain row "
+          + Bytes.toString(splitPoint), regionInfo.containsRow(splitPoint));
+      splitPoints.add(splitPoint);
+    }
+
+    Thread mergeRequestThread = new Thread() {
+      public void run() {
+        for (int i = 0; i < numRegions; i += 2) {
+          try {
+            RegionMergeManager.createMergeRequest(master.getZooKeeper(),
+                Bytes.toString(tablename), tableRegions.get(i).getFirst()
+                    .getEncodedName(), tableRegions.get(i + 1).getFirst()
+                    .getEncodedName(), false);
+          } catch (Exception e) {
+            fail("Failed creating merge request for region "
+                + tableRegions.get(i).getFirst().getEncodedName() + " and "
+                + tableRegions.get(i + 1).getFirst().getEncodedName());
+          }
+        }
+      }
+    };
+
+    Thread splitRequestThread = new Thread() {
+      public void run() {
+        for (int i = 0; i < numRegions; i++) {
+          try {
+            admin.split(tableRegions.get(i).getFirst().getRegionName(),
+                splitPoints.get(i));
+          } catch (Exception e) {
+            LOG.info("Region may be merging, failed calling split, "
+                + e.getMessage());
+          }
+        }
+      }
+    };
+    mergeRequestThread.start();
+    splitRequestThread.start();
+    mergeRequestThread.join();
+    splitRequestThread.join();
+    waitUntilNoRegionInMerging(master.assignmentManager.getMergeManager());
+
+    int successfulCountThisTest = successfullyCompletedCount.get()
+        - successfulCountBeforeTest;
+    LOG.info("Successfully merged " + successfulCountThisTest * 2
+        + " regions in this test");
+    verifyRowCount(table, ROWSIZE);
+    table.close();
+  }
+
+  /**
+   * Restart the master and a regionserver while merges are in progress.
+   * @throws Exception
+   */
+  @Test
+  public void testRedoMergeWhenServerRestart() throws Exception {
+    LOG.info("Starting testRedoMergeWhenServerRestart");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    byte[] tablename = Bytes.toBytes("testRedoMergeWhenServerRestart");
+    HTable table = createTableAndLoadData(master, tablename);
+
+    LOG.info("Test restarting servers after entering state "
+        + JournalState.CREATE_MERGE);
+    restartServersAfterExpectedState(table, tablename,
+        JournalState.CREATE_MERGE);
+
+    LOG.info("Test restarting servers after entering state "
+        + JournalState.OFFLINE_REGION);
+    restartServersAfterExpectedState(table, tablename,
+        JournalState.OFFLINE_REGION);
+
+    LOG.info("Test restarting servers after entering state "
+        + JournalState.EXECUTE_MERGING);
+    restartServersAfterExpectedState(table, tablename,
+        JournalState.EXECUTE_MERGING);
+
+    LOG.info("Test restarting servers after entering state "
+        + JournalState.COMPLETE_MERGING);
+    restartServersAfterExpectedState(table, tablename,
+        JournalState.COMPLETE_MERGING);
+
+    LOG.info("Test restarting servers after entering state "
+        + JournalState.CANCEL_MERGING);
+    forceEnterCancelState = true;
+    restartServersAfterExpectedState(table, tablename,
+        JournalState.CANCEL_MERGING);
+    forceEnterCancelState = false;
+
+    table.close();
+  }
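The states exercised above suggest the following journal for a merge transaction. The ordering is inferred from how JournalState is used in this test, so treat it as a sketch rather than a specification:

  // CREATE_MERGE     -> transaction created and journaled
  // OFFLINE_REGION   -> both merging regions taken offline
  // EXECUTE_MERGING  -> files merged on the filesystem, META rewritten
  // COMPLETE_MERGING -> merged region brought online, bookkeeping finished
  // CANCEL_MERGING   -> rollback path (forced here via forceEnterCancelState)
  // END              -> terminal state; the transaction leaves the manager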
+  private void restartServersAfterExpectedState(final HTable table,
+      final byte[] tablename, JournalState expectedState) throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    int regionNumBeforeMerging = getTableRegionsNum(master, tablename);
+    currentMergeTransaction = null;
+    requestMergeRegion(master, tablename, 0, 1);
+    LOG.info("Restarting servers after entering state " + expectedState);
+    waitUntilEnterExpectedState(expectedState);
+    restartMasterAndServer();
+    // Get the master again after the restart
+    master = cluster.getMaster();
+    master.assignmentManager.getMergeManager().triggerResubmissionForTest();
+    waitUntilEnterExpectedState(JournalState.END);
+
+    if (currentMergeTransaction.isSuccessful()) {
+      waitAndVerifyRegionNum(master, tablename, regionNumBeforeMerging - 1);
+    }
+    verifyRowCount(table, ROWSIZE);
+  }
+
+  private void restartMasterAndServer() throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    List<JVMClusterUtil.RegionServerThread> liveRSs = cluster
+        .getLiveRegionServerThreads();
+    assert liveRSs.size() == slaves;
+    LOG.info("Aborting regionserver...");
+    SecureRandom random = new SecureRandom();
+    HRegionServer rs = liveRSs.get(random.nextInt(slaves)).getRegionServer();
+    rs.abort("Aborting for tests", new Exception("Trace info"));
+    cluster.waitForRegionServerToStop(rs.getServerName(), waitTime);
+    liveRSs = cluster.getLiveRegionServerThreads();
+    // Wait until a user region is online on the live RSs; this prevents the
+    // master from mistaking a failover for a clean cluster startup
+    waitUntilHasUserRegionOnline();
+    List<JVMClusterUtil.MasterThread> liveMasters = cluster
+        .getLiveMasterThreads();
+    assert liveMasters.size() == 1;
+    LOG.info("Aborting master...");
+    master.abort("Aborting for tests", new Exception("Trace info"));
+    cluster.waitForMasterToStop(master.getServerName(), waitTime);
+
+    master = cluster.startMaster().getMaster();
+    TEST_UTIL.ensureSomeRegionServersAvailable(slaves);
+    long timeout = System.currentTimeMillis() + waitTime;
+    while ((!master.isInitialized() || master.getServerManager()
+        .areDeadServersInProgress()) && System.currentTimeMillis() < timeout) {
+      Thread.sleep(50);
+    }
+  }
+
+  private void waitUntilHasUserRegionOnline() throws InterruptedException {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    List<JVMClusterUtil.RegionServerThread> liveRSs = cluster
+        .getLiveRegionServerThreads();
+    long timeout = System.currentTimeMillis() + waitTime;
+    while (System.currentTimeMillis() < timeout) {
+      int onlineRegionNum = 0;
+      for (JVMClusterUtil.RegionServerThread rst : liveRSs) {
+        onlineRegionNum += rst.getRegionServer().getOnlineRegionsLocalContext()
+            .size();
+      }
+      if (onlineRegionNum > 2) {
+        return;
+      }
+      Thread.sleep(50);
+    }
+    fail("Timed out waiting for a user region to come online on a live RS");
+  }
+
+  private void waitUntilNoRegionInMerging(
+      final RegionMergeManager regionMergeManager) throws Exception {
+    long timeout = System.currentTimeMillis() + waitTime;
+    while (System.currentTimeMillis() < timeout) {
+      if (regionMergeManager.getMergingRegions().isEmpty()) {
+        break;
+      }
+      Thread.sleep(50);
+    }
+    assertTrue("Timed out waiting for merges to finish, regions still merging: "
+        + regionMergeManager.getMergingRegions(),
+        regionMergeManager.getMergingRegions().isEmpty());
+  }
"NULL" + : currentMergeTransaction.currentState)); + break; + } + } + } + + /** + * An MergeTransaction instance used in this test. + */ + public static class TestingMergeTransaction extends MergeTransaction { + + public TestingMergeTransaction(MasterServices masterServices, + RegionMergeManager mergeManager, MergeTransactionData transactionData, + HRegionInfo mergeA, HRegionInfo mergeB, JournalState initialState, + MonitoredTask status) throws FileNotFoundException, IOException { + super(masterServices, mergeManager, transactionData, mergeA, mergeB, + initialState, status); + currentMergeTransaction = this; + } + + @Override + void offlineRegion(MasterServices masterServices) throws KeeperException, + InterruptedException, IOException { + throwExceptionIfTest(ExceptionLocation.BEFORE_OFFLINE); + super.offlineRegion(masterServices); + } + + @Override + void executeMerging(MasterServices masterServices) throws NoNodeException, + KeeperException, IOException { + throwExceptionIfTest(ExceptionLocation.BEFORE_EXECUTE); + super.executeMerging(masterServices); + startThreadForCheckingConsistentViewOfTableMetaEntries(); + if (checkQualifierInMeta) { + checkQualifiersInMetaAfterMerging(super.mergedRegion, super.regionA, + super.regionB); + } + } + + @Override + void completeMerging(MasterServices masterServices) throws KeeperException, + IOException, InterruptedException { + throwExceptionIfTest(ExceptionLocation.BEFORE_COMPLETE); + super.completeMerging(masterServices); + successfullyCompletedCount.incrementAndGet(); + } + + @Override + void cancelMerging(MasterServices masterServices) throws NoNodeException, + KeeperException, InterruptedException { + throwExceptionIfTest(ExceptionLocation.BEFORE_CANCEL); + super.cancelMerging(masterServices); + } + + @Override + void prepare(MasterServices masterServices) throws IOException, + NoNodeException, KeeperException { + super.prepare(masterServices); + if (forceEnterCancelState + || exceptionThrownLocation == ExceptionLocation.BEFORE_CANCEL) { + super.currentState = JournalState.CANCEL_MERGING; + super.setJournalStateOnZK(); + } + } + + @Override + protected void setJournalStateOnZK() throws NoNodeException, + KeeperException { + throwExceptionIfTest(ExceptionLocation.SET_ZK); + if (!checkQualifierInMeta) { + super.setJournalStateOnZK(); + } + + } + + @Override + protected void offlineMergingRegionInMeta(CatalogTracker catalogTracker, + HRegionInfo merging, HRegionInfo merged, HRegionInfo anotherMerging, + boolean left) throws IOException { + super.offlineMergingRegionInMeta(catalogTracker, merging, merged, + anotherMerging, left); + startThreadForCheckingConsistentViewOfTableMetaEntries(); + + } + @Override + protected void deleteMergingRegionInMeta(CatalogTracker catalogTracker, + HRegionInfo regionInfo) throws IOException { + super.deleteMergingRegionInMeta(catalogTracker, regionInfo); + startThreadForCheckingConsistentViewOfTableMetaEntries(); + } + + void throwExceptionIfTest(ExceptionLocation expectedLocation) { + if (exceptionThrownLocation == expectedLocation) { + throw new RuntimeException("Test"); + } + } + + } + + private HTable createTableAndLoadData(HMaster master, byte[] tablename) + throws Exception { + return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM); + } + + private HTable createTableAndLoadData(HMaster master, byte[] tablename, + int numRegions) throws Exception { + assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions); + byte[][] splitRows = new byte[numRegions - 1][]; + for (int i = 0; i < 
+  private HTable createTableAndLoadData(HMaster master, byte[] tablename)
+      throws Exception {
+    return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM);
+  }
+
+  private HTable createTableAndLoadData(HMaster master, byte[] tablename,
+      int numRegions) throws Exception {
+    assertTrue("ROWSIZE must be greater than numRegions:" + numRegions,
+        ROWSIZE > numRegions);
+    byte[][] splitRows = new byte[numRegions - 1][];
+    for (int i = 0; i < splitRows.length; i++) {
+      splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
+    }
+
+    HTable table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
+    loadData(table);
+    verifyRowCount(table, ROWSIZE);
+
+    // Poll until the region transitions triggered by the table creation
+    // have finished
+    long timeout = System.currentTimeMillis() + waitTime;
+    List<Pair<HRegionInfo, ServerName>> tableRegions;
+    while (System.currentTimeMillis() < timeout) {
+      tableRegions = MetaReader.getTableRegionsAndLocations(
+          master.getCatalogTracker(), Bytes.toString(tablename));
+      if (tableRegions.size() == numRegions)
+        break;
+      Thread.sleep(250);
+    }
+
+    tableRegions = MetaReader.getTableRegionsAndLocations(
+        master.getCatalogTracker(), Bytes.toString(tablename));
+    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
+    assertEquals(numRegions, tableRegions.size());
+    return table;
+  }
+
+  private void loadData(HTable table) throws IOException {
+    for (int i = 0; i < ROWSIZE; i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
+      table.put(put);
+    }
+  }
+
+  private void verifyRowCount(HTable table, int expectedRowCount)
+      throws IOException {
+    ResultScanner scanner = table.getScanner(new Scan());
+    int rowCount = 0;
+    while (scanner.next() != null) {
+      rowCount++;
+    }
+    assertEquals(expectedRowCount, rowCount);
+    scanner.close();
+  }
+
+  private void mergeAndVerify(HMaster master, byte[] tablename, int regionAnum,
+      int regionBnum, int expectedRegionNum) throws Exception {
+    requestMergeRegion(master, tablename, regionAnum, regionBnum);
+    waitAndVerifyRegionNum(master, tablename, expectedRegionNum);
+  }
+
+  private void requestMergeRegion(HMaster master, byte[] tablename,
+      int regionAnum, int regionBnum) throws Exception {
+    List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
+        .getTableRegionsAndLocations(master.getCatalogTracker(),
+            Bytes.toString(tablename));
+    TEST_UTIL.getHBaseAdmin().mergeRegions(tablename,
+        tableRegions.get(regionAnum).getFirst().getEncodedNameAsBytes(),
+        tableRegions.get(regionBnum).getFirst().getEncodedNameAsBytes(), false);
+  }
+
+  private void waitAndVerifyRegionNum(HMaster master, byte[] tablename,
+      int expectedRegionNum) throws Exception {
+    List<Pair<HRegionInfo, ServerName>> tableRegions;
+    long timeout = System.currentTimeMillis() + waitTime;
+    while (System.currentTimeMillis() < timeout) {
+      tableRegions = MetaReader.getTableRegionsAndLocations(
+          master.getCatalogTracker(), Bytes.toString(tablename));
+      if (tableRegions.size() == expectedRegionNum) break;
+      Thread.sleep(250);
+    }
+
+    tableRegions = MetaReader.getTableRegionsAndLocations(
+        master.getCatalogTracker(), Bytes.toString(tablename));
+    LOG.info("Regions after merge: " + Joiner.on(',').join(tableRegions));
+    assertEquals(expectedRegionNum, tableRegions.size());
+  }
+
+  private int getTableRegionsNum(HMaster master, byte[] tablename)
+      throws Exception {
+    return MetaReader.getTableRegionsAndLocations(master.getCatalogTracker(),
+        Bytes.toString(tablename)).size();
+  }
+
+  private void waitAndVerifyResubmitQueueSize(HMaster master, int expectedSize)
+      throws Exception {
+    int queueSize;
+    long timeout = System.currentTimeMillis() + waitTime;
+    while (System.currentTimeMillis() < timeout) {
+      queueSize = master.getAssignmentManager().getMergeManager()
+          .resubmitQueusSize();
+      if (queueSize == expectedSize) break;
+      Thread.sleep(250);
+    }
+
+    queueSize = master.getAssignmentManager().getMergeManager()
+        .resubmitQueusSize();
+    assertEquals(expectedSize, queueSize);
+  }
+
+  private static byte[][] makeN(byte[] base, int n) {
+    byte[][] ret = new byte[n][];
+    for (int i = 0; i < n; i++) {
+      ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%04d", i)));
+    }
+    return ret;
+  }
+
+}
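For reference, a hypothetical call to the key generator above:

  byte[][] rows = makeN(Bytes.toBytes("row"), 3);
  // rows = { row0000, row0001, row0002 }: the base key plus a zero-padded
  // index, so generated rows sort in order and split evenly across regions.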
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (working copy)
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.PairOfSameType;

 /**
@@ -350,17 +351,19 @@
   }

   /**
-   * A MetaScannerVisitor that provides a consistent view of the table's
-   * META entries during concurrent splits (see HBASE-5986 for details). This class
-   * does not guarantee ordered traversal of meta entries, and can block until the
-   * META entries for daughters are available during splits.
+   * A MetaScannerVisitor that provides a consistent view of the table's META
+   * entries during concurrent splits (see HBASE-5986 for details) and merges.
+   * This class does not guarantee ordered traversal of meta entries, and can
+   * block until the META entries for daughters/merged regions are available
+   * during splits/merges.
    */
   public static abstract class BlockingMetaScannerVisitor
     extends MetaScannerVisitorBase {

     private static final int DEFAULT_BLOCKING_TIMEOUT = 10000;
     private Configuration conf;
-    private TreeSet<byte[]> daughterRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+    private TreeSet<byte[]> processedRegions = new TreeSet<byte[]>(
+        Bytes.BYTES_COMPARATOR);
     private int blockingTimeout;
     private HTable metaTable;

@@ -395,7 +398,7 @@
         return true;
       }

-      if (daughterRegions.remove(info.getRegionName())) {
+      if (processedRegions.remove(info.getRegionName())) {
         return true; //we have already processed this row
       }

@@ -409,27 +412,65 @@
         HRegionInfo splitB = daughters.getSecond();

         HTable metaTable = getMetaTable();
-        long start = System.currentTimeMillis();
+        long start = EnvironmentEdgeManager.currentTimeMillis();
         Result resultA = getRegionResultBlocking(metaTable, blockingTimeout,
             splitA.getRegionName());
         if (resultA != null) {
           processRow(resultA);
-          daughterRegions.add(splitA.getRegionName());
+          processedRegions.add(splitA.getRegionName());
         } else {
           throw new RegionOfflineException("Split daughter region " +
               splitA.getRegionNameAsString() + " cannot be found in META.");
         }
-        long rem = blockingTimeout - (System.currentTimeMillis() - start);
+        long rem = blockingTimeout - (EnvironmentEdgeManager.currentTimeMillis() - start);

         Result resultB = getRegionResultBlocking(metaTable, rem,
             splitB.getRegionName());
         if (resultB != null) {
           processRow(resultB);
-          daughterRegions.add(splitB.getRegionName());
+          processedRegions.add(splitB.getRegionName());
         } else {
           throw new RegionOfflineException("Split daughter region " +
               splitB.getRegionNameAsString() + " cannot be found in META.");
         }
+      } else if (info.isOffline()) {
+        HRegionInfo mergedRegion = HRegionInfo.getHRegionInfo(rowResult,
+            HConstants.MERGED_QUALIFIER);
+        HRegionInfo mergeB = HRegionInfo.getHRegionInfo(rowResult,
+            HConstants.MERGEB_QUALIFIER);
+        HRegionInfo mergeA = HRegionInfo.getHRegionInfo(rowResult,
+            HConstants.MERGEA_QUALIFIER);
+        if (mergedRegion != null && mergeB != null) {
+          /*
+           * We have found the merging region mergeA. We must ensure that the
+           * merged region is seen by this scanner, so we block until it is
+           * written to META. This mirrors the handling of a split parent
+           * region above.
+           *
+           * To provide a consistent view, we skip processing mergeA and
+           * mergeB themselves.
+           */
+          HTable metaTable = getMetaTable();
+          Result resultOfMergedRegion = getRegionResultBlocking(metaTable,
+              blockingTimeout, mergedRegion.getRegionName());
+          if (resultOfMergedRegion != null) {
+            processRow(resultOfMergedRegion);
+            processedRegions.add(mergedRegion.getRegionName());
+            processedRegions.add(mergeB.getRegionName());
+            return true;
+          } else {
+            throw new RegionOfflineException("Merged region "
+                + mergedRegion.getRegionNameAsString()
+                + " cannot be found in META.");
+          }
+        } else if (mergedRegion != null && mergeA != null) {
+          /*
+           * We normally skip mergeB while processing mergeA; if we encounter
+           * the merging region mergeB here, the scanner has already seen the
+           * merged region and missed mergeA because mergeA's row has been
+           * deleted. So we simply skip processing mergeB as well.
+           */
+          return true;
+        }
       }

       return processRowInternal(rowResult);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (working copy)
@@ -137,6 +137,8 @@
         .getTableRegionsAndLocations(this.ct, tableName, true);
     int countOfRegionsInTable = tableRegionsAndLocations.size();
     List<HRegionInfo> regions = regionsToAssignWithServerName(tableRegionsAndLocations);
+    List<HRegionInfo> regionsInMerging = assignmentManager.getMergingRegions(regions);
+    regions.removeAll(regionsInMerging);
     int regionsCount = regions.size();
     if (regionsCount == 0) {
       done = true;
@@ -266,7 +268,8 @@
     }

     private boolean isDone(final List<HRegionInfo> regions) {
-      return regions != null && regions.size() >= this.countOfRegionsInTable;
+      return regions != null && (regions.size() + assignmentManager
+          .getNumOfMergingRegionsForTable(tableNameStr)) >= this.countOfRegionsInTable;
     }
   }
 }
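To make the visitor contract above concrete, a caller of the blocking visitor might look like the sketch below. The metaScan entry point and the constructor signature are assumptions based on the surrounding class, not part of this patch:

  MetaScanner.BlockingMetaScannerVisitor visitor =
      new MetaScanner.BlockingMetaScannerVisitor(conf) {
        @Override
        public boolean processRowInternal(Result rowResult) throws IOException {
          // Invoked once per region row; split daughters and merged regions
          // have already been resolved, so the table view is consistent.
          return true; // keep scanning
        }
      };
  MetaScanner.metaScan(conf, visitor, tableName);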
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (working copy)
@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
@@ -43,12 +44,14 @@
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.Triple;

 /**
  * A janitor for the catalog tables.  Scans the .META. catalog
@@ -106,16 +109,37 @@
   }

   /**
-   * Scans META and returns a number of scanned rows, and
-   * an ordered map of split parents.
+   * Scans META and returns the number of scanned rows, a map of merged
+   * regions, and an ordered map of split parents.
+   * @return triple of scanned rows, map of merged regions and map of split
+   *         parent regioninfos
+   * @throws IOException
    */
-  Pair<Integer, Map<HRegionInfo, Result>> getSplitParents() throws IOException {
+  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents()
+      throws IOException {
+    return getMergedRegionsAndSplitParents(null);
+  }
+
+  /**
+   * Scans META and returns the number of scanned rows, a map of merged
+   * regions, and an ordered map of split parents. If the given table name is
+   * null, the merged regions and split parents of all tables are returned,
+   * otherwise only those of the specified table.
+   * @param tableName null represents all tables
+   * @return triple of scanned rows, map of merged regions, and map of split
+   *         parent regioninfos
+   * @throws IOException
+   */
+  Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
+      final byte[] tableName) throws IOException {
+    final boolean isTableSpecified = (tableName != null && tableName.length != 0);
     // TODO: Only works with single .META. region currently.  Fix.
     final AtomicInteger count = new AtomicInteger(0);
     // Keep Map of found split parents.  There are candidates for cleanup.
     // Use a comparator that has split parents come before its daughters.
     final Map<HRegionInfo, Result> splitParents =
       new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
+    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
     // This visitor collects split parents and counts rows in the .META. table
     MetaReader.Visitor visitor = new MetaReader.Visitor() {
       @Override
@@ -124,20 +148,62 @@
         count.incrementAndGet();
         HRegionInfo info = HRegionInfo.getHRegionInfo(r);
         if (info == null) return true; // Keep scanning
+        if (isTableSpecified
+            && Bytes.compareTo(info.getTableName(), tableName) > 0) {
+          // Another table, stop scanning
+          return false;
+        }
         if (info.isSplitParent()) splitParents.put(info, r);
+        if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
+          mergedRegions.put(info, r);
+        }
         // Returning true means "keep scanning"
         return true;
       }
     };
-    // Run full scan of .META. catalog table passing in our custom visitor
-    MetaReader.fullScan(this.server.getCatalogTracker(), visitor);
-    return new Pair<Integer, Map<HRegionInfo, Result>>(count.get(), splitParents);
+    byte[] startRow = (!isTableSpecified) ? HConstants.EMPTY_START_ROW
+        : HRegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW,
+            HConstants.ZEROES, false);
+    // Run full scan of .META. catalog table passing in our custom visitor with
+    // the start row
+    MetaReader.fullScan(this.server.getCatalogTracker(), visitor, startRow);
+
+    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
+        count.get(), mergedRegions, splitParents);
   }
+  /**
+   * If the merged region no longer holds references to the merging regions,
+   * archive the merging regions on the filesystem and remove the merge
+   * references from META.
+   * @param mergedRegion
+   * @param regionA
+   * @param regionB
+   * @return true if we removed the merging regions from META and from the
+   *         file system
+   * @throws IOException
+   */
+  boolean cleanMergeRegion(final HRegionInfo mergedRegion,
+      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
+    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
+    Path rootDir = this.services.getMasterFileSystem().getRootDir();
+    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName());
+    if (!HRegion.containsReferenceFile(fs, rootDir, mergedRegion, htd)) {
+      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
+          + regionB.getRegionNameAsString()
+          + " from fs because merged region no longer holds references");
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
+      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
+      MetaEditor.deleteReferencesInMergedRegion(server.getCatalogTracker(),
+          mergedRegion);
+      return true;
+    }
+    return false;
+  }
+
+  /**
   * Run janitorial scan of catalog .META. table looking for
   * garbage to collect.
+   * @return number of cleaned regions
   * @throws IOException
   */
  int scan() throws IOException {
@@ -145,18 +211,44 @@
    if (!alreadyRunning.compareAndSet(false, true)) {
      return 0;
    }
-    Pair<Integer, Map<HRegionInfo, Result>> pair = getSplitParents();
-    int count = pair.getFirst();
-    Map<HRegionInfo, Result> splitParents = pair.getSecond();
+    Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> scanTriple =
+      getMergedRegionsAndSplitParents();
+    int count = scanTriple.getFirst();
+    // Clean the merged regions first
+    int mergeCleaned = 0;
+    Map<HRegionInfo, Result> mergedRegions = scanTriple.getSecond();
+    for (Map.Entry<HRegionInfo, Result> e : mergedRegions.entrySet()) {
+      HRegionInfo regionA = HRegionInfo.getHRegionInfo(e.getValue(),
+          HConstants.MERGEA_QUALIFIER);
+      HRegionInfo regionB = HRegionInfo.getHRegionInfo(e.getValue(),
+          HConstants.MERGEB_QUALIFIER);
+      if (regionA == null || regionB == null) {
+        LOG.warn("Unexpected references regionA="
+            + (regionA == null ? "null" : regionA.getRegionNameAsString())
+            + ", regionB="
+            + (regionB == null ? "null" : regionB.getRegionNameAsString())
+            + " in merged region " + e.getKey().getRegionNameAsString());
+      } else {
+        if (cleanMergeRegion(e.getKey(), regionA, regionB)) {
+          mergeCleaned++;
+        }
+      }
+    }
+    // Then clean the split parents
+    Map<HRegionInfo, Result> splitParents = scanTriple.getThird();

    // Now work on our list of found parents. See if any we can clean up.
-    int cleaned = 0;
-    //regions whose parents are still around
+    int splitCleaned = 0;
+    // regions whose parents are still around
     HashSet<String> parentNotCleaned = new HashSet<String>();
     for (Map.Entry<HRegionInfo, Result> e : splitParents.entrySet()) {
       if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
           cleanParent(e.getKey(), e.getValue())) {
-        cleaned++;
+        splitCleaned++;
       } else {
         // We could not clean the parent, so it's daughters should not be cleaned either (HBASE-6160)
         PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(e.getValue());
@@ -164,14 +256,16 @@
         parentNotCleaned.add(daughters.getSecond().getEncodedName());
       }
     }
-    if (cleaned != 0) {
-      LOG.info("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
-      " unreferenced parent region(s)");
+    if ((mergeCleaned + splitCleaned) != 0) {
+      LOG.info("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
+          + " unreferenced merged region(s) and " + splitCleaned
+          + " unreferenced parent region(s)");
     } else if (LOG.isDebugEnabled()) {
-      LOG.debug("Scanned " + count + " catalog row(s) and gc'd " + cleaned +
-      " unreferenced parent region(s)");
+      LOG.debug("Scanned " + count + " catalog row(s), gc'd " + mergeCleaned
+          + " unreferenced merged region(s) and " + splitCleaned
+          + " unreferenced parent region(s)");
     }
-    return cleaned;
+    return mergeCleaned + splitCleaned;
   } finally {
     alreadyRunning.set(false);
   }
@@ -224,6 +318,12 @@
   boolean cleanParent(final HRegionInfo parent, Result rowContent)
   throws IOException {
     boolean result = false;
+    // Check whether the parent is also a merged region whose merge
+    // references have not been cleaned yet
+    if (rowContent.getValue(HConstants.CATALOG_FAMILY,
+        HConstants.MERGEA_QUALIFIER) != null) {
+      // Wait for the merge references to be cleaned first
+      return result;
+    }
     // Run checks on each daughter split.
     PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
     Pair<Boolean, Boolean> a =
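Taken together, one janitor pass now proceeds roughly as follows (a digest of the code above, not additional behavior):

  // 1. getMergedRegionsAndSplitParents() walks .META. once, collecting rows
  //    that carry info:mergeA (merged regions) as well as split parents.
  // 2. Each merged region with no remaining reference files has its old
  //    regions archived and its merge qualifiers deleted (cleanMergeRegion).
  // 3. Split parents are considered afterwards; cleanParent() skips any row
  //    that still carries a merge reference.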
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -69,6 +69,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -4317,6 +4318,126 @@
     return dstRegion;
   }

+  /**
+   * Merge two regions, whether they are adjacent or not.
+   * @param conf
+   * @param fs
+   * @param rootDir
+   * @param tabledesc
+   * @param a merging region A
+   * @param b merging region B
+   * @return the merged region info
+   * @throws IOException
+   */
+  public static HRegionInfo merge(final Configuration conf,
+      final FileSystem fs, final Path rootDir,
+      final HTableDescriptor tabledesc, final HRegionInfo a, final HRegionInfo b)
+      throws IOException {
+    HRegionInfo newRegionInfo = HRegionInfo.generateFixedRegionByMerge(
+        tabledesc, a, b);
+    Path tableDir = HTableDescriptor.getTableDir(rootDir, a.getTableName());
+    String newRegionEncodedName = newRegionInfo.getEncodedName();
+    Path newRegionDir = HRegion.getRegionDir(tableDir, newRegionEncodedName);
+    if (!fs.exists(newRegionDir)) {
+      if (!fs.mkdirs(newRegionDir)) {
+        throw new IOException("Failed making dirs " + newRegionDir);
+      }
+    }
+
+    LOG.info("Starting merge of regions on fs : " + a.getRegionNameAsString()
+        + " and " + b.getRegionNameAsString() + " into new region "
+        + newRegionInfo.getRegionNameAsString() + " with start key <"
+        + Bytes.toStringBinary(newRegionInfo.getStartKey()) + "> and end key <"
+        + Bytes.toStringBinary(newRegionInfo.getEndKey()) + ">");
+
+    // Create reference files under the new region directory
+    Map<byte[], List<Pair<HRegionInfo, Path>>> byFamily =
+        new TreeMap<byte[], List<Pair<HRegionInfo, Path>>>(
+            Bytes.BYTES_COMPARATOR);
+    HRegionInfo[] regionInfos = new HRegionInfo[] { a, b };
+    for (HRegionInfo hri : regionInfos) {
+      for (HColumnDescriptor family : tabledesc.getFamilies()) {
+        Path familyDir = HStore.getStoreHomedir(tableDir, hri.getEncodedName(),
+            family.getName());
+        FileStatus[] files = fs.listStatus(familyDir);
+        if (files == null || files.length == 0) continue;
+        List<Pair<HRegionInfo, Path>> familyFileList = byFamily.get(family.getName());
+        if (familyFileList == null) {
+          familyFileList = new ArrayList<Pair<HRegionInfo, Path>>();
+          byFamily.put(family.getName(), familyFileList);
+        }
+        for (int i = 0; i < files.length; i++) {
+          if (StoreFile.isReference(files[i].getPath())) {
+            throw new IOException("Reference file still exists:"
+                + files[i].getPath());
+          }
+          familyFileList.add(new Pair<HRegionInfo, Path>(hri, files[i]
+              .getPath()));
+        }
+      }
+    }
+
+    for (Map.Entry<byte[], List<Pair<HRegionInfo, Path>>> es : byFamily
+        .entrySet()) {
+      byte[] colFamily = es.getKey();
+      Path storeDir = HStore.getStoreHomedir(tableDir,
+          newRegionInfo.getEncodedName(), colFamily);
+      if (fs.exists(storeDir)) {
+        if (!fs.delete(storeDir, true)) {
+          throw new IOException("Failed cleaning existing store dir " + storeDir);
+        }
+      }
+      if (!fs.mkdirs(storeDir)) {
+        throw new IOException("Failed creating merged region's store dir "
+            + storeDir);
+      }
+      List<Pair<HRegionInfo, Path>> srcFiles = es.getValue();
+      for (Pair<HRegionInfo, Path> hsf : srcFiles) {
+        // Create a whole-file reference
+        StoreFile.split(fs, storeDir, hsf.getSecond(), hsf.getFirst()
+            .getStartKey(), true);
+      }
+    }
+
+    LOG.info("Merge completed on the fs. New region is "
+        + newRegionInfo.getRegionNameAsString());
+    return newRegionInfo;
+  }
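The "whole reference" written above leans on the StoreFile.split refactor later in this patch: passing the source region's own start key with top == true yields a reference that spans the entire file. A sketch of the resulting layout, with hypothetical names:

  // After merging regions A and B into M (one family "f"):
  //   <rootdir>/<table>/<M-encoded>/f/hfile1.<A-encoded>  <- covers all of A's hfile1
  //   <rootdir>/<table>/<M-encoded>/f/hfile2.<B-encoded>  <- covers all of B's hfile2
  // Compaction later rewrites the data into M, after which CatalogJanitor
  // can archive A and B (see containsReferenceFile below).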
+
+  /**
+   * Check whether the region has any reference file.
+   * @param fs
+   * @param rootDir
+   * @param region
+   * @param htd
+   * @return true if the region has a reference file
+   * @throws IOException
+   */
+  public static boolean containsReferenceFile(final FileSystem fs,
+      final Path rootDir, final HRegionInfo region, final HTableDescriptor htd)
+      throws IOException {
+    Path tabledir = new Path(rootDir, region.getTableNameAsString());
+    boolean hasReference = false;
+    for (HColumnDescriptor family : htd.getFamilies()) {
+      Path p = HStore.getStoreHomedir(tabledir, region.getEncodedName(),
+          family.getName());
+      if (!fs.exists(p))
+        continue;
+      // Look for reference files. Call listStatus with an anonymous
+      // instance of PathFilter.
+      FileStatus[] ps = FSUtils.listStatus(fs, p, new PathFilter() {
+        public boolean accept(Path path) {
+          return StoreFile.isReference(path);
+        }
+      });
+
+      if (ps != null && ps.length > 0) {
+        hasReference = true;
+        break;
+      }
+    }
+    return hasReference;
+  }
+
   /*
    * Fills a map with a vector of store files keyed by column family.
    * @param byFamily Map to fill.
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (working copy)
@@ -194,6 +194,55 @@
   }

   /**
+   * Adds a META row for the specified new merged region.
+   * @param catalogTracker
+   * @param mergedRegion
+   * @param mergeA
+   * @param mergeB
+   * @throws IOException
+   */
+  public static void addMergedRegionToMeta(final CatalogTracker catalogTracker,
+      final HRegionInfo mergedRegion, final HRegionInfo mergeA,
+      final HRegionInfo mergeB)
+      throws IOException {
+    Put put = makePutFromRegionInfo(mergedRegion);
+    put.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
+        mergeA.toByteArray());
+    put.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
+        mergeB.toByteArray());
+    putToMetaTable(catalogTracker, put);
+    LOG.info("Added merged region " + mergedRegion.getRegionNameAsString()
+        + " to META, mergeA=" + mergeA.getRegionNameAsString() + ", mergeB="
+        + mergeB.getRegionNameAsString());
+  }
+
+  /**
+   * Offlines a merging region in META. Used while merging regions.
+   * @param catalogTracker
+   * @param merging
+   * @param merged
+   * @param anotherMerging
+   * @param left true if this merging region is mergeA
+   * @throws IOException
+   */
+  public static void offlineMergingRegionInMeta(
+      final CatalogTracker catalogTracker, final HRegionInfo merging,
+      final HRegionInfo merged, final HRegionInfo anotherMerging,
+      final boolean left) throws IOException {
+    HRegionInfo copyOfMerging = new HRegionInfo(merging);
+    copyOfMerging.setOffline(true);
+    Put put = makePutFromRegionInfo(copyOfMerging);
+    put.add(HConstants.CATALOG_FAMILY, HConstants.MERGED_QUALIFIER,
+        merged.toByteArray());
+    put.add(HConstants.CATALOG_FAMILY, (left ? HConstants.MERGEB_QUALIFIER
+        : HConstants.MERGEA_QUALIFIER), anotherMerging.toByteArray());
+    putToMetaTable(catalogTracker, put);
+    LOG.info("Offlined merging region " + merging.getRegionNameAsString()
+        + " in META, merged region=" + merged.getEncodedName()
+        + ", other merging region=" + anotherMerging.getEncodedName());
+  }
+
+  /**
   * Adds a META row for each of the specified new regions.
   * @param catalogTracker CatalogTracker
   * @param regionInfos region information list
@@ -351,6 +400,25 @@
        ", from parent " + parent.getRegionNameAsString());
  }
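Read together with deleteReferencesInMergedRegion below, these editors imply the META choreography of a merge; the call order is inferred from the tests earlier in this patch, so treat it as a sketch:

  // Merging A and B into M:
  // offlineMergingRegionInMeta(ct, A, M, B, true);   // A: offline, merged=M, mergeB=B
  // offlineMergingRegionInMeta(ct, B, M, A, false);  // B: offline, merged=M, mergeA=A
  // addMergedRegionToMeta(ct, M, A, B);              // M: mergeA=A, mergeB=B
  // ...and once no reference files remain, CatalogJanitor calls:
  // deleteReferencesInMergedRegion(ct, M);           // drop mergeA/mergeB from M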
+  /**
+   * Deletes the merge references stored in a merged region's row.
+   * @param catalogTracker
+   * @param mergedRegion
+   * @throws IOException
+   */
+  public static void deleteReferencesInMergedRegion(
+      CatalogTracker catalogTracker, final HRegionInfo mergedRegion)
+      throws IOException {
+    Delete delete = new Delete(mergedRegion.getRegionName());
+    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
+    delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
+    deleteMetaTable(catalogTracker, delete);
+    LOG.info("Deleted references in merged region "
+        + mergedRegion.getRegionNameAsString() + ", qualifier="
+        + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
+        + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
+  }
+
   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
   throws IOException {
     p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1443294)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy)
@@ -1018,6 +1018,23 @@
   /**
    * Create a table.
    * @param tableName
+   * @param family
+   * @param splitRows
+   * @return An HTable instance for the created table.
+   * @throws IOException
+   */
+  public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
+      throws IOException {
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HColumnDescriptor hcd = new HColumnDescriptor(family);
+    desc.addFamily(hcd);
+    getHBaseAdmin().createTable(desc, splitRows);
+    return new HTable(getConfiguration(), tableName);
+  }
+
+  /**
+   * Create a table.
+   * @param tableName
    * @param families
    * @param c Configuration to use
    * @param numVersions
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java (working copy)
@@ -90,6 +90,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
@@ -903,6 +904,25 @@
   }

   /**
+   * Create a protocol buffer MergeRegionRequest.
+   * @param tableName
+   * @param regionAName
+   * @param regionBName
+   * @param force
+   * @return a MergeRegionRequest
+   */
+  public static MergeRegionRequest buildMergeRegionRequest(
+      final byte[] tableName, final byte[] regionAName,
+      final byte[] regionBName, final boolean force) {
+    MergeRegionRequest.Builder builder = MergeRegionRequest.newBuilder();
+    builder.setTableName(ByteString.copyFrom(tableName));
+    builder.setRegionAName(ByteString.copyFrom(regionAName));
+    builder.setRegionBName(ByteString.copyFrom(regionBName));
+    builder.setForce(force);
+    return builder.build();
+  }
+
+  /**
    * Create a protocol buffer AssignRegionRequest
    *
    * @param regionName
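A sketch of the request this builder produces for the master's mergeRegions RPC; the literal arguments are hypothetical:

  MergeRegionRequest request = RequestConverter.buildMergeRegionRequest(
      Bytes.toBytes("t1"),         // table name
      Bytes.toBytes("5a1e9c..."),  // encoded name of merging region A (hypothetical)
      Bytes.toBytes("8c07b2..."),  // encoded name of merging region B (hypothetical)
      false);                      // false: only adjacent regions may merge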
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -82,6 +82,7 @@
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MergeRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
@@ -1470,6 +1471,33 @@
   }

   /**
+   * Merge the given two regions. To merge two non-adjacent regions, set
+   * force to true; otherwise such a request is skipped.
+   * Asynchronous operation.
+   * @param tableName
+   * @param regionAEncodedName encoded name of the first merging region
+   * @param regionBEncodedName encoded name of the second merging region
+   * @param force if false, only two adjacent regions can be merged
+   * @throws MasterNotRunningException
+   */
+  public void mergeRegions(final byte[] tableName,
+      final byte[] regionAEncodedName, final byte[] regionBEncodedName,
+      final boolean force) throws MasterNotRunningException {
+    MasterAdminKeepAliveConnection master = connection
+        .getKeepAliveMasterAdmin();
+    try {
+      MergeRegionRequest request = RequestConverter.buildMergeRegionRequest(
+          tableName, regionAEncodedName, regionBEncodedName, force);
+      master.mergeRegions(null, request);
+    } catch (ServiceException se) {
+      LOG.error("Unexpected exception: " + se
+          + " from calling HMaster.mergeRegions");
+    } finally {
+      master.close();
+    }
+  }
+
+  /**
   * @param regionName
   *          Region name to assign.
   * @throws MasterNotRunningException
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (working copy)
@@ -292,6 +292,20 @@
   }

   /**
+   * Gets the Result in META for the specified region.
+   * @param catalogTracker
+   * @param regionName
+   * @return result of the specified region
+   * @throws IOException
+   */
+  public static Result getRegionResult(CatalogTracker catalogTracker,
+      byte[] regionName) throws IOException {
+    Get get = new Get(regionName);
+    get.addFamily(HConstants.CATALOG_FAMILY);
+    return get(getCatalogHTable(catalogTracker, regionName), get);
+  }
+
+  /**
   * Checks if the specified table exists.  Looks at the META table hosted on
   * the specified server.
   * @param catalogTracker
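As the tests in this patch do, callers can combine the new accessor with the merge qualifiers to read merge bookkeeping straight from META; a sketch, with ct standing for the master's CatalogTracker:

  Result row = MetaReader.getRegionResult(ct, region.getRegionName());
  HRegionInfo mergeA = HRegionInfo.getHRegionInfo(row, HConstants.MERGEA_QUALIFIER);
  HRegionInfo mergeB = HRegionInfo.getHRegionInfo(row, HConstants.MERGEB_QUALIFIER);
  if (mergeA != null && mergeB != null) {
    // The row belongs to a merged region whose references are not yet cleaned.
  }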
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (working copy)
@@ -1058,6 +1058,34 @@
 }

 /**
+  * Creates the specified node with the specified data. Does not set a watch.
+  *
+  * <p>Throws an exception if the node already exists.
+  *
+  * <p>The node created is persistent and open access.
+  *
+  * @param zkw zk reference
+  * @param znode path of node to create
+  * @param data data of node to create
+  * @throws KeeperException if unexpected zookeeper exception
+  * @throws KeeperException.NodeExistsException if node already exists
+  */
+ public static void createAndNoWatch(ZooKeeperWatcher zkw, String znode,
+     byte[] data) throws KeeperException, KeeperException.NodeExistsException {
+   try {
+     zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode),
+         CreateMode.PERSISTENT);
+   } catch (InterruptedException e) {
+     zkw.interruptedException(e);
+   }
+ }
+
+ /**
  * Async creates the specified node with the specified data.
  *
  * <p>Throws an exception if the node already exists.
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (revision 1443294)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (working copy)
@@ -922,17 +922,23 @@
       final byte [] splitRow,
       final boolean top)
       throws IOException {
+    return split(fs, splitDir, f.getPath(), splitRow, top);
+  }
+
+  static Path split(final FileSystem fs, final Path splitDir,
+      final Path filePath, final byte[] splitRow, final boolean top)
+      throws IOException {
     // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
+    Reference r = top ? Reference.createTopReference(splitRow) : Reference
+        .createBottomReference(splitRow);
     // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_PARSER regex above.  The referred-to regions name is
+    // See REF_NAME_PARSER regex above.  The referred-to region's name is
     // up in the path of the passed in f -- parentdir is family,
     // then the directory above is the region name.
-    String parentRegionName = f.getPath().getParent().getParent().getName();
+    String parentRegionName = filePath.getParent().getParent().getName();
     // Write reference with same file id only with the other region name as
     // suffix and into the new region location (under same family).
-    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
+    Path p = new Path(splitDir, filePath.getName() + "." + parentRegionName);
     return r.write(fs, p);
   }

Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision 1443294)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (working copy)
@@ -65,7 +65,7 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Triple;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -536,9 +536,10 @@
     splita.setOffline(true); //simulate that splita goes offline when it is split
     splitParents.put(splita, createResult(splita, splitaa, splitab));

+    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
     CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
-    doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
-        10, splitParents)).when(janitor).getSplitParents();
+    doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
+        10, mergedRegions, splitParents)).when(janitor).getMergedRegionsAndSplitParents();

     //create ref from splita to parent
     Path splitaRef =