diff --git bin/region_mover.rb bin/region_mover.rb
index 482617c..425296a 100644
--- bin/region_mover.rb
+++ bin/region_mover.rb
@@ -102,7 +102,7 @@ def getServerNameForRegion(admin, r)
# Hack
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(admin.getConfiguration(), "region_mover", nil)
begin
- tracker = org.apache.hadoop.hbase.zookeeper.MetaRegionTracker.new(zkw, RubyAbortable.new())
+ tracker = org.apache.hadoop.hbase.zookeeper.MetaRegionLocator.new(zkw, RubyAbortable.new())
tracker.start()
while not tracker.isLocationAvailable()
sleep 0.1
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
index c39d6ad..839264f 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
@@ -40,9 +41,9 @@ public interface Server extends Abortable, Stoppable {
ZooKeeperWatcher getZooKeeper();
/**
- * @return Master's instance of {@link CatalogTracker}
+ * Get wrapper short-circuit HConnection for this server.
*/
- CatalogTracker getCatalogTracker();
+ HConnection getShortCircuitConnection();
/**
* @return The unique server name for this server.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
deleted file mode 100644
index 3889317..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ /dev/null
@@ -1,457 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import com.google.common.base.Stopwatch;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.ipc.RemoteException;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.NoRouteToHostException;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-
-/**
- * Tracks the availability of the catalog tables
- * hbase:meta.
- *
- * This class is "read-only" in that the locations of the catalog tables cannot
- * be explicitly set. Instead, ZooKeeper is used to learn of the availability
- * and location of hbase:meta.
- *
- *
- * <p>Call {@link #start()} to start up operation. Call {@link #stop()}} to
- * interrupt waits and close up shop.
- */
-@InterfaceAudience.Private
-public class CatalogTracker {
- // TODO JDC 11/30 We don't even have ROOT anymore, revisit
- // TODO: This class needs a rethink. The original intent was that it would be
- // the one-stop-shop for meta locations and that it would get this
- // info from reading and watching zk state. The class was to be used by
- // servers when they needed to know of meta movement but also by
- // client-side (inside in HTable) so rather than figure meta
- // locations on fault, the client would instead get notifications out of zk.
- //
- // But this original intent is frustrated by the fact that this class has to
- // read an hbase table, the -ROOT- table, to figure out the hbase:meta region
- // location which means we depend on an HConnection. HConnection will do
- // retrying but also, it has its own mechanism for finding root and meta
- // locations (and for 'verifying'; it tries the location and if it fails, does
- // new lookup, etc.). So, at least for now, HConnection (or HTable) can't
- // have a CT since CT needs a HConnection (Even then, do want HT to have a CT?
- // For HT keep up a session with ZK? Rather, shouldn't we do like asynchbase
- // where we'd open a connection to zk, read what we need then let the
- // connection go?). The 'fix' is make it so both root and meta addresses
- // are wholey up in zk -- not in zk (root) -- and in an hbase table (meta).
- //
- // But even then, this class does 'verification' of the location and it does
- // this by making a call over an HConnection (which will do its own root
- // and meta lookups). Isn't this verification 'useless' since when we
- // return, whatever is dependent on the result of this call then needs to
- // use HConnection; what we have verified may change in meantime (HConnection
- // uses the CT primitives, the root and meta trackers finding root locations).
- //
- // When meta is moved to zk, this class may make more sense. In the
- // meantime, it does not cohere. It should just watch meta and root and not
- // NOT do verification -- let that be out in HConnection since its going to
- // be done there ultimately anyways.
- //
- // This class has spread throughout the codebase. It needs to be reigned in.
- // This class should be used server-side only, even if we move meta location
- // up into zk. Currently its used over in the client package. Its used in
- // MetaReader and MetaEditor classes usually just to get the Configuration
- // its using (It does this indirectly by asking its HConnection for its
- // Configuration and even then this is just used to get an HConnection out on
- // the other end). I made https://issues.apache.org/jira/browse/HBASE-4495 for
- // doing CT fixup. St.Ack 09/30/2011.
- //
-
- // TODO: Timeouts have never been as advertised in here and its worse now
- // with retries; i.e. the HConnection retries and pause goes ahead whatever
- // the passed timeout is. Fix.
- private static final Log LOG = LogFactory.getLog(CatalogTracker.class);
- private final HConnection connection;
- private final ZooKeeperWatcher zookeeper;
- private final MetaRegionTracker metaRegionTracker;
- private boolean instantiatedzkw = false;
- private Abortable abortable;
-
- private boolean stopped = false;
-
- static final byte [] META_REGION_NAME =
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
-
- /**
- * Constructs a catalog tracker. Find current state of catalog tables.
- * Begin active tracking by executing {@link #start()} post construction. Does
- * not timeout.
- *
- * @param conf
- * the {@link Configuration} from which a {@link HConnection} will be
- * obtained; if problem, this connections
- * {@link HConnection#abort(String, Throwable)} will be called.
- * @throws IOException
- */
- public CatalogTracker(final Configuration conf) throws IOException {
- this(null, conf, HConnectionManager.getConnection(conf), null);
- }
-
- /**
- * Constructs the catalog tracker. Find current state of catalog tables.
- * Begin active tracking by executing {@link #start()} post construction.
- * Does not timeout.
- * @param zk If zk is null, we'll create an instance (and shut it down
- * when {@link #stop()} is called) else we'll use what is passed.
- * @param conf
- * @param abortable If fatal exception we'll call abort on this. May be null.
- * If it is we'll use the Connection associated with the passed
- * {@link Configuration} as our Abortable.
- * @throws IOException
- */
- public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
- HConnection connection, Abortable abortable)
- throws IOException {
- this.connection = connection;
- if (abortable == null) {
- // A connection is abortable.
- this.abortable = this.connection;
- } else {
- this.abortable = abortable;
- }
- Abortable throwableAborter = new Abortable() {
-
- @Override
- public void abort(String why, Throwable e) {
- throw new RuntimeException(why, e);
- }
-
- @Override
- public boolean isAborted() {
- return true;
- }
-
- };
- if (zk == null) {
- // Create our own. Set flag so we tear it down on stop.
- this.zookeeper =
- new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
- abortable);
- instantiatedzkw = true;
- } else {
- this.zookeeper = zk;
- }
- this.metaRegionTracker = new MetaRegionTracker(zookeeper, throwableAborter);
- }
-
- /**
- * Starts the catalog tracker.
- * Determines current availability of catalog tables and ensures all further
- * transitions of either region are tracked.
- * @throws IOException
- * @throws InterruptedException
- */
- public void start() throws IOException, InterruptedException {
- LOG.debug("Starting catalog tracker " + this);
- try {
- this.metaRegionTracker.start();
- } catch (RuntimeException e) {
- Throwable t = e.getCause();
- this.abortable.abort(e.getMessage(), t);
- throw new IOException("Attempt to start meta tracker failed.", t);
- }
- }
-
- /**
- * Stop working.
- * Interrupts any ongoing waits.
- */
- public void stop() {
- if (!this.stopped) {
- LOG.debug("Stopping catalog tracker " + this);
- this.stopped = true;
- this.metaRegionTracker.stop();
- try {
- if (this.connection != null) {
- this.connection.close();
- }
- } catch (IOException e) {
- // Although the {@link Closeable} interface throws an {@link
- // IOException}, in reality, the implementation would never do that.
- LOG.error("Attempt to close catalog tracker's connection failed.", e);
- }
- if (this.instantiatedzkw) {
- this.zookeeper.close();
- }
- }
- }
-
- /**
- * Gets the current location for hbase:meta or null if location is
- * not currently available.
- * @return {@link ServerName} for server hosting hbase:meta or null
- * if none available
- * @throws InterruptedException
- */
- public ServerName getMetaLocation() throws InterruptedException {
- return this.metaRegionTracker.getMetaRegionLocation();
- }
-
- /**
- * Checks whether meta regionserver znode has some non null data.
- * @return true if data is not null, false otherwise.
- */
- public boolean isMetaLocationAvailable() {
- return this.metaRegionTracker.isLocationAvailable();
- }
- /**
- * Gets the current location for hbase:meta if available and waits
- * for up to the specified timeout if not immediately available. Returns null
- * if the timeout elapses before root is available.
- * @param timeout maximum time to wait for root availability, in milliseconds
- * @return {@link ServerName} for server hosting hbase:meta or null
- * if none available
- * @throws InterruptedException if interrupted while waiting
- * @throws NotAllMetaRegionsOnlineException if meta not available before
- * timeout
- */
- public ServerName waitForMeta(final long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException {
- ServerName sn = metaRegionTracker.waitMetaRegionLocation(timeout);
- if (sn == null) {
- throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms");
- }
- return sn;
- }
-
- /**
- * Gets a connection to the server hosting meta, as reported by ZooKeeper,
- * waiting up to the specified timeout for availability.
- * @param timeout How long to wait on meta location
- * @see #waitForMeta for additional information
- * @return connection to server hosting meta
- * @throws InterruptedException
- * @throws NotAllMetaRegionsOnlineException if timed out waiting
- * @throws IOException
- * @deprecated Use #getMetaServerConnection(long)
- */
- public AdminService.BlockingInterface waitForMetaServerConnection(long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
- return getMetaServerConnection(timeout);
- }
-
- /**
- * Gets a connection to the server hosting meta, as reported by ZooKeeper,
- * waiting up to the specified timeout for availability.
- *
- * <p>WARNING: Does not retry. Use an {@link HTable} instead.
- * @param timeout How long to wait on meta location
- * @see #waitForMeta for additional information
- * @return connection to server hosting meta
- * @throws InterruptedException
- * @throws NotAllMetaRegionsOnlineException if timed out waiting
- * @throws IOException
- */
- AdminService.BlockingInterface getMetaServerConnection(long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
- return getCachedConnection(waitForMeta(timeout));
- }
-
- /**
- * Waits indefinitely for availability of hbase:meta. Used during
- * cluster startup. Does not verify meta, just that something has been
- * set up in zk.
- * @see #waitForMeta(long)
- * @throws InterruptedException if interrupted while waiting
- */
- public void waitForMeta() throws InterruptedException {
- Stopwatch stopwatch = new Stopwatch().start();
- while (!this.stopped) {
- try {
- if (waitForMeta(100) != null) break;
- long sleepTime = stopwatch.elapsedMillis();
- // +1 in case sleepTime=0
- if ((sleepTime + 1) % 10000 == 0) {
- LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
- }
- } catch (NotAllMetaRegionsOnlineException e) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("hbase:meta still not available, sleeping and retrying." +
- " Reason: " + e.getMessage());
- }
- }
- }
- }
-
- /**
- * @param sn ServerName to get a connection against.
- * @return The AdminProtocol we got when we connected to sn
- * May have come from cache, may not be good, may have been setup by this
- * invocation, or may be null.
- * @throws IOException
- */
- @SuppressWarnings("deprecation")
- private AdminService.BlockingInterface getCachedConnection(ServerName sn)
- throws IOException {
- if (sn == null) {
- return null;
- }
- AdminService.BlockingInterface service = null;
- try {
- service = connection.getAdmin(sn);
- } catch (RetriesExhaustedException e) {
- if (e.getCause() != null && e.getCause() instanceof ConnectException) {
- // Catch this; presume it means the cached connection has gone bad.
- } else {
- throw e;
- }
- } catch (SocketTimeoutException e) {
- LOG.debug("Timed out connecting to " + sn);
- } catch (NoRouteToHostException e) {
- LOG.debug("Connecting to " + sn, e);
- } catch (SocketException e) {
- LOG.debug("Exception connecting to " + sn);
- } catch (UnknownHostException e) {
- LOG.debug("Unknown host exception connecting to " + sn);
- } catch (FailedServerException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Server " + sn + " is in failed server list.");
- }
- } catch (IOException ioe) {
- Throwable cause = ioe.getCause();
- if (ioe instanceof ConnectException) {
- // Catch. Connect refused.
- } else if (cause != null && cause instanceof EOFException) {
- // Catch. Other end disconnected us.
- } else if (cause != null && cause.getMessage() != null &&
- cause.getMessage().toLowerCase().contains("connection reset")) {
- // Catch. Connection reset.
- } else {
- throw ioe;
- }
-
- }
- return service;
- }
-
- /**
- * Verify we can connect to hostingServer and that its carrying
- * regionName.
- * @param hostingServer Interface to the server hosting regionName
- * @param address The servername that goes with the metaServer
- * Interface. Used logging.
- * @param regionName The regionname we are interested in.
- * @return True if we were able to verify the region located at other side of
- * the Interface.
- * @throws IOException
- */
- // TODO: We should be able to get the ServerName from the AdminProtocol
- // rather than have to pass it in. Its made awkward by the fact that the
- // HRI is likely a proxy against remote server so the getServerName needs
- // to be fixed to go to a local method or to a cache before we can do this.
- private boolean verifyRegionLocation(AdminService.BlockingInterface hostingServer,
- final ServerName address, final byte [] regionName)
- throws IOException {
- if (hostingServer == null) {
- LOG.info("Passed hostingServer is null");
- return false;
- }
- Throwable t = null;
- try {
- // Try and get regioninfo from the hosting server.
- return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null;
- } catch (ConnectException e) {
- t = e;
- } catch (RetriesExhaustedException e) {
- t = e;
- } catch (RemoteException e) {
- IOException ioe = e.unwrapRemoteException();
- t = ioe;
- } catch (IOException e) {
- Throwable cause = e.getCause();
- if (cause != null && cause instanceof EOFException) {
- t = cause;
- } else if (cause != null && cause.getMessage() != null
- && cause.getMessage().contains("Connection reset")) {
- t = cause;
- } else {
- t = e;
- }
- }
- LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
- " at address=" + address + ", exception=" + t);
- return false;
- }
-
- /**
- * Verify hbase:meta is deployed and accessible.
- * @param timeout How long to wait on zk for meta address (passed through to
- * the internal call to {@link #waitForMetaServerConnection(long)}.
- * @return True if the hbase:meta location is healthy.
- * @throws IOException
- * @throws InterruptedException
- */
- public boolean verifyMetaRegionLocation(final long timeout)
- throws InterruptedException, IOException {
- AdminService.BlockingInterface service = null;
- try {
- service = waitForMetaServerConnection(timeout);
- } catch (NotAllMetaRegionsOnlineException e) {
- // Pass
- } catch (ServerNotRunningYetException e) {
- // Pass -- remote server is not up so can't be carrying root
- } catch (UnknownHostException e) {
- // Pass -- server name doesn't resolve so it can't be assigned anything.
- } catch (RegionServerStoppedException e) {
- // Pass -- server name sends us to a server that is dying or already dead.
- }
- return (service == null)? false:
- verifyRegionLocation(service,
- this.metaRegionTracker.getMetaRegionLocation(), META_REGION_NAME);
- }
-
- public HConnection getConnection() {
- return this.connection;
- }
-
- @Override
- public String toString() {
- return "CatalogTracker{" + "connection=" + connection + ", zookeeper=" + zookeeper +
- ", metaRegionTracker=" + metaRegionTracker + ", stopped=" + stopped + '}';
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
deleted file mode 100644
index 4f15390..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ /dev/null
@@ -1,614 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
-
-/**
- * Reads region and assignment information from hbase:meta.
- */
-@InterfaceAudience.Private
-public class MetaReader {
- // TODO: Strip CatalogTracker from this class. Its all over and in the end
- // its only used to get its Configuration so we can get associated
- // Connection.
- private static final Log LOG = LogFactory.getLog(MetaReader.class);
-
- static final byte [] META_REGION_PREFIX;
- static {
- // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
- // FIRST_META_REGIONINFO == 'hbase:meta,,1'. META_REGION_PREFIX == 'hbase:meta,'
- int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
- META_REGION_PREFIX = new byte [len];
- System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
- META_REGION_PREFIX, 0, len);
- }
-
- /**
- * Performs a full scan of hbase:meta, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
- CatalogTracker catalogTracker, final Set<TableName> disabledTables)
- throws IOException {
- return fullScan(catalogTracker, disabledTables, false);
- }
-
- /**
- * Performs a full scan of hbase:meta, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @param excludeOfflinedSplitParents If true, do not include offlined split
- * parents in the return.
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
- CatalogTracker catalogTracker, final Set<TableName> disabledTables,
- final boolean excludeOfflinedSplitParents)
- throws IOException {
- final Map<HRegionInfo, ServerName> regions =
- new TreeMap<HRegionInfo, ServerName>();
- Visitor v = new Visitor() {
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
- HRegionInfo hri = region.getFirst();
- if (hri == null) return true;
- if (hri.getTable() == null) return true;
- if (disabledTables.contains(
- hri.getTable())) return true;
- // Are we to include split parents in the list?
- if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
- regions.put(hri, region.getSecond());
- return true;
- }
- };
- fullScan(catalogTracker, v);
- return regions;
- }
-
- /**
- * Performs a full scan of hbase:meta.
- * @return List of {@link Result}
- * @throws IOException
- */
- public static List<Result> fullScan(CatalogTracker catalogTracker)
- throws IOException {
- CollectAllVisitor v = new CollectAllVisitor();
- fullScan(catalogTracker, v, null);
- return v.getResults();
- }
-
- /**
- * Performs a full scan of a hbase:meta table.
- * @return List of {@link Result}
- * @throws IOException
- */
- public static List<Result> fullScanOfMeta(CatalogTracker catalogTracker)
- throws IOException {
- CollectAllVisitor v = new CollectAllVisitor();
- fullScan(catalogTracker, v, null);
- return v.getResults();
- }
-
- /**
- * Performs a full scan of hbase:meta.
- * @param catalogTracker
- * @param visitor Visitor invoked against each row.
- * @throws IOException
- */
- public static void fullScan(CatalogTracker catalogTracker,
- final Visitor visitor)
- throws IOException {
- fullScan(catalogTracker, visitor, null);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param catalogTracker We'll use this catalogtracker's connection
- * @param tableName Table to get an {@link HTable} against.
- * @return An {@link HTable} for tableName
- * @throws IOException
- */
- private static HTable getHTable(final CatalogTracker catalogTracker,
- final TableName tableName)
- throws IOException {
- // Passing the CatalogTracker's connection ensures this
- // HTable instance uses the CatalogTracker's connection.
- org.apache.hadoop.hbase.client.HConnection c = catalogTracker.getConnection();
- if (c == null) throw new NullPointerException("No connection");
- return new HTable(tableName, c);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param catalogTracker
- * @return An {@link HTable} for hbase:meta
- * @throws IOException
- */
- static HTable getCatalogHTable(final CatalogTracker catalogTracker)
- throws IOException {
- return getMetaHTable(catalogTracker);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param ct
- * @return An {@link HTable} for hbase:meta
- * @throws IOException
- */
- static HTable getMetaHTable(final CatalogTracker ct)
- throws IOException {
- return getHTable(ct, TableName.META_TABLE_NAME);
- }
-
- /**
- * @param t Table to use (will be closed when done).
- * @param g Get to run
- * @throws IOException
- */
- private static Result get(final HTable t, final Get g) throws IOException {
- try {
- return t.get(g);
- } finally {
- t.close();
- }
- }
-
- /**
- * Reads the location of the specified region
- * @param catalogTracker
- * @param regionName region whose location we are after
- * @return location of region as a {@link ServerName} or null if not found
- * @throws IOException
- */
- static ServerName readRegionLocation(CatalogTracker catalogTracker,
- byte [] regionName)
- throws IOException {
- Pair<HRegionInfo, ServerName> pair = getRegion(catalogTracker, regionName);
- return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
- }
-
- /**
- * Gets the region info and assignment for the specified region.
- * @param catalogTracker
- * @param regionName Region to lookup.
- * @return Location and HRegionInfo for regionName
- * @throws IOException
- */
- public static Pair<HRegionInfo, ServerName> getRegion(
- CatalogTracker catalogTracker, byte [] regionName)
- throws IOException {
- Get get = new Get(regionName);
- get.addFamily(HConstants.CATALOG_FAMILY);
- Result r = get(getCatalogHTable(catalogTracker), get);
- return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r);
- }
-
- /**
- * Gets the result in hbase:meta for the specified region.
- * @param catalogTracker
- * @param regionName
- * @return result of the specified region
- * @throws IOException
- */
- public static Result getRegionResult(CatalogTracker catalogTracker,
- byte[] regionName) throws IOException {
- Get get = new Get(regionName);
- get.addFamily(HConstants.CATALOG_FAMILY);
- return get(getCatalogHTable(catalogTracker), get);
- }
-
- /**
- * Get regions from the merge qualifier of the specified merged region
- * @return null if it doesn't contain merge qualifier, else two merge regions
- * @throws IOException
- */
- public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
- CatalogTracker catalogTracker, byte[] regionName) throws IOException {
- Result result = getRegionResult(catalogTracker, regionName);
- HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
- HConstants.MERGEA_QUALIFIER);
- HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
- HConstants.MERGEB_QUALIFIER);
- if (mergeA == null && mergeB == null) {
- return null;
- }
- return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
- }
-
- /**
- * Checks if the specified table exists. Looks at the hbase:meta table hosted on
- * the specified server.
- * @param catalogTracker
- * @param tableName table to check
- * @return true if the table exists in meta, false if not
- * @throws IOException
- */
- public static boolean tableExists(CatalogTracker catalogTracker,
- final TableName tableName)
- throws IOException {
- if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
- // Catalog tables always exist.
- return true;
- }
- // Make a version of ResultCollectingVisitor that only collects the first
- CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
- private HRegionInfo current = null;
-
- @Override
- public boolean visit(Result r) throws IOException {
- this.current =
- HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
- if (this.current == null) {
- LOG.warn("No serialized HRegionInfo in " + r);
- return true;
- }
- if (!isInsideTable(this.current, tableName)) return false;
- // Else call super and add this Result to the collection.
- super.visit(r);
- // Stop collecting regions from table after we get one.
- return false;
- }
-
- @Override
- void add(Result r) {
- // Add the current HRI.
- this.results.add(this.current);
- }
- };
- fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName));
- // If visitor has results >= 1 then table exists.
- return visitor.getResults().size() >= 1;
- }
-
- /**
- * Gets all of the regions of the specified table.
- * @param catalogTracker
- * @param tableName
- * @return Ordered list of {@link HRegionInfo}.
- * @throws IOException
- */
- public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
- TableName tableName)
- throws IOException {
- return getTableRegions(catalogTracker, tableName, false);
- }
-
- /**
- * Gets all of the regions of the specified table.
- * @param catalogTracker
- * @param tableName
- * @param excludeOfflinedSplitParents If true, do not include offlined split
- * parents in the return.
- * @return Ordered list of {@link HRegionInfo}.
- * @throws IOException
- */
- public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
- TableName tableName, final boolean excludeOfflinedSplitParents)
- throws IOException {
- List<Pair<HRegionInfo, ServerName>> result = null;
- try {
- result = getTableRegionsAndLocations(catalogTracker, tableName,
- excludeOfflinedSplitParents);
- } catch (InterruptedException e) {
- throw (InterruptedIOException)new InterruptedIOException().initCause(e);
- }
- return getListOfHRegionInfos(result);
- }
-
- static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
- if (pairs == null || pairs.isEmpty()) return null;
- List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
- for (Pair<HRegionInfo, ServerName> pair: pairs) {
- result.add(pair.getFirst());
- }
- return result;
- }
-
- /**
- * @param current
- * @param tableName
- * @return True if current tablename is equal to
- * tableName
- */
- static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
- return tableName.equals(current.getTable());
- }
-
- /**
- * @param tableName
- * @return Place to start Scan in hbase:meta when passed a
- * tableName; returns <tableName&rt; <,&rt; <,&rt;
- */
- static byte [] getTableStartRowForMeta(TableName tableName) {
- byte [] startRow = new byte[tableName.getName().length + 2];
- System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
- startRow[startRow.length - 2] = HConstants.DELIMITER;
- startRow[startRow.length - 1] = HConstants.DELIMITER;
- return startRow;
- }
-
- /**
- * This method creates a Scan object that will only scan catalog rows that
- * belong to the specified table. It doesn't specify any columns.
- * This is a better alternative to just using a start row and scan until
- * it hits a new table since that requires parsing the HRI to get the table
- * name.
- * @param tableName bytes of table's name
- * @return configured Scan object
- */
- public static Scan getScanForTableName(TableName tableName) {
- String strName = tableName.getNameAsString();
- // Start key is just the table name with delimiters
- byte[] startKey = Bytes.toBytes(strName + ",,");
- // Stop key appends the smallest possible char to the table name
- byte[] stopKey = Bytes.toBytes(strName + " ,,");
-
- Scan scan = new Scan(startKey);
- scan.setStopRow(stopKey);
- return scan;
- }
-
- /**
- * @param catalogTracker
- * @param tableName
- * @return Return list of regioninfos and server.
- * @throws IOException
- * @throws InterruptedException
- */
- public static List>
- getTableRegionsAndLocations(CatalogTracker catalogTracker, TableName tableName)
- throws IOException, InterruptedException {
- return getTableRegionsAndLocations(catalogTracker, tableName,
- true);
- }
-
- /**
- * @param catalogTracker
- * @param tableName
- * @return Return list of regioninfos and server addresses.
- * @throws IOException
- * @throws InterruptedException
- */
- public static List>
- getTableRegionsAndLocations(final CatalogTracker catalogTracker,
- final TableName tableName, final boolean excludeOfflinedSplitParents)
- throws IOException, InterruptedException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
- // If meta, do a bit of special handling.
- ServerName serverName = catalogTracker.getMetaLocation();
- List> list =
- new ArrayList>();
- list.add(new Pair(HRegionInfo.FIRST_META_REGIONINFO,
- serverName));
- return list;
- }
- // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
- CollectingVisitor> visitor =
- new CollectingVisitor>() {
- private Pair current = null;
-
- @Override
- public boolean visit(Result r) throws IOException {
- HRegionInfo hri =
- HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
- if (hri == null) {
- LOG.warn("No serialized HRegionInfo in " + r);
- return true;
- }
- if (!isInsideTable(hri, tableName)) return false;
- if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
- ServerName sn = HRegionInfo.getServerName(r);
- // Populate this.current so available when we call #add
- this.current = new Pair(hri, sn);
- // Else call super and add this Result to the collection.
- return super.visit(r);
- }
-
- @Override
- void add(Result r) {
- this.results.add(this.current);
- }
- };
- fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName));
- return visitor.getResults();
- }
-
- /**
- * @param catalogTracker
- * @param serverName
- * @return List of user regions installed on this server (does not include
- * catalog regions).
- * @throws IOException
- */
- public static NavigableMap
- getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName)
- throws IOException {
- final NavigableMap hris = new TreeMap();
- // Fill the above hris map with entries from hbase:meta that have the passed
- // servername.
- CollectingVisitor v = new CollectingVisitor() {
- @Override
- void add(Result r) {
- if (r == null || r.isEmpty()) return;
- ServerName sn = HRegionInfo.getServerName(r);
- if (sn != null && sn.equals(serverName)) this.results.add(r);
- }
- };
- fullScan(catalogTracker, v);
- List results = v.getResults();
- if (results != null && !results.isEmpty()) {
- // Convert results to Map keyed by HRI
- for (Result r: results) {
- Pair p = HRegionInfo.getHRegionInfoAndServerName(r);
- if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r);
- }
- }
- return hris;
- }
-
- public static void fullScanMetaAndPrint(final CatalogTracker catalogTracker)
- throws IOException {
- Visitor v = new Visitor() {
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
- HRegionInfo hrim = HRegionInfo.getHRegionInfo(r);
- LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
- return true;
- }
- };
- fullScan(catalogTracker, v);
- }
-
- /**
- * Performs a full scan of a catalog table.
- * @param catalogTracker
- * @param visitor Visitor invoked against each row.
- * @param startrow Where to start the scan. Pass null if want to begin scan
- * at first row.
- * hbase:meta, the default (pass false to scan hbase:meta)
- * @throws IOException
- */
- public static void fullScan(CatalogTracker catalogTracker,
- final Visitor visitor, final byte [] startrow)
- throws IOException {
- Scan scan = new Scan();
- if (startrow != null) scan.setStartRow(startrow);
- if (startrow == null) {
- int caching = catalogTracker.getConnection().getConfiguration()
- .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
- scan.setCaching(caching);
- }
- scan.addFamily(HConstants.CATALOG_FAMILY);
- HTable metaTable = getMetaHTable(catalogTracker);
- ResultScanner scanner = null;
- try {
- scanner = metaTable.getScanner(scan);
- Result data;
- while((data = scanner.next()) != null) {
- if (data.isEmpty()) continue;
- // Break if visit returns false.
- if (!visitor.visit(data)) break;
- }
- } finally {
- if (scanner != null) scanner.close();
- metaTable.close();
- }
- return;
- }
-
- /**
- * Implementations 'visit' a catalog table row.
- */
- public interface Visitor {
- /**
- * Visit the catalog table row.
- * @param r A row from catalog table
- * @return True if we are to proceed scanning the table, else false if
- * we are to stop now.
- */
- boolean visit(final Result r) throws IOException;
- }
-
- /**
- * A {@link Visitor} that collects content out of passed {@link Result}.
- */
- static abstract class CollectingVisitor implements Visitor {
- final List results = new ArrayList();
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- add(r);
- return true;
- }
-
- abstract void add(Result r);
-
- /**
- * @return Collected results; wait till visits complete to collect all
- * possible results
- */
- List getResults() {
- return this.results;
- }
- }
-
- /**
- * Collects all returned.
- */
- static class CollectAllVisitor extends CollectingVisitor {
- @Override
- void add(Result r) {
- this.results.add(r);
- }
- }
-
- /**
- * Count regions in hbase:meta for passed table.
- * @param c
- * @param tableName
- * @return Count or regions in table tableName
- * @throws IOException
- */
- public static int getRegionCount(final Configuration c, final String tableName) throws IOException {
- HTable t = new HTable(c, tableName);
- try {
- return t.getRegionLocations().size();
- } finally {
- t.close();
- }
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java
new file mode 100644
index 0000000..48365ae
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java
@@ -0,0 +1,1138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.ConnectException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Read/write operations on region and assignment information store in
+ * hbase:meta.
+ *
+ * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
+ * for this is because when used on client-side (like from HBaseAdmin), we want to use
+ * short-lived connection (opened before each operation, closed right after), while
+ * when used on HM or HRS (like in AssignmentManager) we want permanent connection.
+ */
+@InterfaceAudience.Private
+public class MetaTableAccessor {
+ private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
+
+ static final byte [] META_REGION_PREFIX;
+ static {
+ // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
+ // FIRST_META_REGIONINFO == 'hbase:meta,,1'. META_REGION_PREFIX == 'hbase:meta,'
+ int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
+ META_REGION_PREFIX = new byte [len];
+ System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
+ META_REGION_PREFIX, 0, len);
+ }
+
+ ////////////////////////
+ // Reading operations //
+ ////////////////////////
+
+ /**
+ * Performs a full scan of hbase:meta, skipping regions from any
+ * tables in the specified set of disabled tables.
+ * @param hConnection connection we're using
+ * @param disabledTables set of disabled tables that will not be returned
+ * @return Returns a map of every region to its currently assigned server,
+ * according to META. If the region does not have an assignment it will have
+ * a null value in the map.
+ * @throws IOException
+ */
+ public static Map fullScan(
+ HConnection hConnection, final Set disabledTables)
+ throws IOException {
+ return fullScan(hConnection, disabledTables, false);
+ }
+
+ /**
+ * Performs a full scan of hbase:meta, skipping regions from any
+ * tables in the specified set of disabled tables.
+ * @param hConnection connection we're using
+ * @param disabledTables set of disabled tables that will not be returned
+ * @param excludeOfflinedSplitParents If true, do not include offlined split
+ * parents in the return.
+ * @return Returns a map of every region to its currently assigned server,
+ * according to META. If the region does not have an assignment it will have
+ * a null value in the map.
+ * @throws IOException
+ */
+ public static Map fullScan(
+ HConnection hConnection, final Set disabledTables,
+ final boolean excludeOfflinedSplitParents)
+ throws IOException {
+ final Map regions =
+ new TreeMap();
+ Visitor v = new Visitor() {
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ Pair region = HRegionInfo.getHRegionInfoAndServerName(r);
+ HRegionInfo hri = region.getFirst();
+ if (hri == null) return true;
+ if (hri.getTable() == null) return true;
+ if (disabledTables.contains(
+ hri.getTable())) return true;
+ // Are we to include split parents in the list?
+ if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+ regions.put(hri, region.getSecond());
+ return true;
+ }
+ };
+ fullScan(hConnection, v);
+ return regions;
+ }
+
+ /**
+ * Performs a full scan of a hbase:meta table.
+ * @return List of {@link Result}
+ * @throws IOException
+ */
+ public static List fullScanOfMeta(HConnection hConnection)
+ throws IOException {
+ CollectAllVisitor v = new CollectAllVisitor();
+ fullScan(hConnection, v, null);
+ return v.getResults();
+ }
+
+ /**
+ * Performs a full scan of hbase:meta.
+ * @param hConnection connection we're using
+ * @param visitor Visitor invoked against each row.
+ * @throws IOException
+ */
+ public static void fullScan(HConnection hConnection,
+ final Visitor visitor)
+ throws IOException {
+ fullScan(hConnection, visitor, null);
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param hConnection connection we're using to access table
+ * @param tableName Table to get an {@link HTable} against.
+ * @return An {@link HTable} for tableName
+ * @throws IOException
+ */
+ private static HTable getHTable(final HConnection hConnection,
+ final TableName tableName)
+ throws IOException {
+ // We used to pass whole CatalogTracker in here, now we just pass in HConnection
+ if (hConnection == null || hConnection.isClosed()) {
+ throw new NullPointerException("No connection");
+ }
+ return new HTable(tableName, hConnection);
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param hConnection connection we're using to access Meta
+ * @return An {@link HTable} for hbase:meta
+ * @throws IOException
+ */
+ static HTable getMetaHTable(final HConnection hConnection)
+ throws IOException {
+ return getHTable(hConnection, TableName.META_TABLE_NAME);
+ }
+
+ /**
+ * @param t Table to use (will be closed when done).
+ * @param g Get to run
+ * @throws IOException
+ */
+ private static Result get(final HTable t, final Get g) throws IOException {
+ try {
+ return t.get(g);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Reads the location of the specified region
+ * @param hConnection connection we're using
+ * @param regionName region whose location we are after
+ * @return location of region as a {@link ServerName} or null if not found
+ * @throws IOException
+ */
+ static ServerName readRegionLocation(HConnection hConnection,
+ byte [] regionName)
+ throws IOException {
+ Pair pair = getRegion(hConnection, regionName);
+ return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
+ }
+
+ /**
+ * Gets the region info and assignment for the specified region.
+ * @param hConnection connection we're using
+ * @param regionName Region to lookup.
+ * @return Location and HRegionInfo for regionName
+ * @throws IOException
+ */
+ public static Pair getRegion(
+ HConnection hConnection, byte [] regionName)
+ throws IOException {
+ Get get = new Get(regionName);
+ get.addFamily(HConstants.CATALOG_FAMILY);
+ Result r = get(getMetaHTable(hConnection), get);
+ return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r);
+ }
+
+ /**
+ * Gets the result in hbase:meta for the specified region.
+ * @param hConnection connection we're using
+ * @param regionName region we're looking for
+ * @return result of the specified region
+ * @throws IOException
+ */
+ public static Result getRegionResult(HConnection hConnection,
+ byte[] regionName) throws IOException {
+ Get get = new Get(regionName);
+ get.addFamily(HConstants.CATALOG_FAMILY);
+ return get(getMetaHTable(hConnection), get);
+ }
+
+ /**
+ * Get regions from the merge qualifier of the specified merged region
+ * @return null if it doesn't contain merge qualifier, else two merge regions
+ * @throws IOException
+ */
+ public static Pair getRegionsFromMergeQualifier(
+ HConnection hConnection, byte[] regionName) throws IOException {
+ Result result = getRegionResult(hConnection, regionName);
+ HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEA_QUALIFIER);
+ HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEB_QUALIFIER);
+ if (mergeA == null && mergeB == null) {
+ return null;
+ }
+ return new Pair(mergeA, mergeB);
+ }
+
+ /**
+ * Checks if the specified table exists. Looks at the hbase:meta table hosted on
+ * the specified server.
+ * @param hConnection connection we're using
+ * @param tableName table to check
+ * @return true if the table exists in meta, false if not
+ * @throws IOException
+ */
+ public static boolean tableExists(HConnection hConnection,
+ final TableName tableName)
+ throws IOException {
+ if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
+ // Catalog tables always exist.
+ return true;
+ }
+ // Make a version of ResultCollectingVisitor that only collects the first
+ CollectingVisitor visitor = new CollectingVisitor() {
+ private HRegionInfo current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ this.current =
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (this.current == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
+ return true;
+ }
+ if (!isInsideTable(this.current, tableName)) return false;
+ // Else call super and add this Result to the collection.
+ super.visit(r);
+ // Stop collecting regions from table after we get one.
+ return false;
+ }
+
+ @Override
+ void add(Result r) {
+ // Add the current HRI.
+ this.results.add(this.current);
+ }
+ };
+ fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+ // If visitor has results >= 1 then table exists.
+ return visitor.getResults().size() >= 1;
+ }
+
+ /**
+ * Gets all of the regions of the specified table.
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Ordered list of {@link HRegionInfo}.
+ * @throws IOException
+ */
+ public static List getTableRegions(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName)
+ throws IOException {
+ return getTableRegions(zkw, hConnection, tableName, false);
+ }
+
+ /**
+ * Gets all of the regions of the specified table.
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @param excludeOfflinedSplitParents If true, do not include offlined split
+ * parents in the return.
+ * @return Ordered list of {@link HRegionInfo}.
+ * @throws IOException
+ */
+ public static List getTableRegions(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName, final boolean excludeOfflinedSplitParents)
+ throws IOException {
+ List> result = null;
+ try {
+ result = getTableRegionsAndLocations(zkw, hConnection, tableName,
+ excludeOfflinedSplitParents);
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ return getListOfHRegionInfos(result);
+ }
+
+ static List getListOfHRegionInfos(final List> pairs) {
+ if (pairs == null || pairs.isEmpty()) return null;
+ List result = new ArrayList(pairs.size());
+ for (Pair pair: pairs) {
+ result.add(pair.getFirst());
+ }
+ return result;
+ }
+
+ /**
+ * @param current
+ * @param tableName
+ * @return True if current tablename is equal to
+ * tableName
+ */
+ static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
+ return tableName.equals(current.getTable());
+ }
+
+ /**
+ * @param tableName
+ * @return Place to start Scan in hbase:meta when passed a
+ * tableName; returns &lt;tableName&gt; &lt;,&gt; &lt;,&gt;
+ */
+ static byte [] getTableStartRowForMeta(TableName tableName) {
+ byte [] startRow = new byte[tableName.getName().length + 2];
+ System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
+ startRow[startRow.length - 2] = HConstants.DELIMITER;
+ startRow[startRow.length - 1] = HConstants.DELIMITER;
+ return startRow;
+ }
+
+ /**
+ * This method creates a Scan object that will only scan catalog rows that
+ * belong to the specified table. It doesn't specify any columns.
+ * This is a better alternative to just using a start row and scan until
+ * it hits a new table since that requires parsing the HRI to get the table
+ * name.
+ * @param tableName bytes of table's name
+ * @return configured Scan object
+ */
+ public static Scan getScanForTableName(TableName tableName) {
+ String strName = tableName.getNameAsString();
+ // Start key is just the table name with delimiters
+ byte[] startKey = Bytes.toBytes(strName + ",,");
+ // Stop key appends the smallest possible char to the table name
+ byte[] stopKey = Bytes.toBytes(strName + " ,,");
+
+ Scan scan = new Scan(startKey);
+ scan.setStopRow(stopKey);
+ return scan;
+ }
+
+ /**
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Return list of regioninfos and server.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public static List>
+ getTableRegionsAndLocations(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName)
+ throws IOException, InterruptedException {
+ return getTableRegionsAndLocations(zkw, hConnection, tableName, true);
+ }
+
+ /**
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Return list of regioninfos and server addresses.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public static List>
+ getTableRegionsAndLocations(final ZooKeeperWatcher zkw,
+ final HConnection hConnection, final TableName tableName,
+ final boolean excludeOfflinedSplitParents)
+ throws IOException, InterruptedException {
+ if (tableName.equals(TableName.META_TABLE_NAME)) {
+ // If meta, do a bit of special handling.
+ ServerName serverName = null;
+ serverName = MetaRegionLocator.getMetaRegionLocation(zkw);
+ List> list =
+ new ArrayList>();
+ list.add(new Pair(HRegionInfo.FIRST_META_REGIONINFO,
+ serverName));
+ return list;
+ }
+ // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
+ CollectingVisitor> visitor =
+ new CollectingVisitor>() {
+ private Pair current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ HRegionInfo hri =
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (hri == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
+ return true;
+ }
+ if (!isInsideTable(hri, tableName)) return false;
+ if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+ ServerName sn = HRegionInfo.getServerName(r);
+ // Populate this.current so available when we call #add
+ this.current = new Pair(hri, sn);
+ // Else call super and add this Result to the collection.
+ return super.visit(r);
+ }
+
+ @Override
+ void add(Result r) {
+ this.results.add(this.current);
+ }
+ };
+ fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+ return visitor.getResults();
+ }
+
+ /**
+ * @param hConnection connection we're using
+ * @param serverName server we're connected to
+ * @return List of user regions installed on this server (does not include
+ * catalog regions).
+ * @throws IOException
+ */
+ public static NavigableMap
+ getServerUserRegions(HConnection hConnection, final ServerName serverName)
+ throws IOException {
+ final NavigableMap hris = new TreeMap();
+ // Fill the above hris map with entries from hbase:meta that have the passed
+ // servername.
+ CollectingVisitor v = new CollectingVisitor() {
+ @Override
+ void add(Result r) {
+ if (r == null || r.isEmpty()) return;
+ ServerName sn = HRegionInfo.getServerName(r);
+ if (sn != null && sn.equals(serverName)) this.results.add(r);
+ }
+ };
+ fullScan(hConnection, v);
+ List results = v.getResults();
+ if (results != null && !results.isEmpty()) {
+ // Convert results to Map keyed by HRI
+ for (Result r: results) {
+ Pair p = HRegionInfo.getHRegionInfoAndServerName(r);
+ if (p != null && p.getFirst() != null) hris.put(p.getFirst(), r);
+ }
+ }
+ return hris;
+ }
+
+ public static void fullScanMetaAndPrint(final HConnection hConnection)
+ throws IOException {
+ Visitor v = new Visitor() {
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
+ HRegionInfo hrim = HRegionInfo.getHRegionInfo(r);
+ LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
+ return true;
+ }
+ };
+ fullScan(hConnection, v);
+ }
+
+ /**
+ * Performs a full scan of a catalog table.
+ * @param hConnection connection we're using
+ * @param visitor Visitor invoked against each row.
+ * @param startrow Where to start the scan. Pass null if want to begin scan
+ * at first row.
+ * The scan is always performed against the hbase:meta table.
+ * @throws IOException
+ */
+ public static void fullScan(HConnection hConnection,
+ final Visitor visitor, final byte [] startrow)
+ throws IOException {
+ Scan scan = new Scan();
+ if (startrow != null) scan.setStartRow(startrow);
+ if (startrow == null) {
+ int caching = hConnection.getConfiguration()
+ .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
+ scan.setCaching(caching);
+ }
+ scan.addFamily(HConstants.CATALOG_FAMILY);
+ HTable metaTable = getMetaHTable(hConnection);
+ ResultScanner scanner = null;
+ try {
+ scanner = metaTable.getScanner(scan);
+ Result data;
+ while((data = scanner.next()) != null) {
+ if (data.isEmpty()) continue;
+ // Break if visit returns false.
+ if (!visitor.visit(data)) break;
+ }
+ } finally {
+ if (scanner != null) scanner.close();
+ metaTable.close();
+ }
+ return;
+ }
+
+ /**
+ * Implementations 'visit' a catalog table row.
+ */
+ public interface Visitor {
+ /**
+ * Visit the catalog table row.
+ * @param r A row from catalog table
+ * @return True if we are to proceed scanning the table, else false if
+ * we are to stop now.
+ */
+ boolean visit(final Result r) throws IOException;
+ }
+
+ /**
+ * A {@link Visitor} that collects content out of passed {@link Result}.
+ */
+ static abstract class CollectingVisitor implements Visitor {
+ final List results = new ArrayList();
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ add(r);
+ return true;
+ }
+
+ abstract void add(Result r);
+
+ /**
+ * @return Collected results; wait till visits complete to collect all
+ * possible results
+ */
+ List getResults() {
+ return this.results;
+ }
+ }
+
+ /**
+ * Collects all returned.
+ */
+ static class CollectAllVisitor extends CollectingVisitor {
+ @Override
+ void add(Result r) {
+ this.results.add(r);
+ }
+ }
+
+ /**
+ * Count regions in hbase:meta for passed table.
+ * @param c
+ * @param tableName
+ * @return Count of regions in table tableName
+ * @throws IOException
+ */
+ public static int getRegionCount(final Configuration c, final String tableName) throws IOException {
+ HTable t = new HTable(c, tableName);
+ try {
+ return t.getRegionLocations().size();
+ } finally {
+ t.close();
+ }
+ }
+
+ ////////////////////////
+ // Editing operations //
+ ////////////////////////
+
+ /**
+ * Generates and returns a Put containing the region info for the catalog table
+ */
+ public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
+ throws IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addRegionInfo(put, regionInfo);
+ return put;
+ }
+
+ /**
+ * Generates and returns a Delete containing the region info for the catalog
+ * table
+ */
+ public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
+ if (regionInfo == null) {
+ throw new IllegalArgumentException("Can't make a delete for null region");
+ }
+ Delete delete = new Delete(regionInfo.getRegionName());
+ return delete;
+ }
+
+ /**
+ * Adds split daughters to the Put
+ */
+ public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
+ if (splitA != null) {
+ put.addImmutable(
+ HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
+ }
+ if (splitB != null) {
+ put.addImmutable(
+ HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
+ }
+ return put;
+ }
+
+ /**
+ * Put the passed p to the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param p Put to add to hbase:meta
+ * @throws IOException
+ */
+ static void putToMetaTable(final HConnection hConnection, final Put p)
+ throws IOException {
+ put(getMetaHTable(hConnection), p);
+ }
+
+ /**
+ * @param t Table to use (will be closed when done).
+ * @param p put to make
+ * @throws IOException
+ */
+ private static void put(final HTable t, final Put p) throws IOException {
+ try {
+ t.put(p);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Put the passed ps to the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param ps Put to add to hbase:meta
+ * @throws IOException
+ */
+ public static void putsToMetaTable(final HConnection hConnection, final List ps)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.put(ps);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Delete the passed d from the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param d Delete to add to hbase:meta
+ * @throws IOException
+ */
+ static void deleteFromMetaTable(final HConnection hConnection, final Delete d)
+ throws IOException {
+ List dels = new ArrayList(1);
+ dels.add(d);
+ deleteFromMetaTable(hConnection, dels);
+ }
+
+ /**
+ * Delete the passed deletes from the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param deletes Deletes to add to hbase:meta This list should support #remove.
+ * @throws IOException
+ */
+ public static void deleteFromMetaTable(final HConnection hConnection, final List deletes)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.delete(deletes);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Execute the passed mutations against hbase:meta table.
+ * @param hConnection connection we're using
+ * @param mutations Puts and Deletes to execute on hbase:meta
+ * @throws IOException
+ */
+ public static void mutateMetaTable(final HConnection hConnection,
+ final List mutations)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.batch(mutations);
+ } catch (InterruptedException e) {
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Adds a hbase:meta row for the specified new region.
+ * @param hConnection connection we're using
+ * @param regionInfo region information
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HConnection hConnection,
+ HRegionInfo regionInfo)
+ throws IOException {
+ putToMetaTable(hConnection, makePutFromRegionInfo(regionInfo));
+ LOG.info("Added " + regionInfo.getRegionNameAsString());
+ }
+
+ /**
+ * Adds a hbase:meta row for the specified new region to the given catalog table. The
+ * HTable is not flushed or closed.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
+ addRegionToMeta(meta, regionInfo, null, null);
+ }
+
+ /**
+ * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
+ * not add its daughter's as different rows, but adds information about the daughters
+ * in the same row as the parent. Use
+ * {@link #splitRegion(org.apache.hadoop.hbase.client.HConnection,
+ * HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
+ * if you want to do that.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @param splitA first split daughter of the parent regionInfo
+ * @param splitB second split daughter of the parent regionInfo
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
+ HRegionInfo splitA, HRegionInfo splitB) throws IOException {
+ Put put = makePutFromRegionInfo(regionInfo);
+ addDaughtersToPut(put, splitA, splitB);
+ meta.put(put);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added " + regionInfo.getRegionNameAsString());
+ }
+ }
+
+ /**
+ * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
+ * not add its daughter's as different rows, but adds information about the daughters
+ * in the same row as the parent. Use
+ * {@link #splitRegion(HConnection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
+ * if you want to do that.
+ * @param hConnection connection we're using
+ * @param regionInfo region information
+ * @param splitA first split daughter of the parent regionInfo
+ * @param splitB second split daughter of the parent regionInfo
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HConnection hConnection, HRegionInfo regionInfo,
+ HRegionInfo splitA, HRegionInfo splitB) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ addRegionToMeta(meta, regionInfo, splitA, splitB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Adds a hbase:meta row for each of the specified new regions.
+ * @param hConnection connection we're using
+ * @param regionInfos region information list
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionsToMeta(HConnection hConnection,
+ List<HRegionInfo> regionInfos)
+ throws IOException {
+ List<Put> puts = new ArrayList<Put>();
+ for (HRegionInfo regionInfo : regionInfos) {
+ puts.add(makePutFromRegionInfo(regionInfo));
+ }
+ putsToMetaTable(hConnection, puts);
+ LOG.info("Added " + puts.size());
+ }
+
+ /**
+ * Adds a daughter region entry to meta.
+ * @param regionInfo the region to put
+ * @param sn the location of the region
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ */
+ public static void addDaughter(final HConnection hConnection,
+ final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
+ throws NotAllMetaRegionsOnlineException, IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addRegionInfo(put, regionInfo);
+ if (sn != null) {
+ addLocation(put, sn, openSeqNum);
+ }
+ putToMetaTable(hConnection, put);
+ LOG.info("Added daughter " + regionInfo.getEncodedName() +
+ (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
+ }
+
+ /**
+ * Merge the two regions into one in an atomic operation. Deletes the two
+ * merging regions in hbase:meta and adds the merged region with the information of
+ * two merging regions.
+ * @param hConnection connection we're using
+ * @param mergedRegion the merged region
+ * @param regionA
+ * @param regionB
+ * @param sn the location of the region
+ * @throws IOException
+ */
+ public static void mergeRegions(final HConnection hConnection,
+ HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
+ ServerName sn) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
+
+ // Put for parent
+ Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
+ putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
+ regionA.toByteArray());
+ putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
+ regionB.toByteArray());
+
+ // Deletes for merging regions
+ Delete deleteA = makeDeleteFromRegionInfo(regionA);
+ Delete deleteB = makeDeleteFromRegionInfo(regionB);
+
+ // The merged is a new region, openSeqNum = 1 is fine.
+ addLocation(putOfMerged, sn, 1);
+
+ byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+ + HConstants.DELIMITER);
+ multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Splits the region into two in an atomic operation. Offlines the parent
+ * region with the information that it is split into two, and also adds
+ * the daughter regions. Does not add the location information to the daughter
+ * regions since they are not open yet.
+ * @param hConnection connection we're using
+ * @param parent the parent region which is split
+ * @param splitA Split daughter region A
+ * @param splitB Split daughter region B
+ * @param sn the location of the region
+ */
+ public static void splitRegion(final HConnection hConnection,
+ HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
+ ServerName sn) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ HRegionInfo copyOfParent = new HRegionInfo(parent);
+ copyOfParent.setOffline(true);
+ copyOfParent.setSplit(true);
+
+ //Put for parent
+ Put putParent = makePutFromRegionInfo(copyOfParent);
+ addDaughtersToPut(putParent, splitA, splitB);
+
+ //Puts for daughters
+ Put putA = makePutFromRegionInfo(splitA);
+ Put putB = makePutFromRegionInfo(splitB);
+
+ addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
+ addLocation(putB, sn, 1);
+
+ byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
+ multiMutate(meta, tableRow, putParent, putA, putB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Performs an atomic multi-Mutate operation against the given table.
+ */
+ private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
+ CoprocessorRpcChannel channel = table.coprocessorService(row);
+ MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
+ for (Mutation mutation : mutations) {
+ if (mutation instanceof Put) {
+ mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
+ } else if (mutation instanceof Delete) {
+ mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
+ } else {
+ throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
+ + mutation.getClass().getName());
+ }
+ }
+
+ MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
+ MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
+ try {
+ service.mutateRows(null, mmrBuilder.build());
+ } catch (ServiceException ex) {
+ throw ProtobufUtil.getRemoteException(ex);
+ }
+ }
+
+
+ /**
+ * Updates the location of the specified hbase:meta region to be the
+ * specified server hostname and startcode.
+ *
+ * Uses the passed HConnection to get a connection to the server hosting
+ * hbase:meta and makes edits to that region.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ * @throws IOException
+ * @throws java.net.ConnectException Usually because the regionserver carrying hbase:meta
+ * is down.
+ * @throws NullPointerException Because no hbase:meta server connection
+ */
+ public static void updateMetaLocation(HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long openSeqNum)
+ throws IOException, ConnectException {
+ updateLocation(hConnection, regionInfo, sn, openSeqNum);
+ }
+
+ /**
+ * Updates the location of the specified region in hbase:meta to be the specified
+ * server hostname and startcode.
+ *
+ * Uses the passed HConnection to get a connection to the server hosting
+ * hbase:meta and makes edits to that region.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @throws IOException
+ */
+ public static void updateRegionLocation(HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
+ throws IOException {
+ updateLocation(hConnection, regionInfo, sn, updateSeqNum);
+ }
+
+ /**
+ * Updates the location of the specified region to be the specified server.
+ *
+ * Connects to the specified server which should be hosting the specified
+ * catalog region name to perform the edit.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ * @throws IOException In particular could throw {@link java.net.ConnectException}
+ * if the server is down on other end.
+ */
+ private static void updateLocation(final HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long openSeqNum)
+ throws IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addLocation(put, sn, openSeqNum);
+ putToMetaTable(hConnection, put);
+ LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
+ " with server=" + sn);
+ }
+
+ /**
+ * Deletes the specified region from META.
+ * @param hConnection connection we're using
+ * @param regionInfo region to be deleted from META
+ * @throws IOException
+ */
+ public static void deleteRegion(HConnection hConnection,
+ HRegionInfo regionInfo)
+ throws IOException {
+ Delete delete = new Delete(regionInfo.getRegionName());
+ deleteFromMetaTable(hConnection, delete);
+ LOG.info("Deleted " + regionInfo.getRegionNameAsString());
+ }
+
+ /**
+ * Deletes the specified regions from META.
+ * @param hConnection connection we're using
+ * @param regionsInfo list of regions to be deleted from META
+ * @throws IOException
+ */
+ public static void deleteRegions(HConnection hConnection,
+ List<HRegionInfo> regionsInfo) throws IOException {
+ List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
+ for (HRegionInfo hri: regionsInfo) {
+ deletes.add(new Delete(hri.getRegionName()));
+ }
+ deleteFromMetaTable(hConnection, deletes);
+ LOG.info("Deleted " + regionsInfo);
+ }
+
+ /**
+ * Adds and Removes the specified regions from hbase:meta
+ * @param hConnection connection we're using
+ * @param regionsToRemove list of regions to be deleted from META
+ * @param regionsToAdd list of regions to be added to META
+ * @throws IOException
+ */
+ public static void mutateRegions(HConnection hConnection,
+ final List<HRegionInfo> regionsToRemove,
+ final List<HRegionInfo> regionsToAdd)
+ throws IOException {
+ List<Mutation> mutation = new ArrayList<Mutation>();
+ if (regionsToRemove != null) {
+ for (HRegionInfo hri: regionsToRemove) {
+ mutation.add(new Delete(hri.getRegionName()));
+ }
+ }
+ if (regionsToAdd != null) {
+ for (HRegionInfo hri: regionsToAdd) {
+ mutation.add(makePutFromRegionInfo(hri));
+ }
+ }
+ mutateMetaTable(hConnection, mutation);
+ if (regionsToRemove != null && regionsToRemove.size() > 0) {
+ LOG.debug("Deleted " + regionsToRemove);
+ }
+ if (regionsToAdd != null && regionsToAdd.size() > 0) {
+ LOG.debug("Added " + regionsToAdd);
+ }
+ }
+
+ /**
+ * Overwrites the specified regions from hbase:meta
+ * @param hConnection connection we're using
+ * @param regionInfos list of regions to be added to META
+ * @throws IOException
+ */
+ public static void overwriteRegions(HConnection hConnection,
+ List<HRegionInfo> regionInfos) throws IOException {
+ deleteRegions(hConnection, regionInfos);
+ // Why sleep? This is the easiest way to ensure that the previous deletes does not
+ // eclipse the following puts, that might happen in the same ts from the server.
+ // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
+ // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
+ Threads.sleep(20);
+ addRegionsToMeta(hConnection, regionInfos);
+ LOG.info("Overwritten " + regionInfos);
+ }
+
+ /**
+ * Deletes merge qualifiers for the specified merged region.
+ * @param hConnection connection we're using
+ * @param mergedRegion
+ * @throws IOException
+ */
+ public static void deleteMergeQualifiers(HConnection hConnection,
+ final HRegionInfo mergedRegion) throws IOException {
+ Delete delete = new Delete(mergedRegion.getRegionName());
+ delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
+ delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
+ deleteFromMetaTable(hConnection, delete);
+ LOG.info("Deleted references in merged region "
+ + mergedRegion.getRegionNameAsString() + ", qualifier="
+ + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
+ + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
+ }
+
+ private static Put addRegionInfo(final Put p, final HRegionInfo hri)
+ throws IOException {
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ hri.toByteArray());
+ return p;
+ }
+
+ private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ Bytes.toBytes(sn.getHostAndPort()));
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ Bytes.toBytes(sn.getStartcode()));
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
+ Bytes.toBytes(openSeqNum));
+ return p;
+ }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 5a2fbbd..2143cd8 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -59,8 +59,7 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -127,7 +126,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
@@ -143,6 +141,8 @@ import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
@@ -232,30 +232,6 @@ public class HBaseAdmin implements Admin {
this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
}
- /**
- * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)}
- * to cleanup the returned catalog tracker.
- * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
- * @throws IOException
- * @see #cleanupCatalogTracker(CatalogTracker)
- */
- private synchronized CatalogTracker getCatalogTracker()
- throws ZooKeeperConnectionException, IOException {
- CatalogTracker ct = null;
- try {
- ct = new CatalogTracker(this.conf);
- ct.start();
- } catch (InterruptedException e) {
- // Let it out as an IOE for now until we redo all so tolerate IEs
- throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
- }
- return ct;
- }
-
- private void cleanupCatalogTracker(final CatalogTracker ct) {
- ct.stop();
- }
-
@Override
public void abort(String why, Throwable e) {
// Currently does nothing but throw the passed message and exception
@@ -288,16 +264,8 @@ public class HBaseAdmin implements Admin {
* @return True if table exists already.
* @throws IOException
*/
- public boolean tableExists(final TableName tableName)
- throws IOException {
- boolean b = false;
- CatalogTracker ct = getCatalogTracker();
- try {
- b = MetaReader.tableExists(ct, tableName);
- } finally {
- cleanupCatalogTracker(ct);
- }
- return b;
+ public boolean tableExists(final TableName tableName) throws IOException {
+ return MetaTableAccessor.tableExists(connection, tableName);
}
public boolean tableExists(final byte[] tableName)
@@ -659,7 +627,7 @@ public class HBaseAdmin implements Admin {
for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
try {
HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
- Scan scan = MetaReader.getScanForTableName(tableName);
+ Scan scan = MetaTableAccessor.getScanForTableName(tableName);
scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
ScanRequest request = RequestConverter.buildScanRequest(
firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
@@ -1362,28 +1330,23 @@ public class HBaseAdmin implements Admin {
* @throws IOException if a remote or network exception occurs
*/
public void closeRegion(final byte [] regionname, final String serverName)
- throws IOException {
- CatalogTracker ct = getCatalogTracker();
- try {
- if (serverName != null) {
- Pair pair = MetaReader.getRegion(ct, regionname);
- if (pair == null || pair.getFirst() == null) {
- throw new UnknownRegionException(Bytes.toStringBinary(regionname));
- } else {
- closeRegion(ServerName.valueOf(serverName), pair.getFirst());
- }
+ throws IOException {
+ if (serverName != null) {
+ Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
+ if (pair == null || pair.getFirst() == null) {
+ throw new UnknownRegionException(Bytes.toStringBinary(regionname));
} else {
- Pair pair = MetaReader.getRegion(ct, regionname);
- if (pair == null) {
- throw new UnknownRegionException(Bytes.toStringBinary(regionname));
- } else if (pair.getSecond() == null) {
- throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
- } else {
- closeRegion(pair.getSecond(), pair.getFirst());
- }
+ closeRegion(ServerName.valueOf(serverName), pair.getFirst());
+ }
+ } else {
+ Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
+ if (pair == null) {
+ throw new UnknownRegionException(Bytes.toStringBinary(regionname));
+ } else if (pair.getSecond() == null) {
+ throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
+ } else {
+ closeRegion(pair.getSecond(), pair.getFirst());
}
- } finally {
- cleanupCatalogTracker(ct);
}
}
@@ -1477,28 +1440,23 @@ public class HBaseAdmin implements Admin {
*/
public void flush(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
- try {
- Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
- if (regionServerPair != null) {
- if (regionServerPair.getSecond() == null) {
- throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
- } else {
- flush(regionServerPair.getSecond(), regionServerPair.getFirst());
- }
+ Pair<HRegionInfo, ServerName> regionServerPair
+ = getRegion(tableNameOrRegionName);
+ if (regionServerPair != null) {
+ if (regionServerPair.getSecond() == null) {
+ throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
- final TableName tableName = checkTableExists(
- TableName.valueOf(tableNameOrRegionName), ct);
- if (isTableDisabled(tableName)) {
- LOG.info("Table is disabled: " + tableName.getNameAsString());
- return;
- }
- execProcedure("flush-table-proc", tableName.getNameAsString(),
- new HashMap<String, String>());
+ flush(regionServerPair.getSecond(), regionServerPair.getFirst());
}
- } finally {
- cleanupCatalogTracker(ct);
+ } else {
+ final TableName tableName = checkTableExists(
+ TableName.valueOf(tableNameOrRegionName));
+ if (isTableDisabled(tableName)) {
+ LOG.info("Table is disabled: " + tableName.getNameAsString());
+ return;
+ }
+ execProcedure("flush-table-proc", tableName.getNameAsString(),
+ new HashMap<String, String>());
}
}
@@ -1636,10 +1594,25 @@ public class HBaseAdmin implements Admin {
private void compact(final byte[] tableNameOrRegionName,
final byte[] columnFamily,final boolean major)
throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
+ Abortable throwableAborter = new Abortable() {
+
+ @Override
+ public void abort(String why, Throwable e) {
+ throw new RuntimeException(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return true;
+ }
+ };
+
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
+ throwableAborter);
try {
Pair<HRegionInfo, ServerName> regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -1648,10 +1621,10 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List<Pair<HRegionInfo, ServerName>> pairs =
- MetaReader.getTableRegionsAndLocations(ct,
- tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+ tableName);
for (Pair pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
@@ -1667,7 +1640,7 @@ public class HBaseAdmin implements Admin {
}
}
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
}
@@ -1964,10 +1937,25 @@ public class HBaseAdmin implements Admin {
*/
public void split(final byte[] tableNameOrRegionName,
final byte [] splitPoint) throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
+ Abortable throwableAborter = new Abortable() {
+
+ @Override
+ public void abort(String why, Throwable e) {
+ throw new RuntimeException(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return true;
+ }
+ };
+
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
+ throwableAborter);
try {
Pair<HRegionInfo, ServerName> regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -1976,10 +1964,10 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List<Pair<HRegionInfo, ServerName>> pairs =
- MetaReader.getTableRegionsAndLocations(ct,
- tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+ tableName);
for (Pair pair: pairs) {
// May not be a server for a particular row
if (pair.getSecond() == null) continue;
@@ -1993,7 +1981,7 @@ public class HBaseAdmin implements Admin {
}
}
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
}
@@ -2045,19 +2033,18 @@ public class HBaseAdmin implements Admin {
/**
* @param tableNameOrRegionName Name of a table or name of a region.
- * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
* @return a pair of HRegionInfo and ServerName if tableNameOrRegionName is
- * a verified region name (we call {@link MetaReader#getRegion( CatalogTracker, byte[])}
+ * a verified region name (we call {@link
+ * org.apache.hadoop.hbase.catalog.MetaTableAccessor#getRegion(HConnection, byte[])}
* else null.
* Throw an exception if tableNameOrRegionName is null.
* @throws IOException
*/
- Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName,
- final CatalogTracker ct) throws IOException {
+ Pair<HRegionInfo, ServerName> getRegion(final byte[] tableNameOrRegionName) throws IOException {
if (tableNameOrRegionName == null) {
throw new IllegalArgumentException("Pass a table name or region name");
}
- Pair<HRegionInfo, ServerName> pair = MetaReader.getRegion(ct, tableNameOrRegionName);
+ Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, tableNameOrRegionName);
if (pair == null) {
final AtomicReference<Pair<HRegionInfo, ServerName>> result =
new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
@@ -2099,16 +2086,10 @@ public class HBaseAdmin implements Admin {
HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
}
- CatalogTracker ct = getCatalogTracker();
byte[] tmp = regionNameOrEncodedRegionName;
- try {
- Pair regionServerPair
- = getRegion(regionNameOrEncodedRegionName, ct);
- if (regionServerPair != null && regionServerPair.getFirst() != null) {
- tmp = regionServerPair.getFirst().getRegionName();
- }
- } finally {
- cleanupCatalogTracker(ct);
+ Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
+ if (regionServerPair != null && regionServerPair.getFirst() != null) {
+ tmp = regionServerPair.getFirst().getRegionName();
}
return tmp;
}
@@ -2116,16 +2097,13 @@ public class HBaseAdmin implements Admin {
/**
* Check if table exists or not
* @param tableName Name of a table.
- * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
* @return tableName instance
* @throws IOException if a remote or network exception occurs.
* @throws TableNotFoundException if table does not exist.
*/
- //TODO rename this method
- private TableName checkTableExists(
- final TableName tableName, CatalogTracker ct)
+ private TableName checkTableExists(final TableName tableName)
throws IOException {
- if (!MetaReader.tableExists(ct, tableName)) {
+ if (!MetaTableAccessor.tableExists(connection, tableName)) {
throw new TableNotFoundException(tableName);
}
return tableName;
@@ -2408,12 +2386,27 @@ public class HBaseAdmin implements Admin {
*/
public List getTableRegions(final TableName tableName)
throws IOException {
- CatalogTracker ct = getCatalogTracker();
+ Abortable throwableAborter = new Abortable() {
+
+ @Override
+ public void abort(String why, Throwable e) {
+ throw new RuntimeException(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return true;
+ }
+ };
+
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
+ throwableAborter);
List<HRegionInfo> Regions = null;
try {
- Regions = MetaReader.getTableRegions(ct, tableName, true);
+ Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
return Regions;
}
@@ -2524,10 +2517,24 @@ public class HBaseAdmin implements Admin {
public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
CompactionState state = CompactionState.NONE;
- CatalogTracker ct = getCatalogTracker();
+ Abortable throwableAborter = new Abortable() {
+
+ @Override
+ public void abort(String why, Throwable e) {
+ throw new RuntimeException(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return true;
+ }
+ };
+
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
+ throwableAborter);
try {
- Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ Pair<HRegionInfo, ServerName> regionServerPair = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -2541,9 +2548,9 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List<Pair<HRegionInfo, ServerName>> pairs =
- MetaReader.getTableRegionsAndLocations(ct, tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
@@ -2592,7 +2599,7 @@ public class HBaseAdmin implements Admin {
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
return state;
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 0305821..0d73bcd 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -49,7 +48,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
* connections are managed at a lower level.
*
* HConnections are used by {@link HTable} mostly but also by
- * {@link HBaseAdmin}, and {@link CatalogTracker}. HConnection instances can be shared. Sharing
+ * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaRegionLocator}.
+ * HConnection instances can be shared. Sharing
* is usually what you want because rather than each HConnection instance
* having to do its own discovery of regions out on the cluster, instead, all
* clients get to share the one cache of locations. {@link HConnectionManager} does the
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 529b7f6..674d552 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -56,7 +56,7 @@ class ZooKeeperRegistry implements Registry {
if (LOG.isTraceEnabled()) {
LOG.trace("Looking up meta region location in ZK," + " connection=" + this);
}
- ServerName servername = MetaRegionTracker.blockUntilAvailable(zkw, hci.rpcTimeout);
+ ServerName servername = MetaRegionLocator.blockUntilAvailable(zkw, hci.rpcTimeout);
if (LOG.isTraceEnabled()) {
LOG.trace("Looked up meta region location, connection=" + this +
"; serverName=" + ((servername == null) ? "null" : servername));
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionLocator.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionLocator.java
new file mode 100644
index 0000000..c9bb594
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionLocator.java
@@ -0,0 +1,392 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import com.google.common.base.Stopwatch;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.NoRouteToHostException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
+
+/**
+ * Util class to get meta region server location node in zookeeper.
+ *
+ * Meta region location is set by RegionServerServices.
+ * This class doesn't use set ZK watcher, rather reads data directly when requested.
+ *
+ * //TODO: rewrite using RPC calls to master to find out about hbase:meta.
+ */
+@InterfaceAudience.Private
+public class MetaRegionLocator {
+ private static final Log LOG = LogFactory.getLog(MetaRegionLocator.class);
+
+ /* Can't instantiate util class */
+ private MetaRegionLocator() {}
+
+ static final byte [] META_REGION_NAME =
+ HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
+
+ /**
+ * Checks if the meta region location is available.
+ * @return true if meta region location is available, false if not
+ */
+ public static boolean isLocationAvailable(ZooKeeperWatcher zkw) {
+ try {
+ return ZKUtil.getData(zkw, zkw.metaServerZNode) != null;
+ } catch(KeeperException e) {
+ LOG.error("ZK error trying to get hbase:meta from ZooKeeper");
+ return false;
+ } catch (InterruptedException e) {
+ LOG.error("ZK error trying to get hbase:meta from ZooKeeper");
+ return false;
+ }
+ }
+
+ /**
+ * Gets the meta region location, if available. Does not block. Does not set
+   * a watcher (in this regard it differs from {@link #getMetaRegionLocation}).
+ * @param zkw zookeeper connection to use
+ * @return server name or null if we failed to get the data.
+ */
+ public static ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw) {
+ try {
+ try {
+ return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
+ } catch (DeserializationException e) {
+ throw ZKUtil.convert(e);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return null;
+ }
+ } catch (KeeperException ke) {
+ return null;
+ }
+ }
+
+ /**
+ * Gets the meta region location, if available, and waits for up to the
+ * specified timeout if not immediately available.
+ * Given the zookeeper notification could be delayed, we will try to
+ * get the latest data.
+ * @param timeout maximum time to wait, in millis
+ * @return server name for server hosting meta region formatted as per
+ * {@link ServerName}, or null if none available
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public static ServerName waitMetaRegionLocation(ZooKeeperWatcher zkw, long timeout)
+ throws InterruptedException {
+ try {
+ if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
+ String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
+ + "There could be a mismatch with the one configured in the master.";
+ LOG.error(errorMsg);
+ throw new IllegalArgumentException(errorMsg);
+ }
+ } catch (KeeperException e) {
+ throw new IllegalStateException("KeeperException while trying to check baseZNode:", e);
+ }
+ return blockUntilAvailable(zkw, timeout);
+ }
+
+ /**
+ * Waits indefinitely for availability of hbase:meta. Used during
+ * cluster startup. Does not verify meta, just that something has been
+ * set up in zk.
+ * @see #waitMetaRegionLocation(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher, long)
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public static void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws InterruptedException {
+ Stopwatch stopwatch = new Stopwatch().start();
+ while (true) {
+ if (waitMetaRegionLocation(zkw, 100) != null) break;
+ long sleepTime = stopwatch.elapsedMillis();
+ // +1 in case sleepTime=0
+ if ((sleepTime + 1) % 10000 == 0) {
+ LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
+ }
+ }
+ }
+
+ /**
+ * Verify hbase:meta is deployed and accessible.
+ * @param timeout How long to wait on zk for meta address (passed through to
+   * the internal call to {@link #waitForMetaServerConnection(HConnection, ZooKeeperWatcher, long)}).
+ * @return True if the hbase:meta location is healthy.
+ * @throws java.io.IOException
+ * @throws InterruptedException
+ */
+ public static boolean verifyMetaRegionLocation(HConnection hConnection,
+ ZooKeeperWatcher zkw, final long timeout)
+ throws InterruptedException, IOException {
+ AdminProtos.AdminService.BlockingInterface service = null;
+ try {
+ service = waitForMetaServerConnection(hConnection, zkw, timeout);
+ } catch (NotAllMetaRegionsOnlineException e) {
+ // Pass
+ } catch (ServerNotRunningYetException e) {
+ // Pass -- remote server is not up so can't be carrying root
+ } catch (UnknownHostException e) {
+ // Pass -- server name doesn't resolve so it can't be assigned anything.
+ } catch (RegionServerStoppedException e) {
+ // Pass -- server name sends us to a server that is dying or already dead.
+ }
+ return (service == null)? false:
+ verifyRegionLocation(service,
+ getMetaRegionLocation(zkw), META_REGION_NAME);
+ }
+
+ /**
+ * Verify we can connect to hostingServer and that its carrying
+ * regionName.
+ * @param hostingServer Interface to the server hosting regionName
+ * @param address The servername that goes with the metaServer
+ * Interface. Used logging.
+ * @param regionName The regionname we are interested in.
+ * @return True if we were able to verify the region located at other side of
+ * the Interface.
+ * @throws IOException
+ */
+ // TODO: We should be able to get the ServerName from the AdminProtocol
+ // rather than have to pass it in. Its made awkward by the fact that the
+ // HRI is likely a proxy against remote server so the getServerName needs
+ // to be fixed to go to a local method or to a cache before we can do this.
+ private static boolean verifyRegionLocation(AdminService.BlockingInterface hostingServer,
+ final ServerName address, final byte [] regionName)
+ throws IOException {
+ if (hostingServer == null) {
+ LOG.info("Passed hostingServer is null");
+ return false;
+ }
+ Throwable t = null;
+ try {
+ // Try and get regioninfo from the hosting server.
+ return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null;
+ } catch (ConnectException e) {
+ t = e;
+ } catch (RetriesExhaustedException e) {
+ t = e;
+ } catch (RemoteException e) {
+ IOException ioe = e.unwrapRemoteException();
+ t = ioe;
+ } catch (IOException e) {
+ Throwable cause = e.getCause();
+ if (cause != null && cause instanceof EOFException) {
+ t = cause;
+ } else if (cause != null && cause.getMessage() != null
+ && cause.getMessage().contains("Connection reset")) {
+ t = cause;
+ } else {
+ t = e;
+ }
+ }
+ LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
+ " at address=" + address + ", exception=" + t);
+ return false;
+ }
+
+ /**
+ * Gets a connection to the server hosting meta, as reported by ZooKeeper,
+ * waiting up to the specified timeout for availability.
+ * @param timeout How long to wait on meta location
+ * @see #waitForMeta for additional information
+ * @return connection to server hosting meta
+ * @throws InterruptedException
+ * @throws NotAllMetaRegionsOnlineException if timed out waiting
+ * @throws IOException
+ * @deprecated Use #getMetaServerConnection(long)
+ */
+ public static AdminService.BlockingInterface waitForMetaServerConnection(HConnection hConnection,
+ ZooKeeperWatcher zkw, long timeout)
+ throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
+ return getMetaServerConnection(hConnection, zkw, timeout);
+ }
+
+ /**
+ * Gets a connection to the server hosting meta, as reported by ZooKeeper,
+ * waiting up to the specified timeout for availability.
+ *
+   * WARNING: Does not retry.  Use an {@link org.apache.hadoop.hbase.client.HTable} instead.
+ * @param timeout How long to wait on meta location
+ * @see #waitForMeta for additional information
+ * @return connection to server hosting meta
+ * @throws InterruptedException
+ * @throws NotAllMetaRegionsOnlineException if timed out waiting
+ * @throws IOException
+ */
+ static AdminService.BlockingInterface getMetaServerConnection(HConnection hConnection,
+ ZooKeeperWatcher zkw, long timeout)
+ throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
+ return getCachedConnection(hConnection, waitMetaRegionLocation(zkw, timeout));
+ }
+
+ /**
+ * @param sn ServerName to get a connection against.
+ * @return The AdminProtocol we got when we connected to sn
+ * May have come from cache, may not be good, may have been setup by this
+ * invocation, or may be null.
+ * @throws IOException
+ */
+ @SuppressWarnings("deprecation")
+ private static AdminService.BlockingInterface getCachedConnection(HConnection hConnection,
+ ServerName sn)
+ throws IOException {
+ if (sn == null) {
+ return null;
+ }
+ AdminService.BlockingInterface service = null;
+ try {
+ service = hConnection.getAdmin(sn);
+ } catch (RetriesExhaustedException e) {
+ if (e.getCause() != null && e.getCause() instanceof ConnectException) {
+ // Catch this; presume it means the cached connection has gone bad.
+ } else {
+ throw e;
+ }
+ } catch (SocketTimeoutException e) {
+ LOG.debug("Timed out connecting to " + sn);
+ } catch (NoRouteToHostException e) {
+ LOG.debug("Connecting to " + sn, e);
+ } catch (SocketException e) {
+ LOG.debug("Exception connecting to " + sn);
+ } catch (UnknownHostException e) {
+ LOG.debug("Unknown host exception connecting to " + sn);
+ } catch (RpcClient.FailedServerException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Server " + sn + " is in failed server list.");
+ }
+ } catch (IOException ioe) {
+ Throwable cause = ioe.getCause();
+ if (ioe instanceof ConnectException) {
+ // Catch. Connect refused.
+ } else if (cause != null && cause instanceof EOFException) {
+ // Catch. Other end disconnected us.
+ } else if (cause != null && cause.getMessage() != null &&
+ cause.getMessage().toLowerCase().contains("connection reset")) {
+ // Catch. Connection reset.
+ } else {
+ throw ioe;
+ }
+
+ }
+ return service;
+ }
+
+ /**
+ * Sets the location of hbase:meta in ZooKeeper to the
+ * specified server address.
+ * @param zookeeper zookeeper reference
+ * @param location The server hosting hbase:meta
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public static void setMetaLocation(ZooKeeperWatcher zookeeper,
+ final ServerName location)
+ throws KeeperException {
+ LOG.info("Setting hbase:meta region location in ZooKeeper as " + location);
+ // Make the MetaRegionServer pb and then get its bytes and save this as
+ // the znode content.
+ byte [] data = toByteArray(location);
+ try {
+ ZKUtil.createAndWatch(zookeeper, zookeeper.metaServerZNode, data);
+ } catch(KeeperException.NodeExistsException nee) {
+ LOG.debug("META region location already existed, updated location");
+ ZKUtil.setData(zookeeper, zookeeper.metaServerZNode, data);
+ }
+ }
+
+ /**
+ * Build up the znode content.
+ * @param sn What to put into the znode.
+ * @return The content of the meta-region-server znode
+ */
+ static byte [] toByteArray(final ServerName sn) {
+ // ZNode content is a pb message preceded by some pb magic.
+ HBaseProtos.ServerName pbsn =
+ HBaseProtos.ServerName.newBuilder()
+ .setHostName(sn.getHostname())
+ .setPort(sn.getPort())
+ .setStartCode(sn.getStartcode())
+ .build();
+
+ ZooKeeperProtos.MetaRegionServer pbrsr =
+ ZooKeeperProtos.MetaRegionServer.newBuilder()
+ .setServer(pbsn)
+ .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
+ .build();
+ return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
+ }
+
+ /**
+ * Deletes the location of hbase:meta in ZooKeeper.
+ * @param zookeeper zookeeper reference
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public static void deleteMetaLocation(ZooKeeperWatcher zookeeper)
+ throws KeeperException {
+ LOG.info("Unsetting hbase:meta region location in ZooKeeper");
+ try {
+ // Just delete the node. Don't need any watches.
+ ZKUtil.deleteNode(zookeeper, zookeeper.metaServerZNode);
+ } catch(KeeperException.NoNodeException nne) {
+ // Has already been deleted
+ }
+ }
+
+ /**
+ * Wait until the meta region is available.
+ * @param zkw
+ * @param timeout
+ * @return ServerName or null if we timed out.
+ * @throws InterruptedException
+ */
+ public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
+ final long timeout)
+ throws InterruptedException {
+ byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.metaServerZNode, timeout);
+ if (data == null) return null;
+ try {
+ return ServerName.parseFrom(data);
+ } catch (DeserializationException e) {
+ LOG.warn("Failed parse", e);
+ return null;
+ }
+ }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
deleted file mode 100644
index a3a7d02..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Tracks the meta region server location node in zookeeper.
- * Meta region location is set by RegionServerServices.
- * This class has a watcher on the meta location and notices changes.
- */
-@InterfaceAudience.Private
-public class MetaRegionTracker extends ZooKeeperNodeTracker {
- /**
- * Creates a meta region location tracker.
- *
- *
- * After construction, use {@link #start} to kick off tracking.
- *
- * @param watcher
- * @param abortable
- */
- public MetaRegionTracker(ZooKeeperWatcher watcher, Abortable abortable) {
- super(watcher, watcher.metaServerZNode, abortable);
- }
-
- /**
- * Checks if the meta region location is available.
- * @return true if meta region location is available, false if not
- */
- public boolean isLocationAvailable() {
- return super.getData(true) != null;
- }
-
- /**
- * Gets the meta region location, if available. Does not block. Sets a watcher.
- * @return server name or null if we failed to get the data.
- * @throws InterruptedException
- */
- public ServerName getMetaRegionLocation() throws InterruptedException {
- try {
- return ServerName.parseFrom(super.getData(true));
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-
- /**
- * Gets the meta region location, if available. Does not block. Does not set
- * a watcher (In this regard it differs from {@link #getMetaRegionLocation}.
- * @param zkw
- * @return server name or null if we failed to get the data.
- * @throws KeeperException
- */
- public static ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw)
- throws KeeperException {
- try {
- return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
- } catch (DeserializationException e) {
- throw ZKUtil.convert(e);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- return null;
- }
- }
-
- /**
- * Gets the meta region location, if available, and waits for up to the
- * specified timeout if not immediately available.
- * Given the zookeeper notification could be delayed, we will try to
- * get the latest data.
- * @param timeout maximum time to wait, in millis
- * @return server name for server hosting meta region formatted as per
- * {@link ServerName}, or null if none available
- * @throws InterruptedException if interrupted while waiting
- */
- public ServerName waitMetaRegionLocation(long timeout)
- throws InterruptedException {
- if (false == checkIfBaseNodeAvailable()) {
- String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
- + "There could be a mismatch with the one configured in the master.";
- LOG.error(errorMsg);
- throw new IllegalArgumentException(errorMsg);
- }
- try {
- return ServerName.parseFrom(super.blockUntilAvailable(timeout, true));
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-
- /**
- * Sets the location of hbase:meta in ZooKeeper to the
- * specified server address.
- * @param zookeeper zookeeper reference
- * @param location The server hosting hbase:meta
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void setMetaLocation(ZooKeeperWatcher zookeeper,
- final ServerName location)
- throws KeeperException {
- LOG.info("Setting hbase:meta region location in ZooKeeper as " + location);
- // Make the MetaRegionServer pb and then get its bytes and save this as
- // the znode content.
- byte [] data = toByteArray(location);
- try {
- ZKUtil.createAndWatch(zookeeper, zookeeper.metaServerZNode, data);
- } catch(KeeperException.NodeExistsException nee) {
- LOG.debug("META region location already existed, updated location");
- ZKUtil.setData(zookeeper, zookeeper.metaServerZNode, data);
- }
- }
-
- /**
- * Build up the znode content.
- * @param sn What to put into the znode.
- * @return The content of the meta-region-server znode
- */
- static byte [] toByteArray(final ServerName sn) {
- // ZNode content is a pb message preceded by some pb magic.
- HBaseProtos.ServerName pbsn =
- HBaseProtos.ServerName.newBuilder()
- .setHostName(sn.getHostname())
- .setPort(sn.getPort())
- .setStartCode(sn.getStartcode())
- .build();
-
- ZooKeeperProtos.MetaRegionServer pbrsr =
- ZooKeeperProtos.MetaRegionServer.newBuilder()
- .setServer(pbsn)
- .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
- .build();
- return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
- }
-
- /**
- * Deletes the location of hbase:meta in ZooKeeper.
- * @param zookeeper zookeeper reference
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void deleteMetaLocation(ZooKeeperWatcher zookeeper)
- throws KeeperException {
- LOG.info("Unsetting hbase:meta region location in ZooKeeper");
- try {
- // Just delete the node. Don't need any watches.
- ZKUtil.deleteNode(zookeeper, zookeeper.metaServerZNode);
- } catch(KeeperException.NoNodeException nne) {
- // Has already been deleted
- }
- }
-
- /**
- * Wait until the meta region is available.
- * @param zkw
- * @param timeout
- * @return ServerName or null if we timed out.
- * @throws InterruptedException
- */
- public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
- final long timeout)
- throws InterruptedException {
- byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.metaServerZNode, timeout);
- if (data == null) return null;
- try {
- return ServerName.parseFrom(data);
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index a68d8c9..68302d6 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent;
@@ -1590,7 +1589,7 @@ public class ZKUtil {
zkw.backupMasterAddressesZNode)) {
sb.append("\n ").append(child);
}
- sb.append("\nRegion server holding hbase:meta: " + MetaRegionTracker.getMetaRegionLocation(zkw));
+ sb.append("\nRegion server holding hbase:meta: " + MetaRegionLocator.getMetaRegionLocation(zkw));
sb.append("\nRegion servers:");
for (String child : listChildrenNoWatch(zkw, zkw.rsZNode)) {
sb.append("\n ").append(child);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
deleted file mode 100644
index 89fa0a9..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Writes region and assignment information to hbase:meta.
- * TODO: Put MetaReader and MetaEditor together; doesn't make sense having
- * them distinct. see HBASE-3475.
- */
-@InterfaceAudience.Private
-public class MetaEditor {
- // TODO: Strip CatalogTracker from this class. Its all over and in the end
- // its only used to get its Configuration so we can get associated
- // Connection.
- private static final Log LOG = LogFactory.getLog(MetaEditor.class);
-
- /**
- * Generates and returns a Put containing the region into for the catalog table
- */
- public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
- throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addRegionInfo(put, regionInfo);
- return put;
- }
-
- /**
- * Generates and returns a Delete containing the region info for the catalog
- * table
- */
- public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
- if (regionInfo == null) {
- throw new IllegalArgumentException("Can't make a delete for null region");
- }
- Delete delete = new Delete(regionInfo.getRegionName());
- return delete;
- }
-
- /**
- * Adds split daughters to the Put
- */
- public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
- if (splitA != null) {
- put.addImmutable(
- HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
- }
- if (splitB != null) {
- put.addImmutable(
- HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
- }
- return put;
- }
-
- /**
- * Put the passed p to the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param p Put to add to hbase:meta
- * @throws IOException
- */
- static void putToMetaTable(final CatalogTracker ct, final Put p)
- throws IOException {
- put(MetaReader.getMetaHTable(ct), p);
- }
-
- /**
- * Put the passed p to a catalog table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param p Put to add
- * @throws IOException
- */
- static void putToCatalogTable(final CatalogTracker ct, final Put p)
- throws IOException {
- put(MetaReader.getCatalogHTable(ct), p);
- }
-
- /**
- * @param t Table to use (will be closed when done).
- * @param p
- * @throws IOException
- */
- private static void put(final HTable t, final Put p) throws IOException {
- try {
- t.put(p);
- } finally {
- t.close();
- }
- }
-
- /**
- * Put the passed ps to the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param ps Put to add to hbase:meta
- * @throws IOException
- */
- public static void putsToMetaTable(final CatalogTracker ct, final List ps)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.put(ps);
- } finally {
- t.close();
- }
- }
-
- /**
- * Delete the passed d from the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param d Delete to add to hbase:meta
- * @throws IOException
- */
- static void deleteFromMetaTable(final CatalogTracker ct, final Delete d)
- throws IOException {
- List dels = new ArrayList(1);
- dels.add(d);
- deleteFromMetaTable(ct, dels);
- }
-
- /**
- * Delete the passed deletes from the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param deletes Deletes to add to hbase:meta This list should support #remove.
- * @throws IOException
- */
- public static void deleteFromMetaTable(final CatalogTracker ct, final List deletes)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.delete(deletes);
- } finally {
- t.close();
- }
- }
-
- /**
- * Execute the passed mutations against hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param mutations Puts and Deletes to execute on hbase:meta
- * @throws IOException
- */
- public static void mutateMetaTable(final CatalogTracker ct, final List mutations)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.batch(mutations);
- } catch (InterruptedException e) {
- InterruptedIOException ie = new InterruptedIOException(e.getMessage());
- ie.initCause(e);
- throw ie;
- } finally {
- t.close();
- }
- }
-
- /**
- * Adds a hbase:meta row for the specified new region.
- * @param regionInfo region information
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(CatalogTracker catalogTracker,
- HRegionInfo regionInfo)
- throws IOException {
- putToMetaTable(catalogTracker, makePutFromRegionInfo(regionInfo));
- LOG.info("Added " + regionInfo.getRegionNameAsString());
- }
-
- /**
- * Adds a hbase:meta row for the specified new region to the given catalog table. The
- * HTable is not flushed or closed.
- * @param meta the HTable for META
- * @param regionInfo region information
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
- addRegionToMeta(meta, regionInfo, null, null);
- }
-
- /**
- * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
- * not add its daughter's as different rows, but adds information about the daughters
- * in the same row as the parent. Use
- * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
- * if you want to do that.
- * @param meta the HTable for META
- * @param regionInfo region information
- * @param splitA first split daughter of the parent regionInfo
- * @param splitB second split daughter of the parent regionInfo
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
- HRegionInfo splitA, HRegionInfo splitB) throws IOException {
- Put put = makePutFromRegionInfo(regionInfo);
- addDaughtersToPut(put, splitA, splitB);
- meta.put(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Added " + regionInfo.getRegionNameAsString());
- }
- }
-
- /**
- * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
- * not add its daughter's as different rows, but adds information about the daughters
- * in the same row as the parent. Use
- * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
- * if you want to do that.
- * @param catalogTracker CatalogTracker on whose back we will ride the edit.
- * @param regionInfo region information
- * @param splitA first split daughter of the parent regionInfo
- * @param splitB second split daughter of the parent regionInfo
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(CatalogTracker catalogTracker, HRegionInfo regionInfo,
- HRegionInfo splitA, HRegionInfo splitB) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- addRegionToMeta(meta, regionInfo, splitA, splitB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Adds a hbase:meta row for each of the specified new regions.
- * @param catalogTracker CatalogTracker
- * @param regionInfos region information list
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionsToMeta(CatalogTracker catalogTracker,
- List<HRegionInfo> regionInfos)
- throws IOException {
- List<Put> puts = new ArrayList<Put>();
- for (HRegionInfo regionInfo : regionInfos) {
- puts.add(makePutFromRegionInfo(regionInfo));
- }
- putsToMetaTable(catalogTracker, puts);
- LOG.info("Added " + puts.size());
- }
-
- /**
- * Adds a daughter region entry to meta.
- * @param regionInfo the region to put
- * @param sn the location of the region
- * @param openSeqNum the latest sequence number obtained when the region was open
- */
- public static void addDaughter(final CatalogTracker catalogTracker,
- final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
- throws NotAllMetaRegionsOnlineException, IOException {
- Put put = new Put(regionInfo.getRegionName());
- addRegionInfo(put, regionInfo);
- if (sn != null) {
- addLocation(put, sn, openSeqNum);
- }
- putToMetaTable(catalogTracker, put);
- LOG.info("Added daughter " + regionInfo.getEncodedName() +
- (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
- }
-
- /**
- * Merge the two regions into one in an atomic operation. Deletes the two
- * merging regions in hbase:meta and adds the merged region with the information of
- * two merging regions.
- * @param catalogTracker the catalog tracker
- * @param mergedRegion the merged region
- * @param regionA
- * @param regionB
- * @param sn the location of the region
- * @throws IOException
- */
- public static void mergeRegions(final CatalogTracker catalogTracker,
- HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
- ServerName sn) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
-
- // Put for parent
- Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
- putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
- regionA.toByteArray());
- putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
- regionB.toByteArray());
-
- // Deletes for merging regions
- Delete deleteA = makeDeleteFromRegionInfo(regionA);
- Delete deleteB = makeDeleteFromRegionInfo(regionB);
-
- // The merged is a new region, openSeqNum = 1 is fine.
- addLocation(putOfMerged, sn, 1);
-
- byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
- + HConstants.DELIMITER);
- multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Splits the region into two in an atomic operation. Offlines the parent
- * region with the information that it is split into two, and also adds
- * the daughter regions. Does not add the location information to the daughter
- * regions since they are not open yet.
- * @param catalogTracker the catalog tracker
- * @param parent the parent region which is split
- * @param splitA Split daughter region A
- * @param splitB Split daughter region A
- * @param sn the location of the region
- */
- public static void splitRegion(final CatalogTracker catalogTracker,
- HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
- ServerName sn) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- HRegionInfo copyOfParent = new HRegionInfo(parent);
- copyOfParent.setOffline(true);
- copyOfParent.setSplit(true);
-
- //Put for parent
- Put putParent = makePutFromRegionInfo(copyOfParent);
- addDaughtersToPut(putParent, splitA, splitB);
-
- //Puts for daughters
- Put putA = makePutFromRegionInfo(splitA);
- Put putB = makePutFromRegionInfo(splitB);
-
- addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
- addLocation(putB, sn, 1);
-
- byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
- multiMutate(meta, tableRow, putParent, putA, putB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Performs an atomic multi-Mutate operation against the given table.
- */
- private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
- CoprocessorRpcChannel channel = table.coprocessorService(row);
- MutateRowsRequest.Builder mmrBuilder = MutateRowsRequest.newBuilder();
- for (Mutation mutation : mutations) {
- if (mutation instanceof Put) {
- mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
- } else if (mutation instanceof Delete) {
- mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
- } else {
- throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
- + mutation.getClass().getName());
- }
- }
-
- MultiRowMutationService.BlockingInterface service =
- MultiRowMutationService.newBlockingStub(channel);
- try {
- service.mutateRows(null, mmrBuilder.build());
- } catch (ServiceException ex) {
- ProtobufUtil.toIOException(ex);
- }
- }
-
-
- /**
- * Updates the location of the specified hbase:meta region in ROOT to be the
- * specified server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * ROOT and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException
- * @throws ConnectException Usually because the regionserver carrying hbase:meta
- * is down.
- * @throws NullPointerException Because no -ROOT- server connection
- */
- public static void updateMetaLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException, ConnectException {
- updateLocation(catalogTracker, regionInfo, sn, openSeqNum);
- }
-
- /**
- * Updates the location of the specified region in hbase:meta to be the specified
- * server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * hbase:meta and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @throws IOException
- */
- public static void updateRegionLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
- throws IOException {
- updateLocation(catalogTracker, regionInfo, sn, updateSeqNum);
- }
-
- /**
- * Updates the location of the specified region to be the specified server.
- *
- * Connects to the specified server which should be hosting the specified
- * catalog region name to perform the edit.
- *
- * @param catalogTracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException In particular could throw {@link java.net.ConnectException}
- * if the server is down on other end.
- */
- private static void updateLocation(final CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addLocation(put, sn, openSeqNum);
- putToCatalogTable(catalogTracker, put);
- LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
- " with server=" + sn);
- }
-
- /**
- * Deletes the specified region from META.
- * @param catalogTracker
- * @param regionInfo region to be deleted from META
- * @throws IOException
- */
- public static void deleteRegion(CatalogTracker catalogTracker,
- HRegionInfo regionInfo)
- throws IOException {
- Delete delete = new Delete(regionInfo.getRegionName());
- deleteFromMetaTable(catalogTracker, delete);
- LOG.info("Deleted " + regionInfo.getRegionNameAsString());
- }
-
- /**
- * Deletes the specified regions from META.
- * @param catalogTracker
- * @param regionsInfo list of regions to be deleted from META
- * @throws IOException
- */
- public static void deleteRegions(CatalogTracker catalogTracker,
- List<HRegionInfo> regionsInfo) throws IOException {
- List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
- for (HRegionInfo hri: regionsInfo) {
- deletes.add(new Delete(hri.getRegionName()));
- }
- deleteFromMetaTable(catalogTracker, deletes);
- LOG.info("Deleted " + regionsInfo);
- }
-
- /**
- * Adds and Removes the specified regions from hbase:meta
- * @param catalogTracker
- * @param regionsToRemove list of regions to be deleted from META
- * @param regionsToAdd list of regions to be added to META
- * @throws IOException
- */
- public static void mutateRegions(CatalogTracker catalogTracker,
- final List<HRegionInfo> regionsToRemove, final List<HRegionInfo> regionsToAdd)
- throws IOException {
- List<Mutation> mutation = new ArrayList<Mutation>();
- if (regionsToRemove != null) {
- for (HRegionInfo hri: regionsToRemove) {
- mutation.add(new Delete(hri.getRegionName()));
- }
- }
- if (regionsToAdd != null) {
- for (HRegionInfo hri: regionsToAdd) {
- mutation.add(makePutFromRegionInfo(hri));
- }
- }
- mutateMetaTable(catalogTracker, mutation);
- if (regionsToRemove != null && regionsToRemove.size() > 0) {
- LOG.debug("Deleted " + regionsToRemove);
- }
- if (regionsToAdd != null && regionsToAdd.size() > 0) {
- LOG.debug("Added " + regionsToAdd);
- }
- }
-
- /**
- * Overwrites the specified regions from hbase:meta
- * @param catalogTracker
- * @param regionInfos list of regions to be added to META
- * @throws IOException
- */
- public static void overwriteRegions(CatalogTracker catalogTracker,
- List<HRegionInfo> regionInfos) throws IOException {
- deleteRegions(catalogTracker, regionInfos);
- // Why sleep? This is the easiest way to ensure that the previous deletes does not
- // eclipse the following puts, that might happen in the same ts from the server.
- // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
- // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
- Threads.sleep(20);
- addRegionsToMeta(catalogTracker, regionInfos);
- LOG.info("Overwritten " + regionInfos);
- }
-
- /**
- * Deletes merge qualifiers for the specified merged region.
- * @param catalogTracker
- * @param mergedRegion
- * @throws IOException
- */
- public static void deleteMergeQualifiers(CatalogTracker catalogTracker,
- final HRegionInfo mergedRegion) throws IOException {
- Delete delete = new Delete(mergedRegion.getRegionName());
- delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
- delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
- deleteFromMetaTable(catalogTracker, delete);
- LOG.info("Deleted references in merged region "
- + mergedRegion.getRegionNameAsString() + ", qualifier="
- + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
- + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
- }
-
- private static Put addRegionInfo(final Put p, final HRegionInfo hri)
- throws IOException {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- hri.toByteArray());
- return p;
- }
-
- private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(sn.getHostAndPort()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
- Bytes.toBytes(sn.getStartcode()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
- Bytes.toBytes(openSeqNum));
- return p;
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
deleted file mode 100644
index f623c4b..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * A tool to migrate the data stored in hbase:meta table to pbuf serialization.
- * Supports migrating from 0.92.x and 0.94.x to 0.96.x for the catalog table.
- * @deprecated will be removed for the major release after 0.96.
- */
-@Deprecated
-public class MetaMigrationConvertingToPB {
-
- private static final Log LOG = LogFactory.getLog(MetaMigrationConvertingToPB.class);
-
- private static class ConvertToPBMetaVisitor implements Visitor {
- private final MasterServices services;
- private long numMigratedRows;
-
- public ConvertToPBMetaVisitor(MasterServices services) {
- this.services = services;
- numMigratedRows = 0;
- }
-
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- // Check info:regioninfo, info:splitA, and info:splitB. Make sure all
- // have migrated HRegionInfos.
- byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
- // Presumes that an edit updating all three cells either succeeds or
- // doesn't -- that we don't have case of info:regioninfo migrated but not
- // info:splitA.
- if (isMigrated(hriBytes)) return true;
- // OK. Need to migrate this row in meta.
-
- //This will 'migrate' the HRI from 092.x and 0.94.x to 0.96+ by reading the
- //writable serialization
- HRegionInfo hri = parseFrom(hriBytes);
-
- // Now make a put to write back to meta.
- Put p = MetaEditor.makePutFromRegionInfo(hri);
-
- // Now migrate info:splitA and info:splitB if they are not null
- migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
- migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
-
- MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Migrated " + Bytes.toString(p.getRow()));
- }
- numMigratedRows++;
- return true;
- }
- }
-
- static void migrateSplitIfNecessary(final Result r, final Put p, final byte [] which)
- throws IOException {
- byte [] hriSplitBytes = getBytes(r, which);
- if (!isMigrated(hriSplitBytes)) {
- //This will 'migrate' the HRI from 092.x and 0.94.x to 0.96+ by reading the
- //writable serialization
- HRegionInfo hri = parseFrom(hriSplitBytes);
- p.addImmutable(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
- }
- }
-
- static HRegionInfo parseFrom(byte[] hriBytes) throws IOException {
- try {
- return HRegionInfo.parseFrom(hriBytes);
- } catch (DeserializationException ex) {
- throw new IOException(ex);
- }
- }
-
- /**
- * @param r Result to dig in.
- * @param qualifier Qualifier to look at in the passed r.
- * @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
- */
- static byte [] getBytes(final Result r, final byte [] qualifier) {
- byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
- if (hriBytes == null || hriBytes.length <= 0) return null;
- return hriBytes;
- }
-
- static boolean isMigrated(final byte [] hriBytes) {
- if (hriBytes == null || hriBytes.length <= 0) return true;
-
- return ProtobufUtil.isPBMagicPrefix(hriBytes);
- }
-
- /**
- * Converting writable serialization to PB, if it is needed.
- * @param services MasterServices to get a handle on master
- * @return num migrated rows
- * @throws IOException or RuntimeException if something goes wrong
- */
- public static long updateMetaIfNecessary(final MasterServices services)
- throws IOException {
- if (isMetaTableUpdated(services.getCatalogTracker())) {
- LOG.info("META already up-to date with PB serialization");
- return 0;
- }
- LOG.info("META has Writable serializations, migrating hbase:meta to PB serialization");
- try {
- long rows = updateMeta(services);
- LOG.info("META updated with PB serialization. Total rows updated: " + rows);
- return rows;
- } catch (IOException e) {
- LOG.warn("Update hbase:meta with PB serialization failed." + "Master startup aborted.");
- throw e;
- }
- }
-
- /**
- * Update hbase:meta rows, converting writable serialization to PB
- * @return num migrated rows
- */
- static long updateMeta(final MasterServices masterServices) throws IOException {
- LOG.info("Starting update of META");
- ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
- MetaReader.fullScan(masterServices.getCatalogTracker(), v);
- LOG.info("Finished update of META. Total rows updated:" + v.numMigratedRows);
- return v.numMigratedRows;
- }
-
- /**
- * @param catalogTracker the catalog tracker
- * @return True if the meta table has been migrated.
- * @throws IOException
- */
- static boolean isMetaTableUpdated(final CatalogTracker catalogTracker) throws IOException {
- List<Result> results = MetaReader.fullScanOfMeta(catalogTracker);
- if (results == null || results.isEmpty()) {
- LOG.info("hbase:meta doesn't have any entries to update.");
- return true;
- }
- for (Result r : results) {
- byte[] value = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- if (!isMigrated(value)) {
- return false;
- }
- }
- return true;
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 7e7ba76..b8d7dfa 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@@ -212,7 +212,7 @@ public class TableMapReduceUtil {
MutationSerialization.class.getName(), ResultSerialization.class.getName());
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
@@ -278,7 +278,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, JobConf job)
throws IOException {
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@@ -293,7 +293,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumMapTasks(String table, JobConf job)
throws IOException {
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumMapTasks() > regions)
job.setNumMapTasks(regions);
}
@@ -308,7 +308,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, JobConf job)
throws IOException {
- job.setNumReduceTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+ job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
}
/**
@@ -321,7 +321,7 @@ public class TableMapReduceUtil {
*/
public static void setNumMapTasks(String table, JobConf job)
throws IOException {
- job.setNumMapTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+ job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 5153474..1dec97f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -636,7 +636,7 @@ public class TableMapReduceUtil {
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
- int regions = MetaReader.getRegionCount(conf, table);
+ int regions = MetaTableAccessor.getRegionCount(conf, table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
@@ -661,7 +661,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, Job job)
throws IOException {
- int regions = MetaReader.getRegionCount(job.getConfiguration(), table);
+ int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@@ -676,7 +676,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, Job job)
throws IOException {
- job.setNumReduceTasks(MetaReader.getRegionCount(job.getConfiguration(), table));
+ job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table));
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 529a333..2337db5 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -55,8 +55,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
import org.apache.hadoop.hbase.coordination.ZkOpenRegionCoordination;
@@ -77,7 +76,6 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
@@ -88,7 +86,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -125,8 +123,6 @@ public class AssignmentManager extends ZooKeeperListener {
private boolean shouldAssignRegionsWithFavoredNodes;
- private CatalogTracker catalogTracker;
-
private LoadBalancer balancer;
private final MetricsAssignmentManager metricsAssignmentManager;
@@ -235,20 +231,19 @@ public class AssignmentManager extends ZooKeeperListener {
*
* @param server
* @param serverManager
- * @param catalogTracker
+ * @param metaRegionLocator
* @param service
* @throws KeeperException
* @throws IOException
*/
public AssignmentManager(Server server, ServerManager serverManager,
- CatalogTracker catalogTracker, final LoadBalancer balancer,
+ final LoadBalancer balancer,
final ExecutorService service, MetricsMaster metricsMaster,
final TableLockManager tableLockManager) throws KeeperException,
IOException, CoordinatedStateException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
- this.catalogTracker = catalogTracker;
this.executorService = service;
this.regionsToReopen = Collections.synchronizedMap
(new HashMap<String, HRegionInfo>());
@@ -373,7 +368,8 @@ public class AssignmentManager extends ZooKeeperListener {
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
throws IOException {
List<HRegionInfo> hris =
- MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
+ MetaTableAccessor.getTableRegions(this.watcher, this.server.getShortCircuitConnection(),
+ tableName, true);
Integer pending = 0;
for (HRegionInfo hri : hris) {
String name = hri.getEncodedName();
@@ -645,7 +641,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (regionInfo.isMetaRegion()) {
// If it's meta region, reset the meta location.
// So that master knows the right meta region server.
- MetaRegionTracker.setMetaLocation(watcher, sn);
+ MetaRegionLocator.setMetaLocation(watcher, sn);
} else {
// No matter the previous server is online or offline,
// we need to reset the last region server of the region.
@@ -1015,7 +1011,8 @@ public class AssignmentManager extends ZooKeeperListener {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
- FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
+ FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
+ this.server.getShortCircuitConnection());
}
/**
@@ -1038,7 +1035,8 @@ public class AssignmentManager extends ZooKeeperListener {
} else {
try {
byte [] name = rt.getRegionName();
- Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
+ Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(
+ this.server.getShortCircuitConnection(), name);
regionInfo = p.getFirst();
} catch (IOException e) {
LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
@@ -1804,14 +1802,15 @@ public class AssignmentManager extends ZooKeeperListener {
final HRegionInfo region, final ServerName sn) {
try {
if (region.isMetaRegion()) {
- ServerName server = catalogTracker.getMetaLocation();
+ ServerName server = MetaRegionLocator.getMetaRegionLocation(this.server.getZooKeeper());
return regionStates.isServerDeadAndNotProcessed(server);
}
while (!server.isStopped()) {
try {
- catalogTracker.waitForMeta();
+ MetaRegionLocator.waitMetaRegionLocation(server.getZooKeeper());
Pair<HRegionInfo, ServerName> r =
- MetaReader.getRegion(catalogTracker, region.getRegionName());
+ MetaTableAccessor.getRegion(server.getShortCircuitConnection(),
+ region.getRegionName());
ServerName server = r == null ? null : r.getSecond();
return regionStates.isServerDeadAndNotProcessed(server);
} catch (IOException ioe) {
@@ -2421,7 +2420,7 @@ public class AssignmentManager extends ZooKeeperListener {
* @throws KeeperException
*/
public void assignMeta() throws KeeperException {
- MetaRegionTracker.deleteMetaLocation(this.watcher);
+ MetaRegionLocator.deleteMetaLocation(this.watcher);
assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
@@ -2543,7 +2542,8 @@ public class AssignmentManager extends ZooKeeperListener {
// Scan hbase:meta for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions;
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
- new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
+ new SnapshotOfRegionAssignmentFromMeta(this.server.getShortCircuitConnection(),
+ disabledOrDisablingOrEnabling, true);
snapshotOfRegionAssignment.initialize();
allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
if (allRegions == null || allRegions.isEmpty()) return;
@@ -2614,7 +2614,7 @@ public class AssignmentManager extends ZooKeeperListener {
ZooKeeperProtos.Table.State.ENABLING);
// Region assignment from META
- List<Result> results = MetaReader.fullScan(this.catalogTracker);
+ List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection());
// Get any new but slow to checkin region server that joined the cluster
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
// Map of offline servers and their regions to be returned
@@ -2705,7 +2705,7 @@ public class AssignmentManager extends ZooKeeperListener {
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
- new DisableTableHandler(this.server, tableName, catalogTracker,
+ new DisableTableHandler(this.server, tableName,
this, tableLockManager, true).prepare().process();
}
}
@@ -2732,7 +2732,7 @@ public class AssignmentManager extends ZooKeeperListener {
// enableTable in sync way during master startup,
// no need to invoke coprocessor
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
- catalogTracker, this, tableLockManager, true);
+ this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 59bc01e..81a0e1a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -40,8 +40,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result;
@@ -199,7 +198,8 @@ public class CatalogJanitor extends Chore {
+ " from fs because merged region no longer holds references");
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
- MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
+ MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(),
+ mergedRegion);
return true;
}
return false;
@@ -331,7 +331,7 @@ public class CatalogJanitor extends Chore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
- MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
+ MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent);
result = true;
}
return result;
@@ -403,9 +403,9 @@ public class CatalogJanitor extends Chore {
throws IOException {
// Get merge regions if it is a merged region and already has merge
// qualifier
- Pair mergeRegions = MetaReader
- .getRegionsFromMergeQualifier(this.services.getCatalogTracker(),
- region.getRegionName());
+ Pair mergeRegions = MetaTableAccessor
+ .getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(),
+ region.getRegionName());
if (mergeRegions == null
|| (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
// It doesn't have merge qualifier, no need to clean
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c798e4b..c5643d6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -408,7 +409,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
this.loadBalancerTracker.start();
this.assignmentManager = new AssignmentManager(this, serverManager,
- this.catalogTracker, this.balancer, this.service, this.metricsMaster,
+ this.balancer, this.service, this.metricsMaster,
this.tableLockManager);
zooKeeper.registerListenerFirst(assignmentManager);
@@ -481,9 +482,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.serverManager = createServerManager(this, this);
- // Now we have the cluster ID, start catalog tracker
- startCatalogTracker();
-
// Invalidate all write locks held previously
this.tableLockManager.reapWriteLocks();
@@ -522,7 +520,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
// log splitting for hbase:meta server
- ServerName oldMetaServerLocation = this.catalogTracker.getMetaLocation();
+ ServerName oldMetaServerLocation = MetaRegionLocator.getMetaRegionLocation(this.getZooKeeper());
if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
splitMetaLogBeforeAssignment(oldMetaServerLocation);
// Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
@@ -571,12 +569,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.serverManager.processDeadServer(tmpServer, true);
}
- // Update meta with new PB serialization if required. i.e migrate all HRI to PB serialization
- // in meta. This must happen before we assign all user regions or else the assignment will
- // fail.
- org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB
- .updateMetaIfNecessary(this);
-
// Fix up assignment manager status
status.setStatus("Starting assignment manager");
this.assignmentManager.joinCluster();
@@ -674,8 +666,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO);
boolean rit = this.assignmentManager
.processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
- boolean metaRegionLocation = this.catalogTracker.verifyMetaRegionLocation(timeout);
- ServerName currentMetaServer = this.catalogTracker.getMetaLocation();
+ boolean metaRegionLocation = MetaRegionLocator.verifyMetaRegionLocation(
+ this.getShortCircuitConnection(), this.getZooKeeper(), timeout);
+ ServerName currentMetaServer = MetaRegionLocator.getMetaRegionLocation(this.getZooKeeper());
if (!metaRegionLocation) {
// Meta location is not verified. It should be in transition, or offline.
// We will wait for it to be assigned in enableSSHandWaitForMeta below.
@@ -722,7 +715,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
enableServerShutdownHandler(assigned != 0);
LOG.info("hbase:meta assigned=" + assigned + ", rit=" + rit +
- ", location=" + catalogTracker.getMetaLocation());
+ ", location=" + MetaRegionLocator.getMetaRegionLocation(this.getZooKeeper()));
status.setStatus("META assigned.");
}
@@ -762,7 +755,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
if (waitForMeta) {
- this.catalogTracker.waitForMeta();
+ MetaRegionLocator.waitMetaRegionLocation(this.getZooKeeper());
// Above check waits for general meta availability but this does not
// guarantee that the transition has completed
this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
@@ -1406,7 +1399,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
this.service.submit(new EnableTableHandler(this, tableName,
- catalogTracker, assignmentManager, tableLockManager, false).prepare());
+ assignmentManager, tableLockManager, false).prepare());
if (cpHost != null) {
cpHost.postEnableTable(tableName);
}
@@ -1420,7 +1413,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
this.service.submit(new DisableTableHandler(this, tableName,
- catalogTracker, assignmentManager, tableLockManager, false).prepare());
+ assignmentManager, tableLockManager, false).prepare());
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
@@ -1482,7 +1475,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (isCatalogTable(tableName)) {
throw new IOException("Can't modify catalog tables");
}
- if (!MetaReader.tableExists(getCatalogTracker(), tableName)) {
+ if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) {
throw new TableNotFoundException(tableName);
}
if (!getAssignmentManager().getTableStateManager().
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 59e93ad..8ced547 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.zookeeper.KeeperException;
/**
@@ -674,8 +675,9 @@ public class MasterFileSystem {
throws IOException {
if (!this.master.isStopped()) {
try {
- this.master.getCatalogTracker().waitForMeta();
- return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
+ MetaRegionLocator.waitMetaRegionLocation(this.master.getZooKeeper());
+ return MetaTableAccessor.getServerUserRegions(this.master.getShortCircuitConnection(),
+ serverName);
} catch (InterruptedException e) {
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b2581e7..1cf2098 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
@@ -1083,7 +1083,7 @@ public class MasterRpcServices extends RSRpcServices
try {
master.checkInitialized();
Pair pair =
- MetaReader.getRegion(master.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {
@@ -1214,7 +1214,7 @@ public class MasterRpcServices extends RSRpcServices
+ " actual: " + type);
}
Pair pair =
- MetaReader.getRegion(master.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
index 7a27489..cf45412 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
/**
* The servlet responsible for rendering the index page of the
@@ -87,12 +88,7 @@ public class MasterStatusServlet extends HttpServlet {
}
private ServerName getMetaLocationOrNull(HMaster master) {
- try {
- return (master.getCatalogTracker() == null) ? null : master.getCatalogTracker().getMetaLocation();
- } catch (InterruptedException e) {
- LOG.warn("Unable to get meta location", e);
- return null;
- }
+ return MetaRegionLocator.getMetaRegionLocation(master.getZooKeeper());
}
private Map getFragmentationInfo(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 848ce7a..282641e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -47,9 +47,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -141,7 +141,7 @@ public class RegionPlacementMaintainer {
public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot()
throws IOException {
SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot =
- new SnapshotOfRegionAssignmentFromMeta(new CatalogTracker(this.conf));
+ new SnapshotOfRegionAssignmentFromMeta(HConnectionManager.getConnection(conf));
currentAssignmentShapshot.initialize();
return currentAssignmentShapshot;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 5f96a22..079fcdf 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -761,7 +761,7 @@ public class RegionStates {
try {
Pair p =
- MetaReader.getRegion(server.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(server.getShortCircuitConnection(), regionName);
HRegionInfo hri = p == null ? null : p.getFirst();
if (hri != null) {
createRegionState(hri);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index b98c860..ed04b51 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor.Visitor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
@@ -54,7 +54,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
private static final Log LOG = LogFactory.getLog(SnapshotOfRegionAssignmentFromMeta.class
.getName());
- private CatalogTracker tracker;
+ private final HConnection hConnection;
/** the table name to region map */
private final Map> tableToRegionMap;
@@ -71,13 +71,13 @@ public class SnapshotOfRegionAssignmentFromMeta {
private final Set disabledTables;
private final boolean excludeOfflinedSplitParents;
- public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker) {
- this(tracker, new HashSet(), false);
+ public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection) {
+ this(hConnection, new HashSet(), false);
}
- public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker, Set disabledTables,
+ public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection, Set disabledTables,
boolean excludeOfflinedSplitParents) {
- this.tracker = tracker;
+ this.hConnection = hConnection;
tableToRegionMap = new HashMap>();
regionToRegionServerMap = new HashMap();
regionServerToRegionMap = new HashMap>();
@@ -94,7 +94,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " +
"snappshot");
- // TODO: at some point this code could live in the MetaReader
+ // TODO: at some point this code could live in the MetaTableAccessor
Visitor v = new Visitor() {
@Override
public boolean visit(Result result) throws IOException {
@@ -133,7 +133,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
}
};
// Scan hbase:meta to pick up user regions
- MetaReader.fullScan(tracker, v);
+ MetaTableAccessor.fullScan(hConnection, v);
//regionToRegionServerMap = regions;
LOG.info("Finished to scan the hbase:meta for the current region assignment" +
"snapshot");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index e8e5a28..b32c777 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.NamespaceExistException;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZKNamespaceManager;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
@@ -80,8 +80,8 @@ public class TableNamespaceManager {
}
public void start() throws IOException {
- if (!MetaReader.tableExists(masterServices.getCatalogTracker(),
- TableName.NAMESPACE_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(),
+ TableName.NAMESPACE_TABLE_NAME)) {
LOG.info("Namespace table not found. Creating...");
createNamespaceTable(masterServices);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 2a138d3..31a42d8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.RackManager;
@@ -91,7 +91,7 @@ public class FavoredNodeAssignmentHelper {
*/
public static void updateMetaWithFavoredNodesInfo(
Map> regionToFavoredNodes,
- CatalogTracker catalogTracker) throws IOException {
+ HConnection hConnection) throws IOException {
List puts = new ArrayList();
for (Map.Entry> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
@@ -99,7 +99,7 @@ public class FavoredNodeAssignmentHelper {
puts.add(put);
}
}
- MetaEditor.putsToMetaTable(catalogTracker, puts);
+ MetaTableAccessor.putsToMetaTable(hConnection, puts);
LOG.info("Added " + puts.size() + " regions in META");
}
@@ -141,7 +141,7 @@ public class FavoredNodeAssignmentHelper {
throws IOException {
Put put = null;
if (favoredNodeList != null) {
- put = MetaEditor.makePutFromRegionInfo(regionInfo);
+ put = MetaTableAccessor.makePutFromRegionInfo(regionInfo);
byte[] favoredNodes = getFavoredNodes(favoredNodeList);
put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index a2730c5..67d73df 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -73,7 +73,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
List plans = new ArrayList();
//perform a scan of the meta to get the latest updates (if any)
SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
- new SnapshotOfRegionAssignmentFromMeta(super.services.getCatalogTracker());
+ new SnapshotOfRegionAssignmentFromMeta(super.services.getShortCircuitConnection());
try {
snaphotOfRegionAssignment.initialize();
} catch (IOException ie) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index e723247..e81e7fc 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -35,9 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -51,6 +49,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
/**
* Handler to create a table.
@@ -62,7 +61,6 @@ public class CreateTableHandler extends EventHandler {
protected final HTableDescriptor hTableDescriptor;
protected final Configuration conf;
private final AssignmentManager assignmentManager;
- private final CatalogTracker catalogTracker;
private final TableLockManager tableLockManager;
private final HRegionInfo [] newRegions;
private final TableLock tableLock;
@@ -76,7 +74,6 @@ public class CreateTableHandler extends EventHandler {
this.hTableDescriptor = hTableDescriptor;
this.conf = conf;
this.newRegions = newRegions;
- this.catalogTracker = masterServices.getCatalogTracker();
this.assignmentManager = masterServices.getAssignmentManager();
this.tableLockManager = masterServices.getTableLockManager();
@@ -84,12 +81,11 @@ public class CreateTableHandler extends EventHandler {
, EventType.C_M_CREATE_TABLE.toString());
}
- public CreateTableHandler prepare()
- throws NotAllMetaRegionsOnlineException, TableExistsException, IOException {
+ public CreateTableHandler prepare() throws IOException {
int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
// Need hbase:meta availability to create a table
try {
- if (catalogTracker.waitForMeta(timeout) == null) {
+ if (MetaRegionLocator.waitMetaRegionLocation(this.server.getZooKeeper(), timeout) == null) {
throw new NotAllMetaRegionsOnlineException();
}
} catch (InterruptedException e) {
@@ -104,7 +100,7 @@ public class CreateTableHandler extends EventHandler {
boolean success = false;
try {
TableName tableName = this.hTableDescriptor.getTableName();
- if (MetaReader.tableExists(catalogTracker, tableName)) {
+ if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
throw new TableExistsException(tableName);
}
@@ -236,7 +232,7 @@ public class CreateTableHandler extends EventHandler {
if (regionInfos != null && regionInfos.size() > 0) {
// 4. Add regions to META
- addRegionsToMeta(this.catalogTracker, regionInfos);
+ addRegionsToMeta(regionInfos);
// 5. Trigger immediate assignment of the regions in round-robin fashion
ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
@@ -278,8 +274,8 @@ public class CreateTableHandler extends EventHandler {
/**
* Add the specified set of regions to the hbase:meta table.
*/
- protected void addRegionsToMeta(final CatalogTracker ct, final List regionInfos)
+ protected void addRegionsToMeta(final List regionInfos)
throws IOException {
- MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
+ MetaTableAccessor.addRegionsToMeta(this.server.getShortCircuitConnection(), regionInfos);
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index 8874ff1..668c2f9 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -133,7 +133,7 @@ public class DeleteTableHandler extends TableEventHandler {
throws IOException, CoordinatedStateException {
// 1. Remove regions from META
LOG.debug("Deleting regions from META");
- MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
+ MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);
// -----------------------------------------------------------------------
// NOTE: At this point we still have data on disk, but nothing in hbase:meta
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index a83eb3f..54d668c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
@@ -56,17 +55,14 @@ public class DisableTableHandler extends EventHandler {
private final TableName tableName;
private final AssignmentManager assignmentManager;
private final TableLockManager tableLockManager;
- private final CatalogTracker catalogTracker;
private final boolean skipTableStateCheck;
private TableLock tableLock;
- public DisableTableHandler(Server server, TableName tableName,
- CatalogTracker catalogTracker, AssignmentManager assignmentManager,
+ public DisableTableHandler(Server server, TableName tableName, AssignmentManager assignmentManager,
TableLockManager tableLockManager, boolean skipTableStateCheck) {
super(server, EventType.C_M_DISABLE_TABLE);
this.tableName = tableName;
this.assignmentManager = assignmentManager;
- this.catalogTracker = catalogTracker;
this.tableLockManager = tableLockManager;
this.skipTableStateCheck = skipTableStateCheck;
}
@@ -84,7 +80,7 @@ public class DisableTableHandler extends EventHandler {
boolean success = false;
try {
// Check if table exists
- if (!MetaReader.tableExists(catalogTracker, tableName)) {
+ if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
throw new TableNotFoundException(tableName);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 6fb2302..cb623ea 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -33,8 +33,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -58,16 +57,14 @@ public class EnableTableHandler extends EventHandler {
private final TableName tableName;
private final AssignmentManager assignmentManager;
private final TableLockManager tableLockManager;
- private final CatalogTracker catalogTracker;
private boolean skipTableStateCheck = false;
private TableLock tableLock;
public EnableTableHandler(Server server, TableName tableName,
- CatalogTracker catalogTracker, AssignmentManager assignmentManager,
- TableLockManager tableLockManager, boolean skipTableStateCheck) {
+ AssignmentManager assignmentManager, TableLockManager tableLockManager,
+ boolean skipTableStateCheck) {
super(server, EventType.C_M_ENABLE_TABLE);
this.tableName = tableName;
- this.catalogTracker = catalogTracker;
this.assignmentManager = assignmentManager;
this.tableLockManager = tableLockManager;
this.skipTableStateCheck = skipTableStateCheck;
@@ -83,7 +80,7 @@ public class EnableTableHandler extends EventHandler {
boolean success = false;
try {
// Check if table exists
- if (!MetaReader.tableExists(catalogTracker, tableName)) {
+ if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
// retainAssignment is true only during recovery. In normal case it is false
if (!this.skipTableStateCheck) {
throw new TableNotFoundException(tableName);
@@ -181,8 +178,9 @@ public class EnableTableHandler extends EventHandler {
ServerManager serverManager = ((HMaster)this.server).getServerManager();
// Get the regions of this table. We're done when all listed
// tables are onlined.
- List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations = MetaReader
- .getTableRegionsAndLocations(this.catalogTracker, tableName, true);
+ List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations = MetaTableAccessor
+ .getTableRegionsAndLocations(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), tableName, true);
int countOfRegionsInTable = tableRegionsAndLocations.size();
Map<HRegionInfo, ServerName> regionsToAssign =
regionsToAssignWithServerName(tableRegionsAndLocations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
index b7c1409..81e0c9f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.zookeeper.KeeperException;
import com.google.common.annotations.VisibleForTesting;
@@ -87,7 +88,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
verifyAndAssignMetaWithRetries();
- } else if (!this.services.getCatalogTracker().isMetaLocationAvailable()) {
+ } else if (!MetaRegionLocator.isLocationAvailable(this.server.getZooKeeper())) {
// the meta location as per master is null. This could happen in case when meta assignment
// in previous run failed, while meta znode has been updated to null. We should try to
// assign the meta again.
@@ -150,14 +151,16 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
throws InterruptedException, IOException, KeeperException {
long timeout = this.server.getConfiguration().
getLong("hbase.catalog.verification.timeout", 1000);
- if (!this.server.getCatalogTracker().verifyMetaRegionLocation(timeout)) {
+ if (!MetaRegionLocator.verifyMetaRegionLocation(this.server.getShortCircuitConnection(),
+ this.server.getZooKeeper(), timeout)) {
this.services.getAssignmentManager().assignMeta();
- } else if (serverName.equals(server.getCatalogTracker().getMetaLocation())) {
+ } else if (serverName.equals(MetaRegionLocator.getMetaRegionLocation(
+ this.server.getZooKeeper()))) {
throw new IOException("hbase:meta is onlined on the dead server "
+ serverName);
} else {
LOG.info("Skip assigning hbase:meta, because it is online on the "
- + server.getCatalogTracker().getMetaLocation());
+ + MetaRegionLocator.getMetaRegionLocation(this.server.getZooKeeper()));
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index fc391af..0d8a061 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -35,8 +35,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
@@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@@ -148,7 +148,7 @@ public class ServerShutdownHandler extends EventHandler {
// Wait on meta to come online; we need it to progress.
// TODO: Best way to hold strictly here? We should build this retry logic
- // into the MetaReader operations themselves.
+ // into the MetaTableAccessor operations themselves.
// TODO: Is the reading of hbase:meta necessary when the Master has state of
// cluster in its head? It should be possible to do without reading hbase:meta
// in all but one case. On split, the RS updates the hbase:meta
@@ -164,11 +164,11 @@ public class ServerShutdownHandler extends EventHandler {
NavigableMap<HRegionInfo, Result> hris = null;
while (!this.server.isStopped()) {
try {
- this.server.getCatalogTracker().waitForMeta();
+ MetaRegionLocator.waitMetaRegionLocation(this.server.getZooKeeper());
// Skip getting user regions if the server is stopped.
if (!this.server.isStopped()) {
- hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
- this.serverName);
+ hris = MetaTableAccessor.getServerUserRegions(this.server.getShortCircuitConnection(),
+ this.serverName);
}
break;
} catch (InterruptedException e) {
@@ -227,7 +227,7 @@ public class ServerShutdownHandler extends EventHandler {
Lock lock = am.acquireRegionLock(encodedName);
try {
RegionState rit = regionStates.getRegionTransitionState(hri);
- if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
+ if (processDeadRegion(hri, e.getValue(), am)) {
ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
// If this region is in transition on the dead server, it must be
@@ -334,7 +334,7 @@ public class ServerShutdownHandler extends EventHandler {
* @throws IOException
*/
public static boolean processDeadRegion(HRegionInfo hri, Result result,
- AssignmentManager assignmentManager, CatalogTracker catalogTracker)
+ AssignmentManager assignmentManager)
throws IOException {
boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
if (!tablePresent) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index ed34875..db3db69 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
@@ -125,8 +125,8 @@ public abstract class TableEventHandler extends EventHandler {
tableName);
List<HRegionInfo> hris =
- MetaReader.getTableRegions(this.server.getCatalogTracker(),
- tableName);
+ MetaTableAccessor.getTableRegions(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), tableName);
handleTableOperation(hris);
if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
getAssignmentManager().getTableStateManager().isTableState(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
index fce86d1..1dce3f2 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -124,7 +124,8 @@ public class TruncateTableHandler extends DeleteTableHandler {
}
// 4. Add regions to META
- MetaEditor.addRegionsToMeta(masterServices.getCatalogTracker(), regionInfos);
+ MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
+ regionInfos);
// 5. Trigger immediate assignment of the regions in round-robin fashion
ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index d902c58..b8dbcd3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterServices;
@@ -139,10 +138,10 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
}
@Override
- protected void addRegionsToMeta(final CatalogTracker ct, final List<HRegionInfo> regionInfos)
+ protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
throws IOException {
- super.addRegionsToMeta(ct, regionInfos);
- metaChanges.updateMetaParentRegions(ct, regionInfos);
+ super.addRegionsToMeta(regionInfos);
+ metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), regionInfos);
}
@Override
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index d5d9993..ce3d6b7 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
@@ -149,8 +149,8 @@ public final class MasterSnapshotVerifier {
* @throws IOException if we can't reach hbase:meta or read the files from the FS
*/
private void verifyRegions(final SnapshotManifest manifest) throws IOException {
- List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
- tableName);
+ List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(
+ this.services.getZooKeeper(), this.services.getShortCircuitConnection(), tableName);
Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
if (regionManifests == null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 2b97505..f71f2ef 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.executor.EventType;
@@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
@Override
protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
- CatalogTracker catalogTracker = masterServices.getCatalogTracker();
+ HConnection conn = masterServices.getShortCircuitConnection();
FileSystem fs = fileSystemManager.getFileSystem();
Path rootDir = fileSystemManager.getRootDir();
TableName tableName = hTableDescriptor.getTableName();
@@ -149,7 +149,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// that are not correct after the restore.
List<HRegionInfo> hrisToRemove = new LinkedList<HRegionInfo>();
if (metaChanges.hasRegionsToRemove()) hrisToRemove.addAll(metaChanges.getRegionsToRemove());
- MetaEditor.deleteRegions(catalogTracker, hrisToRemove);
+ MetaTableAccessor.deleteRegions(conn, hrisToRemove);
// 4.2 Add the new set of regions to META
//
@@ -159,11 +159,11 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// in the snapshot folder.
hris.clear();
if (metaChanges.hasRegionsToAdd()) hris.addAll(metaChanges.getRegionsToAdd());
- MetaEditor.addRegionsToMeta(catalogTracker, hris);
+ MetaTableAccessor.addRegionsToMeta(conn, hris);
if (metaChanges.hasRegionsToRestore()) {
- MetaEditor.overwriteRegions(catalogTracker, metaChanges.getRegionsToRestore());
+ MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore());
}
- metaChanges.updateMetaParentRegions(catalogTracker, hris);
+ metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), hris);
// At this point the restore is complete. Next step is enabling the table.
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index ddedf9f..0853afb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -694,7 +694,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation
- if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) {
+ if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) {
if (master.getAssignmentManager().getTableStateManager().isTableState(
TableName.valueOf(fsSnapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
throw new UnsupportedOperationException("Table '" +
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 4d6182b..2ba729d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -168,8 +168,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
monitor.rethrowException();
List<Pair<HRegionInfo, ServerName>> regionsAndLocations =
- MetaReader.getTableRegionsAndLocations(this.server.getCatalogTracker(),
- snapshotTable, false);
+ MetaTableAccessor.getTableRegionsAndLocations(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), snapshotTable, false);
// run the snapshot
snapshotRegions(regionsAndLocations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index b054813..9b1e3e6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -431,8 +431,8 @@ public class NamespaceUpgrade implements Tool {
newRegionDir);
}
}
- meta.put(MetaEditor.makePutFromRegionInfo(newRegionInfo));
- meta.delete(MetaEditor.makeDeleteFromRegionInfo(oldRegionInfo));
+ meta.put(MetaTableAccessor.makePutFromRegionInfo(newRegionInfo));
+ meta.delete(MetaTableAccessor.makeDeleteFromRegionInfo(oldRegionInfo));
}
} finally {
meta.flushcache();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index 16456c3..1025289 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -124,8 +124,9 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
List<Pair<HRegionInfo, ServerName>> regionsAndLocations = null;
try {
regionsAndLocations =
- MetaReader.getTableRegionsAndLocations(this.master.getCatalogTracker(),
- TableName.valueOf(desc.getInstance()), false);
+ MetaTableAccessor.getTableRegionsAndLocations(this.master.getZooKeeper(),
+ this.master.getShortCircuitConnection(),
+ TableName.valueOf(desc.getInstance()), false);
} catch (InterruptedException e1) {
String msg = "Failed to get regions for '" + desc.getInstance() + "'";
LOG.error(msg);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 7c198ad..0d9242d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -72,8 +72,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -132,7 +131,7 @@ import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.RecoveringRegionWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -179,8 +178,8 @@ public class HRegionServer extends HasThread implements
protected HeapMemoryManager hMemManager;
- // catalog tracker
- protected CatalogTracker catalogTracker;
+ // short-circuit connection to this server
+ protected HConnection scConnection;
// Watch if a region is out of recovering state from ZooKeeper
@SuppressWarnings("unused")
@@ -537,14 +536,13 @@ public class HRegionServer extends HasThread implements
}
/**
- * Create CatalogTracker.
+ * Create wrapped short-circuit connection to this server.
* In its own method so can intercept and mock it over in tests.
* @throws IOException
*/
- protected CatalogTracker createCatalogTracker() throws IOException {
- HConnection conn = ConnectionUtils.createShortCircuitHConnection(
+ protected HConnection createShortCircuitConnection() throws IOException {
+ return ConnectionUtils.createShortCircuitHConnection(
HConnectionManager.getConnection(conf), serverName, rpcServices, rpcServices);
- return new CatalogTracker(zooKeeper, conf, conn, this);
}
/**
@@ -620,7 +618,7 @@ public class HRegionServer extends HasThread implements
}
// Now we have the cluster ID, start catalog tracker
- startCatalogTracker();
+ scConnection = createShortCircuitConnection();
// watch for snapshots and other procedures
try {
@@ -695,17 +693,6 @@ public class HRegionServer extends HasThread implements
}
/**
- * Create and start the catalog tracker if not already done.
- */
- protected synchronized void startCatalogTracker()
- throws IOException, InterruptedException {
- if (catalogTracker == null) {
- catalogTracker = createCatalogTracker();
- catalogTracker.start();
- }
- }
-
- /**
* The HRegionServer sticks in this loop until closed.
*/
@Override
@@ -848,7 +835,13 @@ public class HRegionServer extends HasThread implements
}
// Interrupt catalog tracker here in case any regions being opened out in
// handlers are stuck waiting on meta.
- if (this.catalogTracker != null) this.catalogTracker.stop();
+ if (this.scConnection != null && !scConnection.isClosed()) try {
+ this.scConnection.close();
+ } catch (IOException e) {
+ // Although the {@link Closeable} interface throws an {@link
+ // IOException}, in reality, the implementation would never do that.
+ LOG.error("Attempt to close catalog tracker's connection failed.", e);
+ }
// Closing the compactSplit thread before closing meta regions
if (!this.killed && containsMetaTableRegions()) {
@@ -1625,8 +1618,8 @@ public class HRegionServer extends HasThread implements
}
@Override
- public CatalogTracker getCatalogTracker() {
- return this.catalogTracker;
+ public HConnection getShortCircuitConnection() {
+ return this.scConnection;
}
@Override
@@ -1653,7 +1646,7 @@ public class HRegionServer extends HasThread implements
}
@Override
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
+ public void postOpenDeployTasks(final HRegion r)
throws KeeperException, IOException {
rpcServices.checkOpen();
LOG.info("Post open deploy tasks for " + r.getRegionNameAsString());
@@ -1675,9 +1668,9 @@ public class HRegionServer extends HasThread implements
// Update ZK, or META
if (r.getRegionInfo().isMetaRegion()) {
- MetaRegionTracker.setMetaLocation(getZooKeeper(), serverName);
+ MetaRegionLocator.setMetaLocation(getZooKeeper(), serverName);
} else {
- MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
+ MetaTableAccessor.updateRegionLocation(scConnection, r.getRegionInfo(),
this.serverName, openSeqNum);
}
LOG.debug("Finished post open deploy task for " + r.getRegionNameAsString());
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9d90d1d..abac89e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
@@ -1204,8 +1204,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
// See HBASE-5094. Cross check with hbase:meta if still this RS is owning
// the region.
- Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(
- regionServer.catalogTracker, region.getRegionName());
+ Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(
+ regionServer.getShortCircuitConnection(), region.getRegionName());
if (regionServer.serverName.equals(p.getSecond())) {
Boolean closing = regionServer.regionsInTransitionInRS.get(region.getEncodedNameAsBytes());
// Map regionsInTransitionInRSOnly has an entry for a region only if the region
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 4c02cfd..a604cb2 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -38,10 +38,9 @@ import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.executor.EventType;
@@ -317,21 +316,21 @@ public class RegionMergeTransaction {
// rollback
if (!testing) {
if (metaEntries.isEmpty()) {
- MetaEditor.mergeRegions(server.getCatalogTracker(), mergedRegion.getRegionInfo(), region_a
+ MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), mergedRegion.getRegionInfo(), region_a
.getRegionInfo(), region_b.getRegionInfo(), server.getServerName());
} else {
- mergeRegionsAndPutMetaEntries(server.getCatalogTracker(), mergedRegion.getRegionInfo(),
+ mergeRegionsAndPutMetaEntries(server.getShortCircuitConnection(), mergedRegion.getRegionInfo(),
region_a.getRegionInfo(), region_b.getRegionInfo(), server.getServerName(), metaEntries);
}
}
return mergedRegion;
}
- private void mergeRegionsAndPutMetaEntries(CatalogTracker catalogTracker,
+ private void mergeRegionsAndPutMetaEntries(HConnection hConnection,
HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName serverName,
List<Mutation> metaEntries) throws IOException {
prepareMutationsForMerge(mergedRegion, regionA, regionB, serverName, metaEntries);
- MetaEditor.mutateMetaTable(catalogTracker, metaEntries);
+ MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
}
public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
@@ -339,13 +338,13 @@ public class RegionMergeTransaction {
HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
// Put for parent
- Put putOfMerged = MetaEditor.makePutFromRegionInfo(copyOfMerged);
+ Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged);
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
mutations.add(putOfMerged);
// Deletes for merging regions
- Delete deleteA = MetaEditor.makeDeleteFromRegionInfo(regionA);
- Delete deleteB = MetaEditor.makeDeleteFromRegionInfo(regionB);
+ Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA);
+ Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB);
mutations.add(deleteA);
mutations.add(deleteB);
// The merged is a new region, openSeqNum = 1 is fine.
@@ -548,7 +547,7 @@ public class RegionMergeTransaction {
if (services != null) {
try {
- services.postOpenDeployTasks(merged, server.getCatalogTracker());
+ services.postOpenDeployTasks(merged);
services.addToOnlineRegions(merged);
} catch (KeeperException ke) {
throw new IOException(ke);
@@ -926,8 +925,8 @@ public class RegionMergeTransaction {
if (services == null) return false;
// Get merge regions if it is a merged region and already has merge
// qualifier
- Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaReader
- .getRegionsFromMergeQualifier(services.getCatalogTracker(), regionName);
+ Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
+ .getRegionsFromMergeQualifier(services.getShortCircuitConnection(), regionName);
if (mergeRegions != null &&
(mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
// It has merge qualifier
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index d50fad7..02e18f9 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -25,7 +25,6 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager;
@@ -72,11 +71,10 @@ public interface RegionServerServices
* regionserver
*
* @param r Region to open.
- * @param ct Instance of {@link CatalogTracker}
* @throws KeeperException
* @throws IOException
*/
- void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
+ void postOpenDeployTasks(final HRegion r)
throws KeeperException, IOException;
/**
@@ -106,11 +104,6 @@ public interface RegionServerServices
ExecutorService getExecutorService();
/**
- * @return The RegionServer's CatalogTracker
- */
- CatalogTracker getCatalogTracker();
-
- /**
* @return set of recovering regions on the hosting region server
*/
Map<String, HRegion> getRecoveringRegions();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index db4dad9..970aad3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -274,11 +274,11 @@ public class SplitTransaction {
// and assign the parent region.
if (!testing) {
if (metaEntries == null || metaEntries.isEmpty()) {
- MetaEditor.splitRegion(server.getCatalogTracker(),
- parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
- daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+ MetaTableAccessor.splitRegion(server.getShortCircuitConnection(),
+ parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
+ daughterRegions.getSecond().getRegionInfo(), server.getServerName());
} else {
- offlineParentInMetaAndputMetaEntries(server.getCatalogTracker(),
+ offlineParentInMetaAndputMetaEntries(server.getShortCircuitConnection(),
parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
.getSecond().getRegionInfo(), server.getServerName(), metaEntries);
}
@@ -400,10 +400,10 @@ public class SplitTransaction {
if (services != null) {
try {
// add 2nd daughter first (see HBASE-4335)
- services.postOpenDeployTasks(b, server.getCatalogTracker());
+ services.postOpenDeployTasks(b);
// Should add it to OnlineRegions
services.addToOnlineRegions(b);
- services.postOpenDeployTasks(a, server.getCatalogTracker());
+ services.postOpenDeployTasks(a);
services.addToOnlineRegions(a);
} catch (KeeperException ke) {
throw new IOException(ke);
@@ -455,7 +455,7 @@ public class SplitTransaction {
return regions;
}
- private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
+ private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
ServerName serverName, List<Mutation> metaEntries) throws IOException {
List<Mutation> mutations = metaEntries;
@@ -464,19 +464,19 @@ public class SplitTransaction {
copyOfParent.setSplit(true);
//Put for parent
- Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
- MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
+ Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+ MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
mutations.add(putParent);
//Puts for daughters
- Put putA = MetaEditor.makePutFromRegionInfo(splitA);
- Put putB = MetaEditor.makePutFromRegionInfo(splitB);
+ Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
+ Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
addLocation(putB, serverName, 1);
mutations.add(putA);
mutations.add(putB);
- MetaEditor.mutateMetaTable(catalogTracker, mutations);
+ MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index 07235f5..36c045b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -281,9 +281,7 @@ public class OpenRegionHandler extends EventHandler {
/**
* Thread to run region post open tasks. Call {@link #getException()} after
* the thread finishes to check for exceptions running
- * {@link RegionServerServices#postOpenDeployTasks(
- * HRegion, org.apache.hadoop.hbase.catalog.CatalogTracker)}
- * .
+ * {@link RegionServerServices#postOpenDeployTasks(HRegion)}.
*/
static class PostOpenDeployTasksThread extends Thread {
private Throwable exception = null;
@@ -304,8 +302,7 @@ public class OpenRegionHandler extends EventHandler {
public void run() {
try {
- this.services.postOpenDeployTasks(this.region,
- this.server.getCatalogTracker());
+ this.services.postOpenDeployTasks(this.region);
} catch (IOException e) {
server.abort("Exception running postOpenDeployTasks; region=" +
this.region.getRegionInfo().getEncodedName(), e);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index cdb50fa..eb193e3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.util.Tool;
@@ -149,7 +149,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index e2eadc1..3f28b3a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -1116,8 +1116,8 @@ public class AccessController extends BaseRegionObserver
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
- if (!MetaReader.tableExists(ctx.getEnvironment().getMasterServices().getCatalogTracker(),
- AccessControlLists.ACL_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices()
+ .getShortCircuitConnection(), AccessControlLists.ACL_TABLE_NAME)) {
// initialize the ACL storage table
AccessControlLists.init(ctx.getEnvironment().getMasterServices());
} else {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index a152fcc..26a7881 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
@@ -215,7 +215,7 @@ public class VisibilityController extends BaseRegionObserver implements MasterOb
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
// Need to create the new system table for labels here
MasterServices master = ctx.getEnvironment().getMasterServices();
- if (!MetaReader.tableExists(master.getCatalogTracker(), LABELS_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(master.getShortCircuitConnection(), LABELS_TABLE_NAME)) {
HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
labelsColumn.setBloomFilterType(BloomType.NONE);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index d8d8642..a02250d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
-import java.io.InterruptedIOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
@@ -45,8 +44,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionM
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -253,7 +252,7 @@ public class RestoreSnapshotHelper {
/**
* Returns the list of new regions added during the on-disk restore.
* The caller is responsible to add the regions to META.
- * e.g MetaEditor.addRegionsToMeta(...)
+ * e.g MetaTableAccessor.addRegionsToMeta(...)
* @return the list of regions to add to META
*/
public List<HRegionInfo> getRegionsToAdd() {
@@ -286,7 +285,7 @@ public class RestoreSnapshotHelper {
/**
* Returns the list of regions removed during the on-disk restore.
* The caller is responsible to remove the regions from META.
- * e.g. MetaEditor.deleteRegions(...)
+ * e.g. MetaTableAccessor.deleteRegions(...)
* @return the list of regions to remove from META
*/
public List<HRegionInfo> getRegionsToRemove() {
@@ -315,7 +314,7 @@ public class RestoreSnapshotHelper {
regionsToRestore.add(hri);
}
- public void updateMetaParentRegions(final CatalogTracker catalogTracker,
+ public void updateMetaParentRegions(HConnection hConnection,
final List<HRegionInfo> regionInfos) throws IOException {
if (regionInfos == null || parentsMap.isEmpty()) return;
@@ -346,9 +345,9 @@ public class RestoreSnapshotHelper {
}
LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters);
- MetaEditor.addRegionToMeta(catalogTracker, regionInfo,
- regionsByName.get(daughters.getFirst()),
- regionsByName.get(daughters.getSecond()));
+ MetaTableAccessor.addRegionToMeta(hConnection, regionInfo,
+ regionsByName.get(daughters.getFirst()),
+ regionsByName.get(daughters.getSecond()));
}
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index d0c84b3..baec3d3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -71,7 +71,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.io.IOUtils;
@@ -1136,7 +1136,7 @@ public class HBaseFsck extends Configured {
// add the row directly to meta.
HbckInfo hi = his.iterator().next();
HRegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
- Put p = MetaEditor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
puts.add(p);
}
}
@@ -1517,7 +1517,7 @@ public class HBaseFsck extends Configured {
ZooKeeperWatcher zkw = createZooKeeperWatcher();
ServerName sn = null;
try {
- sn = MetaRegionTracker.getMetaRegionLocation(zkw);
+ sn = MetaRegionLocator.getMetaRegionLocation(zkw);
} finally {
zkw.close();
}
@@ -1617,7 +1617,7 @@ public class HBaseFsck extends Configured {
HRegionInfo hri = new HRegionInfo(hi.metaEntry);
hri.setOffline(false);
hri.setSplit(false);
- Put p = MetaEditor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
mutations.add(p);
meta.mutateRow(mutations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index d985299..053831a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
@@ -175,7 +175,7 @@ public class HBaseFsckRepair {
public static void fixMetaHoleOnline(Configuration conf,
HRegionInfo hri) throws IOException {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 80bf475..5d69827 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectable;
@@ -331,7 +331,7 @@ class HMerge {
}
newRegion.getRegionInfo().setOffline(true);
- MetaEditor.addRegionToMeta(table, newRegion.getRegionInfo());
+ MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());
if(LOG.isDebugEnabled()) {
LOG.debug("updated columns in row: "
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 7ed1530..4ecb75a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.NoServerForRegionException;
@@ -388,7 +388,7 @@ public class RegionSplitter {
// NOTE: createTable is synchronous on the table, but not on the regions
int onlineRegions = 0;
while (onlineRegions < splitCount) {
- onlineRegions = MetaReader.getRegionCount(conf, tableName);
+ onlineRegions = MetaTableAccessor.getRegionCount(conf, tableName);
LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
if (onlineRegions < splitCount) {
Thread.sleep(10 * 1000); // sleep
diff --git hbase-server/src/main/resources/hbase-webapps/master/table.jsp hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 8df53cb..d562c1d 100644
--- hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -30,6 +30,7 @@
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.RegionLoad"
import="org.apache.hadoop.hbase.master.HMaster"
+ import="org.apache.hadoop.hbase.zookeeper.MetaRegionLocator"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"
@@ -43,7 +44,7 @@
String fqtn = request.getParameter("name");
HTable table = new HTable(conf, fqtn);
String tableHeader = "Table Regions
| Name | Region Server | Start Key | End Key | Requests |
";
- ServerName rl = master.getCatalogTracker().getMetaLocation();
+ ServerName rl = MetaRegionLocator.getMetaRegionLocation(master.getZooKeeper());
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
Map<String, Integer> frags = null;
@@ -195,7 +196,7 @@
<%
// NOTE: Presumes one meta region only.
HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
- ServerName metaLocation = master.getCatalogTracker().waitForMeta(1);
+ ServerName metaLocation = MetaRegionLocator.waitMetaRegionLocation(master.getZooKeeper(), 1);
for (int i = 0; i < 1; i++) {
String url = "//" + metaLocation.getHostname() + ":" + master.getRegionServerInfoPort(metaLocation) + "/";
%>
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 3824294..9c5dec3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -1973,7 +1973,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(table.getName(),
startKeys[i], startKeys[j]);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
newRegions.add(hri);
count++;
}
@@ -2025,7 +2025,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
startKeys[j]);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
newRegions.add(hri);
}
@@ -2039,7 +2039,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @throws IOException When reading the rows fails.
*/
public List<byte[]> getMetaTableRows() throws IOException {
- // TODO: Redo using MetaReader class
+ // TODO: Redo using MetaTableAccessor class
HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
List<byte[]> rows = new ArrayList<byte[]>();
ResultScanner s = t.getScanner(new Scan());
@@ -2059,7 +2059,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @throws IOException When reading the rows fails.
*/
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
- // TODO: Redo using MetaReader.
+ // TODO: Redo using MetaTableAccessor.
HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
List<byte[]> rows = new ArrayList<byte[]>();
ResultScanner s = t.getScanner(new Scan());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 6d1cfc4..b63e3cf 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -26,7 +26,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -91,7 +91,7 @@ class MockRegionServerServices implements RegionServerServices {
}
@Override
- public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
+ public void postOpenDeployTasks(HRegion r)
throws KeeperException, IOException {
addToOnlineRegions(r);
}
@@ -126,7 +126,7 @@ class MockRegionServerServices implements RegionServerServices {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
index 777bdb1..27620a7 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -93,7 +92,6 @@ public class TestDrainingServer {
final ServerName SERVERNAME_A = ServerName.valueOf("mockserver_a.org", 1000, 8000);
final ServerName SERVERNAME_B = ServerName.valueOf("mockserver_b.org", 1001, 8000);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
- CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
@@ -137,7 +135,7 @@ public class TestDrainingServer {
Mockito.when(master.getServerManager()).thenReturn(serverManager);
- am = new AssignmentManager(server, serverManager, catalogTracker,
+ am = new AssignmentManager(server, serverManager,
balancer, startupMasterExecutor("mockExecutorService"), null, null);
Mockito.when(master.getAssignmentManager()).thenReturn(am);
@@ -164,7 +162,6 @@ public class TestDrainingServer {
public void testAssignmentManagerDoesntUseDrainedServerWithBulkAssign() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
- CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
AssignmentManager am;
final HMaster master = Mockito.mock(HMaster.class);
final Server server = Mockito.mock(Server.class);
@@ -242,7 +239,7 @@ public class TestDrainingServer {
drainedServers.add(SERVERNAME_C);
drainedServers.add(SERVERNAME_D);
- am = new AssignmentManager(server, serverManager, catalogTracker,
+ am = new AssignmentManager(server, serverManager,
balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
Mockito.when(master.getAssignmentManager()).thenReturn(am);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 0b0e290..81c5e5e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -29,8 +29,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -98,13 +97,9 @@ public class TestRegionRebalancing {
admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS,
1, HBaseTestingUtility.KEYS.length));
this.table = new HTable(UTIL.getConfiguration(), this.desc.getTableName());
- CatalogTracker ct = new CatalogTracker(UTIL.getConfiguration());
- ct.start();
- try {
- MetaReader.fullScanMetaAndPrint(ct);
- } finally {
- ct.stop();
- }
+
+ MetaTableAccessor.fullScanMetaAndPrint(admin.getConnection());
+
assertEquals("Test table should have right number of regions",
HBaseTestingUtility.KEYS.length,
this.table.getStartKeys().length);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
deleted file mode 100644
index caeafbd..0000000
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.ConnectException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.util.Progressable;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * Test {@link CatalogTracker}
- */
-@Category(MediumTests.class)
-public class TestCatalogTracker {
- private static final Log LOG = LogFactory.getLog(TestCatalogTracker.class);
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
- private static final ServerName SN =
- ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
- private ZooKeeperWatcher watcher;
- private Abortable abortable;
-
- @BeforeClass public static void beforeClass() throws Exception {
- // Set this down so tests run quicker
- UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
- UTIL.startMiniZKCluster();
- }
-
- @AfterClass public static void afterClass() throws IOException {
- UTIL.getZkCluster().shutdown();
- }
-
- @Before public void before() throws IOException {
- this.abortable = new Abortable() {
- @Override
- public void abort(String why, Throwable e) {
- LOG.info(why, e);
- }
-
- @Override
- public boolean isAborted() {
- return false;
- }
- };
- this.watcher = new ZooKeeperWatcher(UTIL.getConfiguration(),
- this.getClass().getSimpleName(), this.abortable, true);
- }
-
- @After public void after() {
- try {
- // Clean out meta location or later tests will be confused... they presume
- // start fresh in zk.
- MetaRegionTracker.deleteMetaLocation(this.watcher);
- } catch (KeeperException e) {
- LOG.warn("Unable to delete hbase:meta location", e);
- }
-
- // Clear out our doctored connection or could mess up subsequent tests.
- HConnectionManager.deleteConnection(UTIL.getConfiguration());
-
- this.watcher.close();
- }
-
- private CatalogTracker constructAndStartCatalogTracker(final HConnection c)
- throws IOException, InterruptedException {
- CatalogTracker ct = new CatalogTracker(this.watcher, UTIL.getConfiguration(),
- c, this.abortable);
- ct.start();
- return ct;
- }
-
- /**
- * Test that we get notification if hbase:meta moves.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- */
- @Test public void testThatIfMETAMovesWeAreNotified()
- throws IOException, InterruptedException, KeeperException {
- HConnection connection = Mockito.mock(HConnection.class);
- constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher,
- ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
- }
-
- /**
- * Test interruptable while blocking wait on meta.
- * @throws IOException
- * @throws ServiceException
- * @throws InterruptedException
- */
- @Test public void testInterruptWaitOnMeta()
- throws IOException, InterruptedException, ServiceException {
- final ClientProtos.ClientService.BlockingInterface client =
- Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
- HConnection connection = mockConnection(null, client);
-
- Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
- thenReturn(GetResponse.newBuilder().build());
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ServerName meta = ct.getMetaLocation();
- Assert.assertNull(meta);
- Thread t = new Thread() {
- @Override
- public void run() {
- try {
- ct.waitForMeta();
- } catch (InterruptedException e) {
- throw new RuntimeException("Interrupted", e);
- }
- }
- };
- t.start();
- while (!t.isAlive())
- Threads.sleep(1);
- Threads.sleep(1);
- assertTrue(t.isAlive());
- ct.stop();
- // Join the thread... should exit shortly.
- t.join();
- }
-
- private void testVerifyMetaRegionLocationWithException(Exception ex)
- throws IOException, InterruptedException, KeeperException, ServiceException {
- // Mock an ClientProtocol.
- final ClientProtos.ClientService.BlockingInterface implementation =
- Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
- HConnection connection = mockConnection(null, implementation);
-
- // If a 'get' is called on mocked interface, throw connection refused.
- Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
- thenThrow(new ServiceException(ex));
- // Now start up the catalogtracker with our doctored Connection.
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher, SN);
- long timeout = UTIL.getConfiguration().
- getLong("hbase.catalog.verification.timeout", 1000);
- Assert.assertFalse(ct.verifyMetaRegionLocation(timeout));
- }
-
- /**
- * Test we survive a connection refused {@link ConnectException}
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testGetMetaServerConnectionFails()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
- }
-
- /**
- * Test that verifyMetaRegionLocation properly handles getting a
- * ServerNotRunningException. See HBASE-4470.
- * Note this doesn't check the exact exception thrown in the
- * HBASE-4470 as there it is thrown from getHConnection() and
- * here it is thrown from get() -- but those are both called
- * from the same function anyway, and this way is less invasive than
- * throwing from getHConnection would be.
- *
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testVerifyMetaRegionServerNotRunning()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
- }
-
- /**
- * Test get of meta region fails properly if nothing to connect to.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testVerifyMetaRegionLocationFails()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- HConnection connection = Mockito.mock(HConnection.class);
- ServiceException connectException =
- new ServiceException(new ConnectException("Connection refused"));
- final AdminProtos.AdminService.BlockingInterface implementation =
- Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
- Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
- (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
- Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
- thenReturn(implementation);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher,
- ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
- Assert.assertFalse(ct.verifyMetaRegionLocation(100));
- }
-
- @Test (expected = NotAllMetaRegionsOnlineException.class)
- public void testTimeoutWaitForMeta()
- throws IOException, InterruptedException {
- HConnection connection = Mockito.mock(HConnection.class);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ct.waitForMeta(100);
- }
-
- /**
- * Test waiting on meat w/ no timeout specified.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- */
- @Test public void testNoTimeoutWaitForMeta()
- throws IOException, InterruptedException, KeeperException {
- HConnection connection = Mockito.mock(HConnection.class);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ServerName hsa = ct.getMetaLocation();
- Assert.assertNull(hsa);
-
- // Now test waiting on meta location getting set.
- Thread t = new WaitOnMetaThread(ct);
- startWaitAliveThenWaitItLives(t, 1);
- // Set a meta location.
- hsa = setMetaLocation();
- // Join the thread... should exit shortly.
- t.join();
- // Now meta is available.
- Assert.assertTrue(ct.getMetaLocation().equals(hsa));
- }
-
- private ServerName setMetaLocation() throws KeeperException {
- MetaRegionTracker.setMetaLocation(this.watcher, SN);
- return SN;
- }
-
- /**
- * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
- * want to pass a mocked HRS; can be null.
- * @param client A mocked ClientProtocol instance, can be null
- * @return Mock up a connection that returns a {@link Configuration} when
- * {@link HConnection#getConfiguration()} is called, a 'location' when
- * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
- * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
- * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
- * {@link ClientProtos.ClientService.BlockingInterface} instance when
- * {@link HConnection#getClient(ServerName)} is called (Be sure to call
- * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
- * when done with this mocked Connection.
- * @throws IOException
- */
- private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
- final ClientProtos.ClientService.BlockingInterface client)
- throws IOException {
- HConnection connection =
- HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
- Mockito.doNothing().when(connection).close();
- // Make it so we return any old location when asked.
- final HRegionLocation anyLocation =
- new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
- Mockito.when(connection.getRegionLocation((TableName) Mockito.any(),
- (byte[]) Mockito.any(), Mockito.anyBoolean())).
- thenReturn(anyLocation);
- Mockito.when(connection.locateRegion((TableName) Mockito.any(),
- (byte[]) Mockito.any())).
- thenReturn(anyLocation);
- if (admin != null) {
- // If a call to getHRegionConnection, return this implementation.
- Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).
- thenReturn(admin);
- }
- if (client != null) {
- // If a call to getClient, return this implementation.
- Mockito.when(connection.getClient(Mockito.any(ServerName.class))).
- thenReturn(client);
- }
- return connection;
- }
-
- /**
- * @return A mocked up Result that fakes a Get on a row in the
- * hbase:meta table.
- * @throws IOException
- */
- private Result getMetaTableRowResult() throws IOException {
- return MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO, SN);
- }
-
- private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
- t.start();
- while(!t.isAlive()) {
- // Wait
- }
- // Wait one second.
- Threads.sleep(ms);
- Assert.assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
- }
-
- class CountingProgressable implements Progressable {
- final AtomicInteger counter = new AtomicInteger(0);
- @Override
- public void progress() {
- this.counter.incrementAndGet();
- }
- }
-
- /**
- * Wait on META.
- */
- class WaitOnMetaThread extends Thread {
- final CatalogTracker ct;
-
- WaitOnMetaThread(final CatalogTracker ct) {
- super("WaitOnMeta");
- this.ct = ct;
- }
-
- @Override
- public void run() {
- try {
- doWaiting();
- } catch (InterruptedException e) {
- throw new RuntimeException("Failed wait", e);
- }
- LOG.info("Exiting " + getName());
- }
-
- void doWaiting() throws InterruptedException {
- try {
- while (this.ct.waitForMeta(100) == null);
- } catch (NotAllMetaRegionsOnlineException e) {
- // Ignore.
- }
- }
- }
-
-}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
deleted file mode 100644
index 5b6cb03..0000000
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
+++ /dev/null
@@ -1,440 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.catalog;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test migration that changes HRI serialization into PB. Tests by bringing up a cluster from actual
- * data from a 0.92 cluster, as well as manually downgrading and then upgrading the hbase:meta info.
- * @deprecated Remove after 0.96
- */
-@Category(MediumTests.class)
-@Deprecated
-public class TestMetaMigrationConvertingToPB {
- static final Log LOG = LogFactory.getLog(TestMetaMigrationConvertingToPB.class);
- private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
- private final static String TESTTABLE = "TestTable";
-
- private final static int ROW_COUNT = 100;
- private final static int REGION_COUNT = 9; //initial number of regions of the TestTable
-
- private static final int META_VERSION_092 = 0;
-
- /*
- * This test uses a tgz file named "TestMetaMigrationConvertingToPB.tgz" under
- * hbase-server/src/test/data which contains file data from a 0.92 cluster.
- * The cluster has a table named "TestTable", which has 100 rows. 0.94 has same
- * hbase:meta structure, so it should be the same.
- *
- * hbase(main):001:0> create 'TestTable', 'f1'
- * hbase(main):002:0> for i in 1..100
- * hbase(main):003:1> put 'TestTable', "row#{i}", "f1:c1", i
- * hbase(main):004:1> end
- *
- * There are 9 regions in the table
- */
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- // Start up our mini cluster on top of an 0.92 root.dir that has data from
- // a 0.92 hbase run -- it has a table with 100 rows in it -- and see if
- // we can migrate from 0.92
- TEST_UTIL.startMiniZKCluster();
- TEST_UTIL.startMiniDFSCluster(1);
- Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
- // Untar our test dir.
- File untar = untar(new File(testdir.toString()));
- // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
- Configuration conf = TEST_UTIL.getConfiguration();
- FsShell shell = new FsShell(conf);
- FileSystem fs = FileSystem.get(conf);
- // find where hbase will root itself, so we can copy filesystem there
- Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
- if (!fs.isDirectory(hbaseRootDir.getParent())) {
- // mkdir at first
- fs.mkdirs(hbaseRootDir.getParent());
- }
- doFsCommand(shell,
- new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
-
- //windows fix: tgz file has hbase:meta directory renamed as -META- since the original is an illegal
- //name under windows. So we rename it back. See src/test/data//TestMetaMigrationConvertingToPB.README and
- //https://issues.apache.org/jira/browse/HBASE-6821
- doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
- new Path(hbaseRootDir, ".META.").toString()});
- // See whats in minihdfs.
- doFsCommand(shell, new String [] {"-lsr", "/"});
-
- //upgrade to namespace as well
- Configuration toolConf = TEST_UTIL.getConfiguration();
- conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
- ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
-
- TEST_UTIL.startMiniHBaseCluster(1, 1);
- // Assert we are running against the copied-up filesystem. The copied-up
- // rootdir should have had a table named 'TestTable' in it. Assert it
- // present.
- HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
- ResultScanner scanner = t.getScanner(new Scan());
- int count = 0;
- while (scanner.next() != null) {
- count++;
- }
- // Assert that we find all 100 rows that are in the data we loaded. If
- // so then we must have migrated it from 0.90 to 0.92.
- Assert.assertEquals(ROW_COUNT, count);
- scanner.close();
- t.close();
- }
-
- private static File untar(final File testdir) throws IOException {
- // Find the src data under src/test/data
- final String datafile = "TestMetaMigrationConvertToPB";
- String srcTarFile =
- System.getProperty("project.build.testSourceDirectory", "src/test") +
- File.separator + "data" + File.separator + datafile + ".tgz";
- File homedir = new File(testdir.toString());
- File tgtUntarDir = new File(homedir, datafile);
- if (tgtUntarDir.exists()) {
- if (!FileUtil.fullyDelete(tgtUntarDir)) {
- throw new IOException("Failed delete of " + tgtUntarDir.toString());
- }
- }
- LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
- FileUtil.unTar(new File(srcTarFile), homedir);
- Assert.assertTrue(tgtUntarDir.exists());
- return tgtUntarDir;
- }
-
- private static void doFsCommand(final FsShell shell, final String [] args)
- throws Exception {
- // Run the 'put' command.
- int errcode = shell.run(args);
- if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode);
- }
-
- /**
- * @throws java.lang.Exception
- */
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- TEST_UTIL.shutdownMiniCluster();
- }
-
- @Test
- public void testMetaUpdatedFlagInROOT() throws Exception {
- HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
- boolean metaUpdated = MetaMigrationConvertingToPB.
- isMetaTableUpdated(master.getCatalogTracker());
- assertEquals(true, metaUpdated);
- verifyMetaRowsAreUpdated(master.getCatalogTracker());
- }
-
- @Test
- public void testMetaMigration() throws Exception {
- LOG.info("Starting testMetaMigration");
- final byte [] FAMILY = Bytes.toBytes("family");
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration"));
- HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- htd.addFamily(hcd);
- Configuration conf = TEST_UTIL.getConfiguration();
- byte[][] regionNames = new byte[][]{
- HConstants.EMPTY_START_ROW,
- Bytes.toBytes("region_a"),
- Bytes.toBytes("region_b")};
- createMultiRegionsWithWritableSerialization(conf,
- htd.getTableName().getName(),
- regionNames);
- CatalogTracker ct =
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
- // Erase the current version of root meta for this test.
- undoVersionInRoot(ct);
- MetaReader.fullScanMetaAndPrint(ct);
- LOG.info("Meta Print completed.testMetaMigration");
-
- long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
- TEST_UTIL.getHBaseCluster().getMaster());
- MetaReader.fullScanMetaAndPrint(ct);
-
- // Should be one entry only and it should be for the table we just added.
- assertEquals(regionNames.length, numMigratedRows);
-
- // Assert that the flag in ROOT is updated to reflect the correct status
- boolean metaUpdated =
- MetaMigrationConvertingToPB.isMetaTableUpdated(
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
- assertEquals(true, metaUpdated);
- verifyMetaRowsAreUpdated(ct);
- }
-
- /**
- * This test assumes a master crash/failure during the meta migration process
- * and attempts to continue the meta migration process when a new master takes over.
- * When a master dies during the meta migration we will have some rows of
- * META.CatalogFamily updated with PB serialization and some
- * still hanging with writable serialization. When the backup master/ or
- * fresh start of master attempts the migration it will encounter some rows of META
- * already updated with new HRI and some still legacy. This test will simulate this
- * scenario and validates that the migration process can safely skip the updated
- * rows and migrate any pending rows at startup.
- * @throws Exception
- */
- @Test
- public void testMasterCrashDuringMetaMigration() throws Exception {
- final byte[] FAMILY = Bytes.toBytes("family");
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf
- ("testMasterCrashDuringMetaMigration"));
- HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
- htd.addFamily(hcd);
- Configuration conf = TEST_UTIL.getConfiguration();
- // Create 10 New regions.
- createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10);
- // Create 10 Legacy regions.
- createMultiRegionsWithWritableSerialization(conf,
- htd.getTableName().getName(), 10);
- CatalogTracker ct =
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
- // Erase the current version of root meta for this test.
- undoVersionInRoot(ct);
-
- MetaReader.fullScanMetaAndPrint(ct);
- LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
-
- long numMigratedRows =
- MetaMigrationConvertingToPB.updateMetaIfNecessary(
- TEST_UTIL.getHBaseCluster().getMaster());
- assertEquals(numMigratedRows, 10);
-
- // Assert that the flag in ROOT is updated to reflect the correct status
- boolean metaUpdated = MetaMigrationConvertingToPB.
- isMetaTableUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
- assertEquals(true, metaUpdated);
-
- verifyMetaRowsAreUpdated(ct);
-
- LOG.info("END testMasterCrashDuringMetaMigration");
- }
-
- /**
- * Verify that every hbase:meta row is updated
- */
- void verifyMetaRowsAreUpdated(CatalogTracker catalogTracker)
- throws IOException {
- List results = MetaReader.fullScan(catalogTracker);
- assertTrue(results.size() >= REGION_COUNT);
-
- for (Result result : results) {
- byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER);
- assertTrue(hriBytes != null && hriBytes.length > 0);
- assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));
-
- byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SPLITA_QUALIFIER);
- if (splitA != null && splitA.length > 0) {
- assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
- }
-
- byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
- HConstants.SPLITB_QUALIFIER);
- if (splitB != null && splitB.length > 0) {
- assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
- }
- }
- }
-
- /** Changes the version of hbase:meta to 0 to simulate 0.92 and 0.94 clusters*/
- private void undoVersionInRoot(CatalogTracker ct) throws IOException {
- Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
-
- p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
- Bytes.toBytes(META_VERSION_092));
-
- // TODO wire this MetaEditor.putToRootTable(ct, p);
- LOG.info("Downgraded -ROOT- meta version=" + META_VERSION_092);
- }
-
- /**
- * Inserts multiple regions into hbase:meta using Writable serialization instead of PB
- */
- public int createMultiRegionsWithWritableSerialization(final Configuration c,
- final byte[] tableName, int numRegions) throws IOException {
- if (numRegions < 3) throw new IOException("Must create at least 3 regions");
- byte [] startKey = Bytes.toBytes("aaaaa");
- byte [] endKey = Bytes.toBytes("zzzzz");
- byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
- byte [][] regionStartKeys = new byte[splitKeys.length+1][];
- for (int i=0;i newRegions
- = new ArrayList(startKeys.length);
- int count = 0;
- for (int i = 0; i < startKeys.length; i++) {
- int j = (i + 1) % startKeys.length;
- HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
- Put put = new Put(hri.getRegionName());
- put.setDurability(Durability.SKIP_WAL);
- put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- getBytes(hri)); //this is the old Writable serialization
-
- //also add the region as it's daughters
- put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
- getBytes(hri)); //this is the old Writable serialization
-
- put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
- getBytes(hri)); //this is the old Writable serialization
-
- meta.put(put);
- LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());
-
- newRegions.add(hri);
- count++;
- }
- meta.close();
- return count;
- }
-
- @Deprecated
- private byte[] getBytes(HRegionInfo hri) throws IOException {
- DataOutputBuffer out = new DataOutputBuffer();
- try {
- hri.write(out);
- return out.getData();
- } finally {
- if (out != null) {
- out.close();
- }
- }
- }
-
- /**
- * Inserts multiple regions into hbase:meta using PB serialization
- */
- int createMultiRegionsWithPBSerialization(final Configuration c,
- final byte[] tableName, int numRegions)
- throws IOException {
- if (numRegions < 3) throw new IOException("Must create at least 3 regions");
- byte [] startKey = Bytes.toBytes("aaaaa");
- byte [] endKey = Bytes.toBytes("zzzzz");
- byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
- byte [][] regionStartKeys = new byte[splitKeys.length+1][];
- for (int i=0;i newRegions
- = new ArrayList(startKeys.length);
- int count = 0;
- for (int i = 0; i < startKeys.length; i++) {
- int j = (i + 1) % startKeys.length;
- HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
- Put put = MetaEditor.makePutFromRegionInfo(hri);
- put.setDurability(Durability.SKIP_WAL);
- meta.put(put);
- LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
-
- newRegions.add(hri);
- count++;
- }
- meta.close();
- return count;
- }
-
-
-}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
index 8597916..2a136bd 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -45,13 +47,13 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
- * Test {@link MetaReader}, {@link MetaEditor}.
+ * Test {@link MetaTableAccessor}.
*/
@Category(MediumTests.class)
public class TestMetaReaderEditor {
private static final Log LOG = LogFactory.getLog(TestMetaReaderEditor.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
- private static CatalogTracker CT;
+ private static HConnection hConnection;
@BeforeClass public static void beforeClass() throws Exception {
UTIL.startMiniCluster(3);
@@ -61,17 +63,16 @@ public class TestMetaReaderEditor {
// responsive. 1 second is default as is ten retries.
c.setLong("hbase.client.pause", 1000);
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
- CT = new CatalogTracker(c);
- CT.start();
+ hConnection = HConnectionManager.getConnection(c);
}
@AfterClass public static void afterClass() throws Exception {
- CT.stop();
UTIL.shutdownMiniCluster();
}
/**
- * Does {@link MetaReader#getRegion(CatalogTracker, byte[])} and a write
+ * Does {@link MetaTableAccessor#getRegion(org.apache.hadoop.hbase.client.HConnection,
+ * byte[])} and a write
* against hbase:meta while its hosted server is restarted to prove our retrying
* works.
* @throws IOException
@@ -86,18 +87,18 @@ public class TestMetaReaderEditor {
int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
// Test it works getting a region from just made user table.
final List regions =
- testGettingTableRegions(CT, name, regionCount);
- MetaTask reader = new MetaTask(CT, "reader") {
+ testGettingTableRegions(hConnection, name, regionCount);
+ MetaTask reader = new MetaTask(hConnection, "reader") {
@Override
void metaTask() throws Throwable {
- testGetRegion(this.ct, regions.get(0));
+ testGetRegion(hConnection, regions.get(0));
LOG.info("Read " + regions.get(0).getEncodedName());
}
};
- MetaTask writer = new MetaTask(CT, "writer") {
+ MetaTask writer = new MetaTask(hConnection, "writer") {
@Override
void metaTask() throws Throwable {
- MetaEditor.addRegionToMeta(this.ct, regions.get(0));
+ MetaTableAccessor.addRegionToMeta(hConnection, regions.get(0));
LOG.info("Wrote " + regions.get(0).getEncodedName());
}
};
@@ -148,17 +149,17 @@ public class TestMetaReaderEditor {
}
/**
- * Thread that runs a MetaReader/MetaEditor task until asked stop.
+ * Thread that runs a MetaTableAccessor task until asked stop.
*/
abstract static class MetaTask extends Thread {
boolean stop = false;
int count = 0;
Throwable t = null;
- final CatalogTracker ct;
+ final HConnection hConnection;
- MetaTask(final CatalogTracker ct, final String name) {
+ MetaTask(final HConnection hConnection, final String name) {
super(name);
- this.ct = ct;
+ this.hConnection = hConnection;
}
@Override
@@ -199,23 +200,24 @@ public class TestMetaReaderEditor {
@Test public void testGetRegionsCatalogTables()
throws IOException, InterruptedException {
List regions =
- MetaReader.getTableRegions(CT, TableName.META_TABLE_NAME);
+ MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, TableName.META_TABLE_NAME);
assertTrue(regions.size() >= 1);
- assertTrue(MetaReader.getTableRegionsAndLocations(CT,
- TableName.META_TABLE_NAME).size() >= 1);
+ assertTrue(MetaTableAccessor.getTableRegionsAndLocations(UTIL.getZooKeeperWatcher(),
+ hConnection, TableName.META_TABLE_NAME).size() >= 1);
}
@Test public void testTableExists() throws IOException {
final TableName name =
TableName.valueOf("testTableExists");
- assertFalse(MetaReader.tableExists(CT, name));
+ assertFalse(MetaTableAccessor.tableExists(hConnection, name));
UTIL.createTable(name, HConstants.CATALOG_FAMILY);
- assertTrue(MetaReader.tableExists(CT, name));
+ assertTrue(MetaTableAccessor.tableExists(hConnection, name));
HBaseAdmin admin = UTIL.getHBaseAdmin();
admin.disableTable(name);
admin.deleteTable(name);
- assertFalse(MetaReader.tableExists(CT, name));
- assertTrue(MetaReader.tableExists(CT,
+ assertFalse(MetaTableAccessor.tableExists(hConnection, name));
+ assertTrue(MetaTableAccessor.tableExists(hConnection,
TableName.META_TABLE_NAME));
}
@@ -224,7 +226,7 @@ public class TestMetaReaderEditor {
LOG.info("Started " + name);
// Test get on non-existent region.
Pair pair =
- MetaReader.getRegion(CT, Bytes.toBytes("nonexistent-region"));
+ MetaTableAccessor.getRegion(hConnection, Bytes.toBytes("nonexistent-region"));
assertNull(pair);
LOG.info("Finished " + name);
}
@@ -249,27 +251,30 @@ public class TestMetaReaderEditor {
// Now make sure we only get the regions from 1 of the tables at a time
- assertEquals(1, MetaReader.getTableRegions(CT, name).size());
- assertEquals(1, MetaReader.getTableRegions(CT, greaterName).size());
+ assertEquals(1, MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, name).size());
+ assertEquals(1, MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, greaterName).size());
}
- private static List testGettingTableRegions(final CatalogTracker ct,
+ private static List testGettingTableRegions(final HConnection hConnection,
final TableName name, final int regionCount)
throws IOException, InterruptedException {
- List regions = MetaReader.getTableRegions(ct, name);
+ List regions = MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, name);
assertEquals(regionCount, regions.size());
Pair pair =
- MetaReader.getRegion(ct, regions.get(0).getRegionName());
+ MetaTableAccessor.getRegion(hConnection, regions.get(0).getRegionName());
assertEquals(regions.get(0).getEncodedName(),
pair.getFirst().getEncodedName());
return regions;
}
- private static void testGetRegion(final CatalogTracker ct,
+ private static void testGetRegion(final HConnection hConnection,
final HRegionInfo region)
throws IOException, InterruptedException {
Pair pair =
- MetaReader.getRegion(ct, region.getRegionName());
+ MetaTableAccessor.getRegion(hConnection, region.getRegionName());
assertEquals(region.getEncodedName(),
pair.getFirst().getEncodedName());
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
index 6ae0ecd..dd1300e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
@@ -63,7 +63,7 @@ import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
- * Test MetaReader/Editor but without spinning up a cluster.
+ * Test MetaTableAccessor but without spinning up a cluster.
* We mock regionserver back and forth (we do spin up a zk cluster).
*/
@Category(MediumTests.class)
@@ -124,7 +124,7 @@ public class TestMetaReaderEditorNoCluster {
}
/**
- * Test that MetaReader will ride over server throwing
+ * Test that MetaTableAccessor will ride over server throwing
* "Server not running" IOEs.
* @see @link {https://issues.apache.org/jira/browse/HBASE-3446}
* @throws IOException
@@ -140,7 +140,6 @@ public class TestMetaReaderEditorNoCluster {
ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
HConnection connection;
- CatalogTracker ct = null;
try {
// Mock an ClientProtocol. Our mock implementation will fail a few
// times when we go to open a scanner.
@@ -204,11 +203,9 @@ public class TestMetaReaderEditorNoCluster {
Mockito.doReturn(implementation).
when(connection).getClient(Mockito.any(ServerName.class));
- // Now start up the catalogtracker with our doctored Connection.
- ct = new CatalogTracker(zkw, null, connection, ABORTABLE);
- ct.start();
// Scan meta for user tables and verify we got back expected answer.
- NavigableMap hris = MetaReader.getServerUserRegions(ct, sn);
+ NavigableMap hris =
+ MetaTableAccessor.getServerUserRegions(connection, sn);
assertEquals(1, hris.size());
assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
@@ -217,7 +214,6 @@ public class TestMetaReaderEditorNoCluster {
Mockito.verify(implementation, Mockito.times(6)).
scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any());
} finally {
- if (ct != null) ct.stop();
HConnectionManager.deleteConnection(UTIL.getConfiguration());
zkw.close();
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaRegionLocator.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaRegionLocator.java
new file mode 100644
index 0000000..ac91174
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaRegionLocator.java
@@ -0,0 +1,368 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.util.Progressable;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Test {@link org.apache.hadoop.hbase.zookeeper.MetaRegionLocator}
+ */
+@Category(MediumTests.class)
+public class TestMetaRegionLocator {
+ private static final Log LOG = LogFactory.getLog(TestMetaRegionLocator.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final ServerName SN =
+ ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
+ private ZooKeeperWatcher watcher;
+ private Abortable abortable;
+
+ @BeforeClass public static void beforeClass() throws Exception {
+ // Set this down so tests run quicker
+ UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
+ UTIL.startMiniZKCluster();
+ }
+
+ @AfterClass public static void afterClass() throws IOException {
+ UTIL.getZkCluster().shutdown();
+ }
+
+ @Before public void before() throws IOException {
+ this.abortable = new Abortable() {
+ @Override
+ public void abort(String why, Throwable e) {
+ LOG.info(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return false;
+ }
+ };
+ this.watcher = new ZooKeeperWatcher(UTIL.getConfiguration(),
+ this.getClass().getSimpleName(), this.abortable, true);
+ }
+
+ @After public void after() {
+ try {
+ // Clean out meta location or later tests will be confused... they presume
+ // start fresh in zk.
+ MetaRegionLocator.deleteMetaLocation(this.watcher);
+ } catch (KeeperException e) {
+ LOG.warn("Unable to delete hbase:meta location", e);
+ }
+
+ // Clear out our doctored connection or could mess up subsequent tests.
+ HConnectionManager.deleteConnection(UTIL.getConfiguration());
+
+ this.watcher.close();
+ }
+
+ /**
+ * Test that we get notification if hbase:meta moves.
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ */
+ @Test public void testThatIfMETAMovesWeAreNotified()
+ throws IOException, InterruptedException, KeeperException {
+ MetaRegionLocator.setMetaLocation(this.watcher,
+ ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
+ }
+
+ /**
+ * Test interruptible blocking wait on meta.
+ * @throws IOException
+ * @throws ServiceException
+ * @throws InterruptedException
+ */
+ @Test public void testInterruptWaitOnMeta()
+ throws IOException, InterruptedException, ServiceException {
+ final ClientProtos.ClientService.BlockingInterface client =
+ Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+
+ Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
+ thenReturn(GetResponse.newBuilder().build());
+ ServerName meta = MetaRegionLocator.getMetaRegionLocation(this.watcher);
+ Assert.assertNull(meta);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ MetaRegionLocator.waitMetaRegionLocation(watcher);
+ } catch (InterruptedException e) {
+ throw new RuntimeException("Interrupted", e);
+ }
+ }
+ };
+ t.start();
+ while (!t.isAlive())
+ Threads.sleep(1);
+ Threads.sleep(1);
+ assertTrue(t.isAlive());
+ // Join the thread... should exit shortly.
+ t.join();
+ }
+
+ private void testVerifyMetaRegionLocationWithException(Exception ex)
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ // Mock a ClientProtocol.
+ final ClientProtos.ClientService.BlockingInterface implementation =
+ Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+ HConnection connection = mockConnection(null, implementation);
+
+ // If a 'get' is called on mocked interface, throw connection refused.
+ Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
+ thenThrow(new ServiceException(ex));
+
+ MetaRegionLocator.setMetaLocation(this.watcher, SN);
+ long timeout = UTIL.getConfiguration().
+ getLong("hbase.catalog.verification.timeout", 1000);
+ Assert.assertFalse(MetaRegionLocator.verifyMetaRegionLocation(connection, watcher, timeout));
+ }
+
+ /**
+ * Test we survive a connection refused {@link ConnectException}
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testGetMetaServerConnectionFails()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
+ }
+
+ /**
+ * Test that verifyMetaRegionLocation properly handles getting a
+ * ServerNotRunningException. See HBASE-4470.
+ * Note this doesn't check the exact exception thrown in the
+ * HBASE-4470 as there it is thrown from getHConnection() and
+ * here it is thrown from get() -- but those are both called
+ * from the same function anyway, and this way is less invasive than
+ * throwing from getHConnection would be.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testVerifyMetaRegionServerNotRunning()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
+ }
+
+ /**
+ * Test get of meta region fails properly if nothing to connect to.
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testVerifyMetaRegionLocationFails()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ HConnection connection = Mockito.mock(HConnection.class);
+ ServiceException connectException =
+ new ServiceException(new ConnectException("Connection refused"));
+ final AdminProtos.AdminService.BlockingInterface implementation =
+ Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
+ Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
+ (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
+ Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
+ thenReturn(implementation);
+
+ MetaRegionLocator.setMetaLocation(this.watcher,
+ ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
+ Assert.assertFalse(MetaRegionLocator.verifyMetaRegionLocation(connection, watcher, 100));
+ }
+
+ @Test (expected = NotAllMetaRegionsOnlineException.class)
+ public void testTimeoutWaitForMeta()
+ throws IOException, InterruptedException {
+ MetaRegionLocator.waitMetaRegionLocation(watcher, 100);
+ }
+
+ /**
+ * Test waiting on meta w/ no timeout specified.
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ */
+ @Test public void testNoTimeoutWaitForMeta()
+ throws IOException, InterruptedException, KeeperException {
+ HConnection connection = Mockito.mock(HConnection.class);
+ ServerName hsa = MetaRegionLocator.getMetaRegionLocation(watcher);
+ Assert.assertNull(hsa);
+
+ // Now test waiting on meta location getting set.
+ Thread t = new WaitOnMetaThread();
+ startWaitAliveThenWaitItLives(t, 1);
+ // Set a meta location.
+ hsa = setMetaLocation();
+ // Join the thread... should exit shortly.
+ t.join();
+ // Now meta is available.
+ Assert.assertTrue(MetaRegionLocator.getMetaRegionLocation(watcher).equals(hsa));
+ }
+
+ private ServerName setMetaLocation() throws KeeperException {
+ MetaRegionLocator.setMetaLocation(this.watcher, SN);
+ return SN;
+ }
+
+ /**
+ * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
+ * want to pass a mocked HRS; can be null.
+ * @param client A mocked ClientProtocol instance, can be null
+ * @return Mock up a connection that returns a {@link Configuration} when
+ * {@link HConnection#getConfiguration()} is called, a 'location' when
+ * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
+ * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
+ * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
+ * {@link ClientProtos.ClientService.BlockingInterface} instance when
+ * {@link HConnection#getClient(ServerName)} is called (Be sure to call
+ * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
+ * when done with this mocked Connection.)
+ * @throws IOException
+ */
+ private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
+ final ClientProtos.ClientService.BlockingInterface client)
+ throws IOException {
+ HConnection connection =
+ HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+ Mockito.doNothing().when(connection).close();
+ // Make it so we return any old location when asked.
+ final HRegionLocation anyLocation =
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
+ Mockito.when(connection.getRegionLocation((TableName) Mockito.any(),
+ (byte[]) Mockito.any(), Mockito.anyBoolean())).
+ thenReturn(anyLocation);
+ Mockito.when(connection.locateRegion((TableName) Mockito.any(),
+ (byte[]) Mockito.any())).
+ thenReturn(anyLocation);
+ if (admin != null) {
+ // If a call to getHRegionConnection, return this implementation.
+ Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).
+ thenReturn(admin);
+ }
+ if (client != null) {
+ // If a call to getClient, return this implementation.
+ Mockito.when(connection.getClient(Mockito.any(ServerName.class))).
+ thenReturn(client);
+ }
+ return connection;
+ }
+
+ /**
+ * @return A mocked up Result that fakes a Get on a row in the
+ * hbase:meta table.
+ * @throws IOException
+ */
+ private Result getMetaTableRowResult() throws IOException {
+ return MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO, SN);
+ }
+
+ private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
+ t.start();
+ while(!t.isAlive()) {
+ // Wait
+ }
+ // Wait one second.
+ Threads.sleep(ms);
+ Assert.assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
+ }
+
+ class CountingProgressable implements Progressable {
+ final AtomicInteger counter = new AtomicInteger(0);
+ @Override
+ public void progress() {
+ this.counter.incrementAndGet();
+ }
+ }
+
+ /**
+ * Wait on META.
+ */
+ class WaitOnMetaThread extends Thread {
+
+ WaitOnMetaThread() {
+ super("WaitOnMeta");
+ }
+
+ @Override
+ public void run() {
+ try {
+ doWaiting();
+ } catch (InterruptedException e) {
+ throw new RuntimeException("Failed wait", e);
+ }
+ LOG.info("Exiting " + getName());
+ }
+
+ void doWaiting() throws InterruptedException {
+ while (MetaRegionLocator.waitMetaRegionLocation(watcher, 100) == null);
+ }
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index 54b751d..caf3c61 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -1764,18 +1763,13 @@ public class TestAdmin {
final byte [] nameBytes = Bytes.toBytes(name);
HTable t = TEST_UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
TEST_UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
- CatalogTracker ct = new CatalogTracker(TEST_UTIL.getConfiguration());
- ct.start();
- try {
- HRegionLocation regionLocation = t.getRegionLocation("mmm");
- HRegionInfo region = regionLocation.getRegionInfo();
- byte[] regionName = region.getRegionName();
- Pair pair = admin.getRegion(regionName, ct);
- assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
- pair = admin.getRegion(region.getEncodedNameAsBytes(), ct);
- assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
- } finally {
- ct.stop();
- }
+
+ HRegionLocation regionLocation = t.getRegionLocation("mmm");
+ HRegionInfo region = regionLocation.getRegionInfo();
+ byte[] regionName = region.getRegionName();
+ Pair pair = admin.getRegion(regionName);
+ assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
+ pair = admin.getRegion(region.getEncodedNameAsBytes());
+ assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
}
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
index 4e5a656..192ef19 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
@@ -40,8 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.util.Threads;
@@ -50,6 +49,7 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+
@Category(MediumTests.class)
public class TestMetaScanner {
final Log LOG = LogFactory.getLog(getClass());
@@ -127,8 +127,6 @@ public class TestMetaScanner {
TableName.valueOf("testConcurrentMetaScannerAndCatalogJanitor");
final byte[] FAMILY = Bytes.toBytes("family");
TEST_UTIL.createTable(TABLENAME, FAMILY);
- final CatalogTracker catalogTracker = mock(CatalogTracker.class);
- when(catalogTracker.getConnection()).thenReturn(TEST_UTIL.getHBaseAdmin().getConnection());
class RegionMetaSplitter extends StoppableImplementation implements Runnable {
Random random = new Random();
@@ -169,7 +167,8 @@ public class TestMetaScanner {
Bytes.toBytes(midKey),
end);
- MetaEditor.splitRegion(catalogTracker, parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0));
+ MetaTableAccessor.splitRegion(TEST_UTIL.getHBaseAdmin().getConnection(),
+ parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0));
Threads.sleep(random.nextInt(200));
} catch (Throwable e) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
index e971e4f..7fbd45f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
@@ -165,7 +165,7 @@ public class TestScannerTimeout {
Scan scan = new Scan();
scan.setCaching(SCANNER_CACHING);
LOG.info("************ TEST3686A");
- MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker());
+ MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getHBaseAdmin().getConnection());
// Set a very high timeout, we want to test what happens when a RS
// fails but the region is recovered before the lease times out.
// Since the RS is already created, this conf is client-side only for
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
index ada3e6f..ec73d7d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -152,7 +152,7 @@ public class TestRegionServerObserver {
mergedRegion = rmt.stepsBeforePONR(rs, rs, false);
rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(),
regionB.getRegionInfo(), rs.getServerName(), metaEntries);
- MetaEditor.mutateMetaTable(rs.getCatalogTracker(), metaEntries);
+ MetaTableAccessor.mutateMetaTable(rs.getShortCircuitConnection(), metaEntries);
}
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index f302f10..55a5763 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -45,10 +45,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -474,11 +473,12 @@ public class TestLoadIncrementalHFilesSplitRecovery {
dir = buildBulkFiles(tableName, 3);
// Mess it up by leaving a hole in the hbase:meta
- CatalogTracker ct = new CatalogTracker(util.getConfiguration());
- List regionInfos = MetaReader.getTableRegions(ct, TableName.valueOf(tableName));
+ HConnection hConnection = HConnectionManager.getConnection(util.getConfiguration());
+ List regionInfos = MetaTableAccessor.getTableRegions(
+ util.getZooKeeperWatcher(), hConnection, TableName.valueOf(tableName));
for (HRegionInfo regionInfo : regionInfos) {
if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
- MetaEditor.deleteRegion(ct, regionInfo);
+ MetaTableAccessor.deleteRegion(hConnection, regionInfo);
break;
}
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 0924dd2..09c0242 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -274,8 +274,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
}
@Override
- public CatalogTracker getCatalogTracker() {
- // TODO Auto-generated method stub
+ public HConnection getShortCircuitConnection() {
return null;
}
@@ -312,7 +311,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
}
@Override
- public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
+ public void postOpenDeployTasks(HRegion r)
throws KeeperException, IOException {
// TODO Auto-generated method stub
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 1c010b2..a0413e4 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -302,7 +302,7 @@ public class TestActiveMasterManager {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
index f5e8952..feb28a0 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
@@ -382,12 +381,11 @@ public class TestAssignmentManager {
ExecutorService executor = startupMasterExecutor("testBalanceExecutor");
// We need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
.getConfiguration());
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
+ this.serverManager, balancer, executor, null, master.getTableLockManager());
am.failoverCleanupDone.set(true);
try {
// Make sure our new AM gets callbacks; once registered, can't unregister.
@@ -456,13 +454,11 @@ public class TestAssignmentManager {
// handling zk callbacks.
ExecutorService executor = startupMasterExecutor("testShutdownHandler");
- // We need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
// Create an AM.
AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(
this.server, this.serverManager);
try {
- processServerShutdownHandler(ct, am, false);
+ processServerShutdownHandler(am, false);
} finally {
executor.shutdown();
am.shutdown();
@@ -510,7 +506,6 @@ public class TestAssignmentManager {
// handling zk callbacks.
ExecutorService executor = startupMasterExecutor("testSSHWhenSplitRegionInProgress");
// We need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
ZKAssign.deleteAllNodes(this.watcher);
// Create an AM.
@@ -530,7 +525,7 @@ public class TestAssignmentManager {
ZKUtil.createAndWatch(this.watcher, node, data.toByteArray());
try {
- processServerShutdownHandler(ct, am, regionSplitDone);
+ processServerShutdownHandler(am, regionSplitDone);
// check znode deleted or not.
// In both cases the znode should be deleted.
@@ -558,14 +553,12 @@ public class TestAssignmentManager {
// Create and startup an executor. This is used by AssignmentManager
// handling zk callbacks.
ExecutorService executor = startupMasterExecutor("testSSHWhenDisableTableInProgress");
- // We need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
ZKAssign.deleteAllNodes(this.watcher);
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, executor, null, master.getTableLockManager());
+ this.serverManager, balancer, executor, null, master.getTableLockManager());
// adding region to regions and servers maps.
am.regionOnline(REGIONINFO, SERVERNAME_A);
// adding region in pending close.
@@ -587,7 +580,7 @@ public class TestAssignmentManager {
ZKUtil.createAndWatch(this.watcher, node, data.toByteArray());
try {
- processServerShutdownHandler(ct, am, false);
+ processServerShutdownHandler(am, false);
// check znode deleted or not.
// In both cases the znode should be deleted.
assertTrue("The znode should be deleted.", ZKUtil.checkExists(this.watcher, node) == -1);
@@ -607,7 +600,7 @@ public class TestAssignmentManager {
}
}
- private void processServerShutdownHandler(CatalogTracker ct, AssignmentManager am, boolean splitRegion)
+ private void processServerShutdownHandler(AssignmentManager am, boolean splitRegion)
throws IOException, ServiceException {
// Make sure our new AM gets callbacks; once registered, can't unregister.
// Thats ok because we make a new zk watcher for each test.
@@ -652,8 +645,7 @@ public class TestAssignmentManager {
// Make it so we can get a catalogtracker from servermanager.. .needed
// down in guts of server shutdown handler.
- Mockito.when(ct.getConnection()).thenReturn(connection);
- Mockito.when(this.server.getCatalogTracker()).thenReturn(ct);
+ Mockito.when(this.server.getShortCircuitConnection()).thenReturn(connection);
// Now make a server shutdown handler instance and invoke process.
// Have it that SERVERNAME_A died.
@@ -697,12 +689,11 @@ public class TestAssignmentManager {
// default null.
Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true);
// Need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
.getConfiguration());
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, null, null, master.getTableLockManager());
+ this.serverManager, balancer, null, null, master.getTableLockManager());
try {
// First make sure my mock up basically works. Unassign a region.
unassign(am, SERVERNAME_A, hri);
@@ -878,7 +869,6 @@ public class TestAssignmentManager {
am.getRegionStates().logSplit(SERVERNAME_A); // Assume log splitting is done
am.getRegionStates().createRegionState(REGIONINFO);
am.gate.set(false);
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
BaseCoordinatedStateManager cp = new ZkCoordinatedStateManager();
cp.initialize(server);
@@ -892,7 +882,7 @@ public class TestAssignmentManager {
assertFalse(am.processRegionsInTransition(rt, REGIONINFO, orc, zkOrd));
am.getTableStateManager().setTableState(REGIONINFO.getTable(), Table.State.ENABLED);
- processServerShutdownHandler(ct, am, false);
+ processServerShutdownHandler(am, false);
// Waiting for the assignment to get completed.
while (!am.gate.get()) {
Thread.sleep(10);
@@ -980,7 +970,7 @@ public class TestAssignmentManager {
am.getTableStateManager().setTableState(REGIONINFO.getTable(),
Table.State.ENABLING);
new EnableTableHandler(server, REGIONINFO.getTable(),
- am.getCatalogTracker(), am, new NullTableLockManager(), true).prepare()
+ am, new NullTableLockManager(), true).prepare()
.process();
assertEquals("Number of assignments should be 1.", 1, assignmentCount);
assertTrue("Table should be enabled.",
@@ -1036,8 +1026,6 @@ public class TestAssignmentManager {
@Test
public void testSSHTimesOutOpeningRegionTransition()
throws KeeperException, IOException, CoordinatedStateException, ServiceException {
- // We need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
// Create an AM.
AssignmentManagerWithExtrasForTesting am =
setUpMockedAssignmentManager(this.server, this.serverManager);
@@ -1054,7 +1042,7 @@ public class TestAssignmentManager {
try {
am.assignInvoked = false;
- processServerShutdownHandler(ct, am, false);
+ processServerShutdownHandler(am, false);
assertTrue(am.assignInvoked);
} finally {
am.getRegionStates().regionsInTransition.remove(REGIONINFO.getEncodedName());
@@ -1148,8 +1136,6 @@ public class TestAssignmentManager {
private AssignmentManagerWithExtrasForTesting setUpMockedAssignmentManager(final Server server,
final ServerManager manager) throws IOException, KeeperException,
ServiceException, CoordinatedStateException {
- // We need a mocked catalog tracker. Its used by our AM instance.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
// Make an RS Interface implementation. Make it so a scanner can go against
// it and a get to return the single region, REGIONINFO, this test is
// messing with. Needed when "new master" joins cluster. AM will try and
@@ -1193,12 +1179,11 @@ public class TestAssignmentManager {
getMockedConnectionAndDecorate(HTU.getConfiguration(), null,
ri, SERVERNAME_B, REGIONINFO);
// Make it so we can get the connection from our mocked catalogtracker
- Mockito.when(ct.getConnection()).thenReturn(connection);
// Create and startup an executor. Used by AM handling zk callbacks.
ExecutorService executor = startupMasterExecutor("mockedAMExecutor");
this.balancer = LoadBalancerFactory.getLoadBalancer(server.getConfiguration());
AssignmentManagerWithExtrasForTesting am = new AssignmentManagerWithExtrasForTesting(
- server, manager, ct, this.balancer, executor, new NullTableLockManager());
+ server, manager, this.balancer, executor, new NullTableLockManager());
return am;
}
@@ -1208,20 +1193,17 @@ public class TestAssignmentManager {
class AssignmentManagerWithExtrasForTesting extends AssignmentManager {
// Keep a reference so can give it out below in {@link #getExecutorService}
private final ExecutorService es;
- // Ditto for ct
- private final CatalogTracker ct;
boolean processRITInvoked = false;
boolean assignInvoked = false;
AtomicBoolean gate = new AtomicBoolean(true);
public AssignmentManagerWithExtrasForTesting(
final Server master, final ServerManager serverManager,
- final CatalogTracker catalogTracker, final LoadBalancer balancer,
+ final LoadBalancer balancer,
final ExecutorService service, final TableLockManager tableLockManager)
throws KeeperException, IOException, CoordinatedStateException {
- super(master, serverManager, catalogTracker, balancer, service, null, tableLockManager);
+ super(master, serverManager, balancer, service, null, tableLockManager);
this.es = service;
- this.ct = catalogTracker;
}
@Override
@@ -1274,13 +1256,6 @@ public class TestAssignmentManager {
ExecutorService getExecutorService() {
return this.es;
}
-
- /**
- * @return CatalogTracker used by this AM (Its a mock).
- */
- CatalogTracker getCatalogTracker() {
- return this.ct;
- }
}
/**
@@ -1326,12 +1301,11 @@ public class TestAssignmentManager {
// Region to use in test.
final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
// Need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
server.getConfiguration());
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, null, null, master.getTableLockManager());
+ this.serverManager, balancer, null, null, master.getTableLockManager());
RegionStates regionStates = am.getRegionStates();
try {
// First set the state of the region to merging
@@ -1360,14 +1334,12 @@ public class TestAssignmentManager {
CoordinatedStateException {
// Region to use in test.
final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
- // Need a mocked catalog tracker.
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
server.getConfiguration());
final AtomicBoolean zkEventProcessed = new AtomicBoolean(false);
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, null, null, master.getTableLockManager()) {
+ this.serverManager, balancer, null, null, master.getTableLockManager()) {
@Override
void handleRegion(final RegionTransition rt, OpenRegionCoordination coordination,
@@ -1408,9 +1380,8 @@ public class TestAssignmentManager {
*/
@Test
public void testBalanceRegionOfDeletedTable() throws Exception {
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
AssignmentManager am = new AssignmentManager(this.server, this.serverManager,
- ct, balancer, null, null, master.getTableLockManager());
+ balancer, null, null, master.getTableLockManager());
RegionStates regionStates = am.getRegionStates();
HRegionInfo hri = REGIONINFO;
regionStates.createRegionState(hri);
@@ -1436,12 +1407,11 @@ public class TestAssignmentManager {
this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);
HRegionInfo hri = REGIONINFO;
- CatalogTracker ct = Mockito.mock(CatalogTracker.class);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
server.getConfiguration());
// Create an AM.
AssignmentManager am = new AssignmentManager(this.server,
- this.serverManager, ct, balancer, null, null, master.getTableLockManager());
+ this.serverManager, balancer, null, null, master.getTableLockManager());
RegionStates regionStates = am.getRegionStates();
try {
am.regionPlans.put(REGIONINFO.getEncodedName(),
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index a6cd57a..131eea9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -113,7 +113,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
@@ -157,7 +157,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
final HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
master = TEST_UTIL.getHBaseCluster().getMaster();
Set<ServerName> onlineServers = master.serverManager.getOnlineServers().keySet();
@@ -367,7 +367,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
@@ -414,7 +414,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
@@ -458,7 +458,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
MyLoadBalancer.controledRegion = hri.getEncodedName();
@@ -500,7 +500,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
FileSystem fs = FileSystem.get(conf);
Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
@@ -596,7 +596,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
@@ -648,7 +648,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
MyRegionObserver.postOpenEnabled.set(true);
@@ -723,7 +723,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
// Assign the region
master = (MyMaster)cluster.getMaster();
@@ -797,7 +797,7 @@ public class TestAssignmentManagerOnCluster {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
// Assign the region
master = (MyMaster)cluster.getMaster();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 3b69c16..d201624 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -97,7 +96,6 @@ public class TestCatalogJanitor {
class MockServer implements Server {
private final HConnection connection;
private final Configuration c;
- private final CatalogTracker ct;
MockServer(final HBaseTestingUtility htu)
throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
@@ -137,16 +135,14 @@ public class TestCatalogJanitor {
FileSystem fs = FileSystem.get(this.c);
Path rootdir = FSUtils.getRootDir(this.c);
FSUtils.setRootDir(this.c, rootdir);
- this.ct = Mockito.mock(CatalogTracker.class);
AdminProtos.AdminService.BlockingInterface hri =
Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
- Mockito.when(this.ct.getConnection()).thenReturn(this.connection);
- Mockito.when(ct.waitForMetaServerConnection(Mockito.anyLong())).thenReturn(hri);
+
}
@Override
- public CatalogTracker getCatalogTracker() {
- return this.ct;
+ public HConnection getShortCircuitConnection() {
+ return this.connection;
}
@Override
@@ -186,9 +182,6 @@ public class TestCatalogJanitor {
@Override
public void stop(String why) {
- if (this.ct != null) {
- this.ct.stop();
- }
if (this.connection != null) {
HConnectionManager.deleteConnection(this.connection.getConfiguration());
}
@@ -254,7 +247,7 @@ public class TestCatalogJanitor {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index 687012c..6479feb 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -26,8 +26,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -42,7 +42,7 @@ public class TestClockSkewDetection {
final Configuration conf = HBaseConfiguration.create();
ServerManager sm = new ServerManager(new Server() {
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index d0ddc96..b3b2828 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.master;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
@@ -74,7 +74,8 @@ public class TestMaster {
ht.close();
List<Pair<HRegionInfo, ServerName>> tableRegions =
- MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(), TABLENAME);
+ MetaTableAccessor.getTableRegionsAndLocations(m.getZooKeeper(),
+ m.getShortCircuitConnection(), TABLENAME);
LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
assertEquals(1, tableRegions.size());
assertArrayEquals(HConstants.EMPTY_START_ROW,
@@ -91,8 +92,8 @@ public class TestMaster {
Thread.sleep(100);
}
LOG.info("Making sure we can call getTableRegions while opening");
- tableRegions = MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(),
- TABLENAME, false);
+ tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getZooKeeper(),
+ m.getShortCircuitConnection(), TABLENAME, false);
LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
// We have three regions because one is split-in-progress
@@ -102,8 +103,8 @@ public class TestMaster {
Pair<HRegionInfo, ServerName> pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
LOG.info("Result is: " + pair);
Pair<HRegionInfo, ServerName> tableRegionFromName =
- MetaReader.getRegion(m.getCatalogTracker(),
- pair.getFirst().getRegionName());
+ MetaTableAccessor.getRegion(m.getShortCircuitConnection(),
+ pair.getFirst().getRegionName());
assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 1ba4e1b..60ffcd9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -294,7 +294,7 @@ public class TestMasterFailover {
ProtobufUtil.openRegion(hrs.getRSRpcServices(),
hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
while (true) {
- ServerName sn = MetaRegionTracker.getMetaRegionLocation(zkw);
+ ServerName sn = MetaRegionLocator.getMetaRegionLocation(zkw);
if (sn != null && sn.equals(hrs.getServerName())) {
break;
}
@@ -684,7 +684,7 @@ public class TestMasterFailover {
ProtobufUtil.openRegion(hrs.getRSRpcServices(),
hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
while (true) {
- ServerName sn = MetaRegionTracker.getMetaRegionLocation(zkw);
+ ServerName sn = MetaRegionLocator.getMetaRegionLocation(zkw);
if (sn != null && sn.equals(hrs.getServerName())) {
break;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index d7cdc73..4754c03 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaMockingUtil;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
@@ -54,7 +53,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -159,7 +158,7 @@ public class TestMasterNoCluster {
final MockRegionServer rs2 = new MockRegionServer(conf, sn2);
// Put some data into the servers. Make it look like sn0 has the metaH
// Put data into sn2 so it looks like it has a few regions for a table named 't'.
- MetaRegionTracker.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName());
+ MetaRegionLocator.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName());
final TableName tableName = TableName.valueOf("t");
Result [] results = new Result [] {
MetaMockingUtil.getMetaTableRowResult(
@@ -206,15 +205,16 @@ public class TestMasterNoCluster {
}
@Override
- protected CatalogTracker createCatalogTracker() throws IOException {
- // Insert a mock for the connection used by the CatalogTracker. Any
- // regionserver should do. Use TESTUTIL.getConfiguration rather than
+ public HConnection getShortCircuitConnection() {
+ // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have an HConnection
// associate so the below mocking of a connection will fail.
- HConnection connection =
- HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(),
+ try {
+ return HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(),
rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
- return new CatalogTracker(getZooKeeper(), getConfiguration(), connection, this);
+ } catch (IOException e) {
+ return null;
+ }
}
@Override
@@ -285,15 +285,16 @@ public class TestMasterNoCluster {
}
@Override
- protected CatalogTracker createCatalogTracker() throws IOException {
- // Insert a mock for the connection used by the CatalogTracker. Any
- // regionserver should do. Use TESTUTIL.getConfiguration rather than
+ public HConnection getShortCircuitConnection() {
+ // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have an HConnection
// associate so the below mocking of a connection will fail.
- HConnection connection =
- HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(),
+ try {
+ return HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(),
rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
- return new CatalogTracker(getZooKeeper(), getConfiguration(), connection, this);
+ } catch (IOException e) {
+ return null;
+ }
}
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 8f62ec0..d010500 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -215,7 +215,7 @@ public class TestHFileCleaner {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index e4a26f0..8c6f359 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -151,7 +151,7 @@ public class TestHFileLinkCleaner {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index d26d354..52aa766 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationQueues;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -157,7 +157,7 @@ public class TestLogsCleaner {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 14a44fa..dd1d183 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -123,7 +123,7 @@ public class TestEndToEndSplitTransaction {
// 3. finish phase II
// note that this replicates some code from SplitTransaction
// 2nd daughter first
- server.postOpenDeployTasks(regions.getSecond(), server.getCatalogTracker());
+ server.postOpenDeployTasks(regions.getSecond());
// Add to online regions
server.addToOnlineRegions(regions.getSecond());
// THIS is the crucial point:
@@ -133,7 +133,7 @@ public class TestEndToEndSplitTransaction {
assertTrue(test(con, tableName, lastRow, server));
// first daughter second
- server.postOpenDeployTasks(regions.getFirst(), server.getCatalogTracker());
+ server.postOpenDeployTasks(regions.getFirst());
// Add to online regions
server.addToOnlineRegions(regions.getFirst());
assertTrue(test(con, tableName, firstRow, server));
@@ -289,7 +289,7 @@ public class TestEndToEndSplitTransaction {
}
/**
- * Checks regions using MetaScanner, MetaReader and HTable methods
+ * Checks regions using MetaScanner, MetaTableAccessor and HTable methods
*/
static class RegionChecker extends Chore {
Configuration conf;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 8a5fbfe..85967f9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
@@ -83,7 +83,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
- Put put = MetaEditor.makePutFromRegionInfo(hri);
+ Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
put.setDurability(Durability.SKIP_WAL);
mr.put(put);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 77889d5..2ee953f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
@@ -425,7 +425,7 @@ public class TestHeapMemoryManager {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index aac801e..76a0b35 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@@ -168,14 +168,14 @@ public class TestRegionMergeTransactionOnCluster {
verifyRowCount(table, ROWSIZE);
table.close();
- List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
- .getTableRegionsAndLocations(master.getCatalogTracker(),
- tableName);
+ List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
+ .getTableRegionsAndLocations(master.getZooKeeper(),
+ master.getShortCircuitConnection(), tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
HTableDescriptor tableDescritor = master.getTableDescriptors().get(
tableName);
- Result mergedRegionResult = MetaReader.getRegionResult(
- master.getCatalogTracker(), mergedRegionInfo.getRegionName());
+ Result mergedRegionResult = MetaTableAccessor.getRegionResult(
+ master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
// contains merge reference in META
assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -217,8 +217,8 @@ public class TestRegionMergeTransactionOnCluster {
assertFalse(fs.exists(regionAdir));
assertFalse(fs.exists(regionBdir));
- mergedRegionResult = MetaReader.getRegionResult(
- master.getCatalogTracker(), mergedRegionInfo.getRegionName());
+ mergedRegionResult = MetaTableAccessor.getRegionResult(
+ master.getShortCircuitConnection(), mergedRegionInfo.getRegionName());
assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
HConstants.MERGEA_QUALIFIER) != null);
assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@@ -293,9 +293,9 @@ public class TestRegionMergeTransactionOnCluster {
private PairOfSameType<HRegionInfo> requestMergeRegion(
HMaster master, TableName tablename,
int regionAnum, int regionBnum) throws Exception {
- List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
- .getTableRegionsAndLocations(master.getCatalogTracker(),
- tablename);
+ List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
+ .getTableRegionsAndLocations(master.getZooKeeper(),
+ master.getShortCircuitConnection(), tablename);
HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
TEST_UTIL.getHBaseAdmin().mergeRegions(
@@ -310,8 +310,8 @@ public class TestRegionMergeTransactionOnCluster {
List<HRegionInfo> tableRegionsInMaster;
long timeout = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < timeout) {
- tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), tablename);
+ tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
+ master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tablename);
if (tableRegionsInMeta.size() == expectedRegionNum
@@ -321,8 +321,8 @@ public class TestRegionMergeTransactionOnCluster {
Thread.sleep(250);
}
- tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), tablename);
+ tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
+ master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
assertEquals(expectedRegionNum, tableRegionsInMeta.size());
}
@@ -348,15 +348,15 @@ public class TestRegionMergeTransactionOnCluster {
long timeout = System.currentTimeMillis() + waitTime;
List<Pair<HRegionInfo, ServerName>> tableRegions;
while (System.currentTimeMillis() < timeout) {
- tableRegions = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), tablename);
+ tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
+ master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
if (tableRegions.size() == numRegions)
break;
Thread.sleep(250);
}
- tableRegions = MetaReader.getTableRegionsAndLocations(
- master.getCatalogTracker(), tablename);
+ tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
+ master.getZooKeeper(), master.getShortCircuitConnection(), tablename);
LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
assertEquals(numRegions, tableRegions.size());
return table;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 2d94a77..4e613d7 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
@@ -96,7 +96,7 @@ public class TestRegionServerNoMaster {
ProtobufUtil.openRegion(hrs.getRSRpcServices(),
hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
while (true) {
- ServerName sn = MetaRegionTracker.getMetaRegionLocation(zkw);
+ ServerName sn = MetaRegionLocator.getMetaRegionLocation(zkw);
if (sn != null && sn.equals(hrs.getServerName())) {
break;
}
@@ -272,7 +272,7 @@ public class TestRegionServerNoMaster {
try {
// we re-opened meta so some of its data is lost
ServerName sn = getRS().getServerName();
- MetaEditor.updateRegionLocation(getRS().catalogTracker,
+ MetaTableAccessor.updateRegionLocation(getRS().getShortCircuitConnection(),
hri, sn, getRS().getRegion(regionName).getOpenSeqNum());
// fake region to be closing now, need to clear state afterwards
getRS().regionsInTransitionInRS.put(hri.getEncodedNameAsBytes(), Boolean.FALSE);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index f03c81d..a9f6682 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -55,8 +55,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
@@ -863,8 +862,8 @@ public class TestSplitTransactionOnCluster {
admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it don't remove parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false);
- boolean tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(),
- tableName);
+ boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+ tableName);
assertEquals("The specified table should present.", true, tableExists);
final HRegion region = findSplittableRegion(regions);
assertTrue("not able to find a splittable region", region != null);
@@ -875,8 +874,8 @@ public class TestSplitTransactionOnCluster {
} catch (IOException e) {
}
- tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(),
- tableName);
+ tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+ tableName);
assertEquals("The specified table should present.", true, tableExists);
} finally {
if (regions != null) {
@@ -1377,13 +1376,13 @@ public class TestSplitTransactionOnCluster {
copyOfParent.setOffline(true);
copyOfParent.setSplit(true);
// Put for parent
- Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
- MetaEditor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
+ Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+ MetaTableAccessor.addDaughtersToPut(putParent, daughterRegions.getFirst().getRegionInfo(),
daughterRegions.getSecond().getRegionInfo());
metaEntries.add(putParent);
// Puts for daughters
- Put putA = MetaEditor.makePutFromRegionInfo(daughterRegions.getFirst().getRegionInfo());
- Put putB = MetaEditor.makePutFromRegionInfo(daughterRegions.getSecond().getRegionInfo());
+ Put putA = MetaTableAccessor.makePutFromRegionInfo(daughterRegions.getFirst().getRegionInfo());
+ Put putB = MetaTableAccessor.makePutFromRegionInfo(daughterRegions.getSecond().getRegionInfo());
st.addLocation(putA, rs.getServerName(), 1);
st.addLocation(putB, rs.getServerName(), 1);
metaEntries.add(putA);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index 843f2c6..07235da 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -148,7 +148,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index e6a26e7..09c1a1f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -224,7 +224,7 @@ public class TestReplicationTrackerZKImpl {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index a0133a8..f41b384 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -47,8 +47,8 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -395,10 +395,9 @@ public class TestReplicationSourceManager {
public CoordinatedStateManager getCoordinatedStateManager() {
return null;
}
-
@Override
- public CatalogTracker getCatalogTracker() {
- return null; // To change body of implemented methods use File | Settings | File Templates.
+ public HConnection getShortCircuitConnection() {
+ return null;
}
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index e38f659..a86c693 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@@ -144,7 +144,7 @@ public class TestTokenAuthentication {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 040a466..7963665 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.SmallTests;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -147,8 +146,6 @@ public class TestRestoreSnapshotHelper {
*/
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
- CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
- HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class);
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
MonitoredTask status = Mockito.mock(MonitoredTask.class);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 572d130..b99a5c6 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
@@ -101,7 +101,7 @@ public class MockServer implements Server {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 134a953..0332b14 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -93,7 +93,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.PrintingErrorReporter;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionLocator;
import org.apache.zookeeper.KeeperException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -226,7 +226,7 @@ public class TestHBaseFsck {
}
}
regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
- MetaRegionTracker.deleteMetaLocation(cluster.getMaster().getZooKeeper());
+ MetaRegionLocator.deleteMetaLocation(cluster.getMaster().getZooKeeper());
assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
HBaseFsck hbck = doFsck(conf, true);
assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION,
@@ -242,7 +242,7 @@ public class TestHBaseFsck {
throws IOException {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME, executorService);
HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
return hri;
}
@@ -1292,7 +1292,7 @@ public class TestHBaseFsck {
hri.setOffline(true);
hri.setSplit(true);
- MetaEditor.addRegionToMeta(meta, hri, a, b);
+ MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
meta.flushCommits();
TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME.getName());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index c2e7ef1..315d95f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -29,9 +29,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -111,17 +112,19 @@ public class TestMergeTable {
LOG.info("Starting mini hbase cluster");
UTIL.startMiniHBaseCluster(1, 1);
Configuration c = new Configuration(UTIL.getConfiguration());
- CatalogTracker ct = new CatalogTracker(c);
- ct.start();
+ HConnection hConnection = HConnectionManager.getConnection(c);
+
List<HRegionInfo> originalTableRegions =
- MetaReader.getTableRegions(ct, desc.getTableName());
+ MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(), hConnection,
+ desc.getTableName());
LOG.info("originalTableRegions size=" + originalTableRegions.size() +
"; " + originalTableRegions);
HBaseAdmin admin = new HBaseAdmin(c);
admin.disableTable(desc.getTableName());
HMerge.merge(c, FileSystem.get(c), desc.getTableName());
List<HRegionInfo> postMergeTableRegions =
- MetaReader.getTableRegions(ct, desc.getTableName());
+ MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(), hConnection,
+ desc.getTableName());
LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
"; " + postMergeTableRegions);
assertTrue("originalTableRegions=" + originalTableRegions.size() +
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index 4a5f84d..e117581 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -220,7 +220,7 @@ public class OfflineMetaRebuildTestCore {
out.close();
// add to meta.
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
return hri;
}