diff --git bin/region_mover.rb bin/region_mover.rb
index 482617c..d3ee796 100644
--- bin/region_mover.rb
+++ bin/region_mover.rb
@@ -101,15 +101,13 @@ def getServerNameForRegion(admin, r)
if r.isMetaRegion()
# Hack
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(admin.getConfiguration(), "region_mover", nil)
+ mtl = org.apache.hadoop.hbase.zookeeper.MetaTableLocator.new()
begin
- tracker = org.apache.hadoop.hbase.zookeeper.MetaRegionTracker.new(zkw, RubyAbortable.new())
- tracker.start()
- while not tracker.isLocationAvailable()
+ while not mtl.isLocationAvailable(zkw)
sleep 0.1
end
# Make a fake servername by appending ','
- metaServer = tracker.getMetaRegionLocation().toString() + ","
- tracker.stop()
+ metaServer = mtl.getMetaRegionLocation(zkw).toString() + ","
return metaServer
ensure
zkw.close()
diff --git dev-support/hbasetests.sh dev-support/hbasetests.sh
index e129bd4..50c3a51 100755
--- dev-support/hbasetests.sh
+++ dev-support/hbasetests.sh
@@ -64,7 +64,7 @@ parallelMaven=1
# the others
#The ',' at the end is mandatory
flakyTests=
-#org.apache.hadoop.hbase.mapreduce.TestTableInputFormatScan,org.apache.hadoop.hbase.catalog.TestMetaReaderEditorNoCluster,org.apache.hadoop.hbase.catalog.TestMetaReaderEditor,org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat,org.apache.hadoop.hbase.mapred.TestTableMapReduce,org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithAbort,org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithRemove,org.apache.hadoop.hbase.client.TestAdmin,org.apache.hadoop.hbase.master.TestMasterFailover,org.apache.hadoop.hbase.regionserver.wal.TestLogRolling,org.apache.hadoop.hbase.master.TestDistributedLogSplitting,org.apache.hadoop.hbase.master.TestMasterRestartAfterDisablingTable,org.apache.hadoop.hbase.TestGlobalMemStoreSize,
+#org.apache.hadoop.hbase.mapreduce.TestTableInputFormatScan,org.apache.hadoop.hbase.catalog.TestMetaTableAccessorNoCluster,org.apache.hadoop.hbase.catalog.TestMetaTableAccessor,org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat,org.apache.hadoop.hbase.mapred.TestTableMapReduce,org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithAbort,org.apache.hadoop.hbase.coprocessor.TestMasterCoprocessorExceptionWithRemove,org.apache.hadoop.hbase.client.TestAdmin,org.apache.hadoop.hbase.master.TestMasterFailover,org.apache.hadoop.hbase.regionserver.wal.TestLogRolling,org.apache.hadoop.hbase.master.TestDistributedLogSplitting,org.apache.hadoop.hbase.master.TestMasterRestartAfterDisablingTable,org.apache.hadoop.hbase.TestGlobalMemStoreSize,
######################################### Internal parameters
#directory used for surefire & the source code.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
index c39d6ad..ad0c59f 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
@@ -40,9 +41,24 @@ public interface Server extends Abortable, Stoppable {
ZooKeeperWatcher getZooKeeper();
/**
- * @return Master's instance of {@link CatalogTracker}
+ * Returns reference to wrapped short-circuit (i.e. local, bypassing RPC layer entirely)
+ * HConnection to this server, which may be used for miscellaneous needs.
+ *
+ * Important note: this method returns reference to connection which is managed
+ * by Server itself, so callers must NOT attempt to close connection obtained.
+ *
+ * See {@link org.apache.hadoop.hbase.client.ConnectionUtils#createShortCircuitHConnection}
+ * for details on short-circuit connections.
*/
- CatalogTracker getCatalogTracker();
+ HConnection getShortCircuitConnection();
+
+ /**
+ * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
+ * running inside this server. This MetaTableLocator is started and stopped by the server, and
+ * clients shouldn't manage its lifecycle.
+ * @return instance of {@link MetaTableLocator} associated with this server.
+ */
+ MetaTableLocator getMetaTableLocator();
/**
* @return The unique server name for this server.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
deleted file mode 100644
index 3889317..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ /dev/null
@@ -1,457 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import com.google.common.base.Stopwatch;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.ipc.RemoteException;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.NoRouteToHostException;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-
-/**
- * Tracks the availability of the catalog tables
- * hbase:meta.
- *
- * This class is "read-only" in that the locations of the catalog tables cannot
- * be explicitly set. Instead, ZooKeeper is used to learn of the availability
- * and location of hbase:meta.
- *
- *
- * <p>Call {@link #start()} to start up operation.  Call {@link #stop()}} to
- * interrupt waits and close up shop.
- */
-@InterfaceAudience.Private
-public class CatalogTracker {
- // TODO JDC 11/30 We don't even have ROOT anymore, revisit
- // TODO: This class needs a rethink. The original intent was that it would be
- // the one-stop-shop for meta locations and that it would get this
- // info from reading and watching zk state. The class was to be used by
- // servers when they needed to know of meta movement but also by
- // client-side (inside in HTable) so rather than figure meta
- // locations on fault, the client would instead get notifications out of zk.
- //
- // But this original intent is frustrated by the fact that this class has to
- // read an hbase table, the -ROOT- table, to figure out the hbase:meta region
- // location which means we depend on an HConnection. HConnection will do
- // retrying but also, it has its own mechanism for finding root and meta
- // locations (and for 'verifying'; it tries the location and if it fails, does
- // new lookup, etc.). So, at least for now, HConnection (or HTable) can't
- // have a CT since CT needs a HConnection (Even then, do want HT to have a CT?
- // For HT keep up a session with ZK? Rather, shouldn't we do like asynchbase
- // where we'd open a connection to zk, read what we need then let the
- // connection go?). The 'fix' is make it so both root and meta addresses
- // are wholey up in zk -- not in zk (root) -- and in an hbase table (meta).
- //
- // But even then, this class does 'verification' of the location and it does
- // this by making a call over an HConnection (which will do its own root
- // and meta lookups). Isn't this verification 'useless' since when we
- // return, whatever is dependent on the result of this call then needs to
- // use HConnection; what we have verified may change in meantime (HConnection
- // uses the CT primitives, the root and meta trackers finding root locations).
- //
- // When meta is moved to zk, this class may make more sense. In the
- // meantime, it does not cohere. It should just watch meta and root and not
- // NOT do verification -- let that be out in HConnection since its going to
- // be done there ultimately anyways.
- //
- // This class has spread throughout the codebase. It needs to be reigned in.
- // This class should be used server-side only, even if we move meta location
- // up into zk. Currently its used over in the client package. Its used in
- // MetaReader and MetaEditor classes usually just to get the Configuration
- // its using (It does this indirectly by asking its HConnection for its
- // Configuration and even then this is just used to get an HConnection out on
- // the other end). I made https://issues.apache.org/jira/browse/HBASE-4495 for
- // doing CT fixup. St.Ack 09/30/2011.
- //
-
- // TODO: Timeouts have never been as advertised in here and its worse now
- // with retries; i.e. the HConnection retries and pause goes ahead whatever
- // the passed timeout is. Fix.
- private static final Log LOG = LogFactory.getLog(CatalogTracker.class);
- private final HConnection connection;
- private final ZooKeeperWatcher zookeeper;
- private final MetaRegionTracker metaRegionTracker;
- private boolean instantiatedzkw = false;
- private Abortable abortable;
-
- private boolean stopped = false;
-
- static final byte [] META_REGION_NAME =
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
-
- /**
- * Constructs a catalog tracker. Find current state of catalog tables.
- * Begin active tracking by executing {@link #start()} post construction. Does
- * not timeout.
- *
- * @param conf
- * the {@link Configuration} from which a {@link HConnection} will be
- * obtained; if problem, this connections
- * {@link HConnection#abort(String, Throwable)} will be called.
- * @throws IOException
- */
- public CatalogTracker(final Configuration conf) throws IOException {
- this(null, conf, HConnectionManager.getConnection(conf), null);
- }
-
- /**
- * Constructs the catalog tracker. Find current state of catalog tables.
- * Begin active tracking by executing {@link #start()} post construction.
- * Does not timeout.
- * @param zk If zk is null, we'll create an instance (and shut it down
- * when {@link #stop()} is called) else we'll use what is passed.
- * @param conf
- * @param abortable If fatal exception we'll call abort on this. May be null.
- * If it is we'll use the Connection associated with the passed
- * {@link Configuration} as our Abortable.
- * @throws IOException
- */
- public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
- HConnection connection, Abortable abortable)
- throws IOException {
- this.connection = connection;
- if (abortable == null) {
- // A connection is abortable.
- this.abortable = this.connection;
- } else {
- this.abortable = abortable;
- }
- Abortable throwableAborter = new Abortable() {
-
- @Override
- public void abort(String why, Throwable e) {
- throw new RuntimeException(why, e);
- }
-
- @Override
- public boolean isAborted() {
- return true;
- }
-
- };
- if (zk == null) {
- // Create our own. Set flag so we tear it down on stop.
- this.zookeeper =
- new ZooKeeperWatcher(conf, "catalogtracker-on-" + connection.toString(),
- abortable);
- instantiatedzkw = true;
- } else {
- this.zookeeper = zk;
- }
- this.metaRegionTracker = new MetaRegionTracker(zookeeper, throwableAborter);
- }
-
- /**
- * Starts the catalog tracker.
- * Determines current availability of catalog tables and ensures all further
- * transitions of either region are tracked.
- * @throws IOException
- * @throws InterruptedException
- */
- public void start() throws IOException, InterruptedException {
- LOG.debug("Starting catalog tracker " + this);
- try {
- this.metaRegionTracker.start();
- } catch (RuntimeException e) {
- Throwable t = e.getCause();
- this.abortable.abort(e.getMessage(), t);
- throw new IOException("Attempt to start meta tracker failed.", t);
- }
- }
-
- /**
- * Stop working.
- * Interrupts any ongoing waits.
- */
- public void stop() {
- if (!this.stopped) {
- LOG.debug("Stopping catalog tracker " + this);
- this.stopped = true;
- this.metaRegionTracker.stop();
- try {
- if (this.connection != null) {
- this.connection.close();
- }
- } catch (IOException e) {
- // Although the {@link Closeable} interface throws an {@link
- // IOException}, in reality, the implementation would never do that.
- LOG.error("Attempt to close catalog tracker's connection failed.", e);
- }
- if (this.instantiatedzkw) {
- this.zookeeper.close();
- }
- }
- }
-
- /**
- * Gets the current location for hbase:meta or null if location is
- * not currently available.
- * @return {@link ServerName} for server hosting hbase:meta or null
- * if none available
- * @throws InterruptedException
- */
- public ServerName getMetaLocation() throws InterruptedException {
- return this.metaRegionTracker.getMetaRegionLocation();
- }
-
- /**
- * Checks whether meta regionserver znode has some non null data.
- * @return true if data is not null, false otherwise.
- */
- public boolean isMetaLocationAvailable() {
- return this.metaRegionTracker.isLocationAvailable();
- }
- /**
- * Gets the current location for hbase:meta if available and waits
- * for up to the specified timeout if not immediately available. Returns null
- * if the timeout elapses before root is available.
- * @param timeout maximum time to wait for root availability, in milliseconds
- * @return {@link ServerName} for server hosting hbase:meta or null
- * if none available
- * @throws InterruptedException if interrupted while waiting
- * @throws NotAllMetaRegionsOnlineException if meta not available before
- * timeout
- */
- public ServerName waitForMeta(final long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException {
- ServerName sn = metaRegionTracker.waitMetaRegionLocation(timeout);
- if (sn == null) {
- throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms");
- }
- return sn;
- }
-
- /**
- * Gets a connection to the server hosting meta, as reported by ZooKeeper,
- * waiting up to the specified timeout for availability.
- * @param timeout How long to wait on meta location
- * @see #waitForMeta for additional information
- * @return connection to server hosting meta
- * @throws InterruptedException
- * @throws NotAllMetaRegionsOnlineException if timed out waiting
- * @throws IOException
- * @deprecated Use #getMetaServerConnection(long)
- */
- public AdminService.BlockingInterface waitForMetaServerConnection(long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
- return getMetaServerConnection(timeout);
- }
-
- /**
- * Gets a connection to the server hosting meta, as reported by ZooKeeper,
- * waiting up to the specified timeout for availability.
- *
- * <p>WARNING: Does not retry.  Use an {@link HTable} instead.
- * @param timeout How long to wait on meta location
- * @see #waitForMeta for additional information
- * @return connection to server hosting meta
- * @throws InterruptedException
- * @throws NotAllMetaRegionsOnlineException if timed out waiting
- * @throws IOException
- */
- AdminService.BlockingInterface getMetaServerConnection(long timeout)
- throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
- return getCachedConnection(waitForMeta(timeout));
- }
-
- /**
- * Waits indefinitely for availability of hbase:meta. Used during
- * cluster startup. Does not verify meta, just that something has been
- * set up in zk.
- * @see #waitForMeta(long)
- * @throws InterruptedException if interrupted while waiting
- */
- public void waitForMeta() throws InterruptedException {
- Stopwatch stopwatch = new Stopwatch().start();
- while (!this.stopped) {
- try {
- if (waitForMeta(100) != null) break;
- long sleepTime = stopwatch.elapsedMillis();
- // +1 in case sleepTime=0
- if ((sleepTime + 1) % 10000 == 0) {
- LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
- }
- } catch (NotAllMetaRegionsOnlineException e) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("hbase:meta still not available, sleeping and retrying." +
- " Reason: " + e.getMessage());
- }
- }
- }
- }
-
- /**
- * @param sn ServerName to get a connection against.
- * @return The AdminProtocol we got when we connected to sn
- * May have come from cache, may not be good, may have been setup by this
- * invocation, or may be null.
- * @throws IOException
- */
- @SuppressWarnings("deprecation")
- private AdminService.BlockingInterface getCachedConnection(ServerName sn)
- throws IOException {
- if (sn == null) {
- return null;
- }
- AdminService.BlockingInterface service = null;
- try {
- service = connection.getAdmin(sn);
- } catch (RetriesExhaustedException e) {
- if (e.getCause() != null && e.getCause() instanceof ConnectException) {
- // Catch this; presume it means the cached connection has gone bad.
- } else {
- throw e;
- }
- } catch (SocketTimeoutException e) {
- LOG.debug("Timed out connecting to " + sn);
- } catch (NoRouteToHostException e) {
- LOG.debug("Connecting to " + sn, e);
- } catch (SocketException e) {
- LOG.debug("Exception connecting to " + sn);
- } catch (UnknownHostException e) {
- LOG.debug("Unknown host exception connecting to " + sn);
- } catch (FailedServerException e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Server " + sn + " is in failed server list.");
- }
- } catch (IOException ioe) {
- Throwable cause = ioe.getCause();
- if (ioe instanceof ConnectException) {
- // Catch. Connect refused.
- } else if (cause != null && cause instanceof EOFException) {
- // Catch. Other end disconnected us.
- } else if (cause != null && cause.getMessage() != null &&
- cause.getMessage().toLowerCase().contains("connection reset")) {
- // Catch. Connection reset.
- } else {
- throw ioe;
- }
-
- }
- return service;
- }
-
- /**
- * Verify we can connect to hostingServer and that its carrying
- * regionName.
- * @param hostingServer Interface to the server hosting regionName
- * @param address The servername that goes with the metaServer
- * Interface. Used logging.
- * @param regionName The regionname we are interested in.
- * @return True if we were able to verify the region located at other side of
- * the Interface.
- * @throws IOException
- */
- // TODO: We should be able to get the ServerName from the AdminProtocol
- // rather than have to pass it in. Its made awkward by the fact that the
- // HRI is likely a proxy against remote server so the getServerName needs
- // to be fixed to go to a local method or to a cache before we can do this.
- private boolean verifyRegionLocation(AdminService.BlockingInterface hostingServer,
- final ServerName address, final byte [] regionName)
- throws IOException {
- if (hostingServer == null) {
- LOG.info("Passed hostingServer is null");
- return false;
- }
- Throwable t = null;
- try {
- // Try and get regioninfo from the hosting server.
- return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null;
- } catch (ConnectException e) {
- t = e;
- } catch (RetriesExhaustedException e) {
- t = e;
- } catch (RemoteException e) {
- IOException ioe = e.unwrapRemoteException();
- t = ioe;
- } catch (IOException e) {
- Throwable cause = e.getCause();
- if (cause != null && cause instanceof EOFException) {
- t = cause;
- } else if (cause != null && cause.getMessage() != null
- && cause.getMessage().contains("Connection reset")) {
- t = cause;
- } else {
- t = e;
- }
- }
- LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
- " at address=" + address + ", exception=" + t);
- return false;
- }
-
- /**
- * Verify hbase:meta is deployed and accessible.
- * @param timeout How long to wait on zk for meta address (passed through to
- * the internal call to {@link #waitForMetaServerConnection(long)}.
- * @return True if the hbase:meta location is healthy.
- * @throws IOException
- * @throws InterruptedException
- */
- public boolean verifyMetaRegionLocation(final long timeout)
- throws InterruptedException, IOException {
- AdminService.BlockingInterface service = null;
- try {
- service = waitForMetaServerConnection(timeout);
- } catch (NotAllMetaRegionsOnlineException e) {
- // Pass
- } catch (ServerNotRunningYetException e) {
- // Pass -- remote server is not up so can't be carrying root
- } catch (UnknownHostException e) {
- // Pass -- server name doesn't resolve so it can't be assigned anything.
- } catch (RegionServerStoppedException e) {
- // Pass -- server name sends us to a server that is dying or already dead.
- }
- return (service == null)? false:
- verifyRegionLocation(service,
- this.metaRegionTracker.getMetaRegionLocation(), META_REGION_NAME);
- }
-
- public HConnection getConnection() {
- return this.connection;
- }
-
- @Override
- public String toString() {
- return "CatalogTracker{" + "connection=" + connection + ", zookeeper=" + zookeeper +
- ", metaRegionTracker=" + metaRegionTracker + ", stopped=" + stopped + '}';
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
deleted file mode 100644
index ec0f4a0..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ /dev/null
@@ -1,618 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeMap;
-
-/**
- * Reads region and assignment information from hbase:meta.
- */
-@InterfaceAudience.Private
-public class MetaReader {
- // TODO: Strip CatalogTracker from this class. Its all over and in the end
- // its only used to get its Configuration so we can get associated
- // Connection.
- private static final Log LOG = LogFactory.getLog(MetaReader.class);
-
- static final byte [] META_REGION_PREFIX;
- static {
- // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
- // FIRST_META_REGIONINFO == 'hbase:meta,,1'. META_REGION_PREFIX == 'hbase:meta,'
- int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
- META_REGION_PREFIX = new byte [len];
- System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
- META_REGION_PREFIX, 0, len);
- }
-
- /**
- * Performs a full scan of hbase:meta, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
- CatalogTracker catalogTracker, final Set<TableName> disabledTables)
- throws IOException {
- return fullScan(catalogTracker, disabledTables, false);
- }
-
- /**
- * Performs a full scan of hbase:meta, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @param excludeOfflinedSplitParents If true, do not include offlined split
- * parents in the return.
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
- CatalogTracker catalogTracker, final Set<TableName> disabledTables,
- final boolean excludeOfflinedSplitParents)
- throws IOException {
- final Map<HRegionInfo, ServerName> regions =
- new TreeMap<HRegionInfo, ServerName>();
- Visitor v = new Visitor() {
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- Pair<HRegionInfo, ServerName> region = HRegionInfo.getHRegionInfoAndServerName(r);
- HRegionInfo hri = region.getFirst();
- if (hri == null) return true;
- if (hri.getTable() == null) return true;
- if (disabledTables.contains(
- hri.getTable())) return true;
- // Are we to include split parents in the list?
- if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
- regions.put(hri, region.getSecond());
- return true;
- }
- };
- fullScan(catalogTracker, v);
- return regions;
- }
-
- /**
- * Performs a full scan of hbase:meta.
- * @return List of {@link Result}
- * @throws IOException
- */
- public static List<Result> fullScan(CatalogTracker catalogTracker)
- throws IOException {
- CollectAllVisitor v = new CollectAllVisitor();
- fullScan(catalogTracker, v, null);
- return v.getResults();
- }
-
- /**
- * Performs a full scan of a hbase:meta table.
- * @return List of {@link Result}
- * @throws IOException
- */
- public static List<Result> fullScanOfMeta(CatalogTracker catalogTracker)
- throws IOException {
- CollectAllVisitor v = new CollectAllVisitor();
- fullScan(catalogTracker, v, null);
- return v.getResults();
- }
-
- /**
- * Performs a full scan of hbase:meta.
- * @param catalogTracker
- * @param visitor Visitor invoked against each row.
- * @throws IOException
- */
- public static void fullScan(CatalogTracker catalogTracker,
- final Visitor visitor)
- throws IOException {
- fullScan(catalogTracker, visitor, null);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param catalogTracker We'll use this catalogtracker's connection
- * @param tableName Table to get an {@link HTable} against.
- * @return An {@link HTable} for tableName
- * @throws IOException
- */
- @SuppressWarnings("deprecation")
- private static HTable getHTable(final CatalogTracker catalogTracker,
- final TableName tableName)
- throws IOException {
- // Passing the CatalogTracker's connection ensures this
- // HTable instance uses the CatalogTracker's connection.
- org.apache.hadoop.hbase.client.HConnection c = catalogTracker.getConnection();
- if (c == null) throw new NullPointerException("No connection");
- return new HTable(tableName, c);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param catalogTracker
- * @return An {@link HTable} for hbase:meta
- * @throws IOException
- */
- static HTable getCatalogHTable(final CatalogTracker catalogTracker)
- throws IOException {
- return getMetaHTable(catalogTracker);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param ct
- * @return An {@link HTable} for hbase:meta
- * @throws IOException
- */
- static HTable getMetaHTable(final CatalogTracker ct)
- throws IOException {
- return getHTable(ct, TableName.META_TABLE_NAME);
- }
-
- /**
- * @param t Table to use (will be closed when done).
- * @param g Get to run
- * @throws IOException
- */
- private static Result get(final HTable t, final Get g) throws IOException {
- try {
- return t.get(g);
- } finally {
- t.close();
- }
- }
-
- /**
- * Reads the location of the specified region
- * @param catalogTracker
- * @param regionName region whose location we are after
- * @return location of region as a {@link ServerName} or null if not found
- * @throws IOException
- */
- static ServerName readRegionLocation(CatalogTracker catalogTracker,
- byte [] regionName)
- throws IOException {
- Pair<HRegionInfo, ServerName> pair = getRegion(catalogTracker, regionName);
- return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
- }
-
- /**
- * Gets the region info and assignment for the specified region.
- * @param catalogTracker
- * @param regionName Region to lookup.
- * @return Location and HRegionInfo for regionName
- * @throws IOException
- */
- public static Pair<HRegionInfo, ServerName> getRegion(
- CatalogTracker catalogTracker, byte [] regionName)
- throws IOException {
- Get get = new Get(regionName);
- get.addFamily(HConstants.CATALOG_FAMILY);
- Result r = get(getCatalogHTable(catalogTracker), get);
- return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r);
- }
-
- /**
- * Gets the result in hbase:meta for the specified region.
- * @param catalogTracker
- * @param regionName
- * @return result of the specified region
- * @throws IOException
- */
- public static Result getRegionResult(CatalogTracker catalogTracker,
- byte[] regionName) throws IOException {
- Get get = new Get(regionName);
- get.addFamily(HConstants.CATALOG_FAMILY);
- return get(getCatalogHTable(catalogTracker), get);
- }
-
- /**
- * Get regions from the merge qualifier of the specified merged region
- * @return null if it doesn't contain merge qualifier, else two merge regions
- * @throws IOException
- */
- public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
- CatalogTracker catalogTracker, byte[] regionName) throws IOException {
- Result result = getRegionResult(catalogTracker, regionName);
- HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
- HConstants.MERGEA_QUALIFIER);
- HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
- HConstants.MERGEB_QUALIFIER);
- if (mergeA == null && mergeB == null) {
- return null;
- }
- return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
- }
-
- /**
- * Checks if the specified table exists. Looks at the hbase:meta table hosted on
- * the specified server.
- * @param catalogTracker
- * @param tableName table to check
- * @return true if the table exists in meta, false if not
- * @throws IOException
- */
- public static boolean tableExists(CatalogTracker catalogTracker,
- final TableName tableName)
- throws IOException {
- if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
- // Catalog tables always exist.
- return true;
- }
- // Make a version of ResultCollectingVisitor that only collects the first
- CollectingVisitor visitor = new CollectingVisitor() {
- private HRegionInfo current = null;
-
- @Override
- public boolean visit(Result r) throws IOException {
- this.current =
- HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
- if (this.current == null) {
- LOG.warn("No serialized HRegionInfo in " + r);
- return true;
- }
- if (!isInsideTable(this.current, tableName)) return false;
- // Else call super and add this Result to the collection.
- super.visit(r);
- // Stop collecting regions from table after we get one.
- return false;
- }
-
- @Override
- void add(Result r) {
- // Add the current HRI.
- this.results.add(this.current);
- }
- };
- fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName));
- // If visitor has results >= 1 then table exists.
- return visitor.getResults().size() >= 1;
- }
-
- /**
- * Gets all of the regions of the specified table.
- * @param catalogTracker
- * @param tableName
- * @return Ordered list of {@link HRegionInfo}.
- * @throws IOException
- */
- public static List getTableRegions(CatalogTracker catalogTracker,
- TableName tableName)
- throws IOException {
- return getTableRegions(catalogTracker, tableName, false);
- }
-
- /**
- * Gets all of the regions of the specified table.
- * @param catalogTracker
- * @param tableName
- * @param excludeOfflinedSplitParents If true, do not include offlined split
- * parents in the return.
- * @return Ordered list of {@link HRegionInfo}.
- * @throws IOException
- */
- public static List getTableRegions(CatalogTracker catalogTracker,
- TableName tableName, final boolean excludeOfflinedSplitParents)
- throws IOException {
- List> result = null;
- try {
- result = getTableRegionsAndLocations(catalogTracker, tableName,
- excludeOfflinedSplitParents);
- } catch (InterruptedException e) {
- throw (InterruptedIOException)new InterruptedIOException().initCause(e);
- }
- return getListOfHRegionInfos(result);
- }
-
- static List getListOfHRegionInfos(final List> pairs) {
- if (pairs == null || pairs.isEmpty()) return null;
- List result = new ArrayList(pairs.size());
- for (Pair pair: pairs) {
- result.add(pair.getFirst());
- }
- return result;
- }
-
- /**
- * @param current
- * @param tableName
- * @return True if current tablename is equal to
- * tableName
- */
- static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
- return tableName.equals(current.getTable());
- }
-
- /**
- * @param tableName
- * @return Place to start Scan in hbase:meta when passed a
- * tableName; returns <tableName&rt; <,&rt; <,&rt;
- */
- static byte [] getTableStartRowForMeta(TableName tableName) {
- byte [] startRow = new byte[tableName.getName().length + 2];
- System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
- startRow[startRow.length - 2] = HConstants.DELIMITER;
- startRow[startRow.length - 1] = HConstants.DELIMITER;
- return startRow;
- }
-
- /**
- * This method creates a Scan object that will only scan catalog rows that
- * belong to the specified table. It doesn't specify any columns.
- * This is a better alternative to just using a start row and scan until
- * it hits a new table since that requires parsing the HRI to get the table
- * name.
- * @param tableName bytes of table's name
- * @return configured Scan object
- */
- public static Scan getScanForTableName(TableName tableName) {
- String strName = tableName.getNameAsString();
- // Start key is just the table name with delimiters
- byte[] startKey = Bytes.toBytes(strName + ",,");
- // Stop key appends the smallest possible char to the table name
- byte[] stopKey = Bytes.toBytes(strName + " ,,");
-
- Scan scan = new Scan(startKey);
- scan.setStopRow(stopKey);
- return scan;
- }
-
- /**
- * @param catalogTracker
- * @param tableName
- * @return Return list of regioninfos and server.
- * @throws IOException
- * @throws InterruptedException
- */
- public static List>
- getTableRegionsAndLocations(CatalogTracker catalogTracker, TableName tableName)
- throws IOException, InterruptedException {
- return getTableRegionsAndLocations(catalogTracker, tableName,
- true);
- }
-
- /**
- * @param catalogTracker
- * @param tableName
- * @return Return list of regioninfos and server addresses.
- * @throws IOException
- * @throws InterruptedException
- */
- public static List>
- getTableRegionsAndLocations(final CatalogTracker catalogTracker,
- final TableName tableName, final boolean excludeOfflinedSplitParents)
- throws IOException, InterruptedException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
- // If meta, do a bit of special handling.
- ServerName serverName = catalogTracker.getMetaLocation();
- List> list =
- new ArrayList>();
- list.add(new Pair(HRegionInfo.FIRST_META_REGIONINFO,
- serverName));
- return list;
- }
- // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
- CollectingVisitor> visitor =
- new CollectingVisitor>() {
- private Pair current = null;
-
- @Override
- public boolean visit(Result r) throws IOException {
- HRegionInfo hri =
- HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
- if (hri == null) {
- LOG.warn("No serialized HRegionInfo in " + r);
- return true;
- }
- if (!isInsideTable(hri, tableName)) return false;
- if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
- ServerName sn = HRegionInfo.getServerName(r);
- // Populate this.current so available when we call #add
- this.current = new Pair(hri, sn);
- // Else call super and add this Result to the collection.
- return super.visit(r);
- }
-
- @Override
- void add(Result r) {
- this.results.add(this.current);
- }
- };
- fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName));
- return visitor.getResults();
- }
-
- /**
- * @param catalogTracker
- * @param serverName
- * @return List of user regions installed on this server (does not include
- * catalog regions).
- * @throws IOException
- */
- public static NavigableMap
- getServerUserRegions(CatalogTracker catalogTracker, final ServerName serverName)
- throws IOException {
- final NavigableMap hris = new TreeMap();
- // Fill the above hris map with entries from hbase:meta that have the passed
- // servername.
- CollectingVisitor v = new CollectingVisitor() {
- @Override
- void add(Result r) {
- if (r == null || r.isEmpty()) return;
- if (HRegionInfo.getHRegionInfo(r) == null) return;
- ServerName sn = HRegionInfo.getServerName(r);
- if (sn != null && sn.equals(serverName)) {
- this.results.add(r);
- }
- }
- };
- fullScan(catalogTracker, v);
- List results = v.getResults();
- if (results != null && !results.isEmpty()) {
- // Convert results to Map keyed by HRI
- for (Result r: results) {
- HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
- if (hri != null) hris.put(hri, r);
- }
- }
- return hris;
- }
-
- public static void fullScanMetaAndPrint(final CatalogTracker catalogTracker)
- throws IOException {
- Visitor v = new Visitor() {
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
- HRegionInfo hrim = HRegionInfo.getHRegionInfo(r);
- LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
- return true;
- }
- };
- fullScan(catalogTracker, v);
- }
-
- /**
- * Performs a full scan of a catalog table.
- * @param catalogTracker
- * @param visitor Visitor invoked against each row.
- * @param startrow Where to start the scan. Pass null if want to begin scan
- * at first row.
- * hbase:meta, the default (pass false to scan hbase:meta)
- * @throws IOException
- */
- public static void fullScan(CatalogTracker catalogTracker,
- final Visitor visitor, final byte [] startrow)
- throws IOException {
- Scan scan = new Scan();
- if (startrow != null) scan.setStartRow(startrow);
- if (startrow == null) {
- int caching = catalogTracker.getConnection().getConfiguration()
- .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
- scan.setCaching(caching);
- }
- scan.addFamily(HConstants.CATALOG_FAMILY);
- HTable metaTable = getMetaHTable(catalogTracker);
- ResultScanner scanner = null;
- try {
- scanner = metaTable.getScanner(scan);
- Result data;
- while((data = scanner.next()) != null) {
- if (data.isEmpty()) continue;
- // Break if visit returns false.
- if (!visitor.visit(data)) break;
- }
- } finally {
- if (scanner != null) scanner.close();
- metaTable.close();
- }
- return;
- }
-
- /**
- * Implementations 'visit' a catalog table row.
- */
- public interface Visitor {
- /**
- * Visit the catalog table row.
- * @param r A row from catalog table
- * @return True if we are to proceed scanning the table, else false if
- * we are to stop now.
- */
- boolean visit(final Result r) throws IOException;
- }
-
- /**
- * A {@link Visitor} that collects content out of passed {@link Result}.
- */
- static abstract class CollectingVisitor implements Visitor {
- final List results = new ArrayList();
- @Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- add(r);
- return true;
- }
-
- abstract void add(Result r);
-
- /**
- * @return Collected results; wait till visits complete to collect all
- * possible results
- */
- List getResults() {
- return this.results;
- }
- }
-
- /**
- * Collects all returned.
- */
- static class CollectAllVisitor extends CollectingVisitor {
- @Override
- void add(Result r) {
- this.results.add(r);
- }
- }
-
- /**
- * Count regions in hbase:meta for passed table.
- * @param c
- * @param tableName
- * @return Count or regions in table tableName
- * @throws IOException
- */
- public static int getRegionCount(final Configuration c, final String tableName) throws IOException {
- HTable t = new HTable(c, tableName);
- try {
- return t.getRegionLocations().size();
- } finally {
- t.close();
- }
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java
new file mode 100644
index 0000000..a242a61
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaTableAccessor.java
@@ -0,0 +1,1154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.ConnectException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Read/write operations on region and assignment information stored in
+ * hbase:meta.
+ *
+ * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
+ * for this is because when used on client-side (like from HBaseAdmin), we want to use
+ * short-living connection (opened before each operation, closed right after), while
+ * when used on HM or HRS (like in AssignmentManager) we want permanent connection.
+ */
+@InterfaceAudience.Private
+public class MetaTableAccessor {
+ private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
+
+ static final byte [] META_REGION_PREFIX;
+ static {
+ // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
+ // FIRST_META_REGIONINFO == 'hbase:meta,,1'. META_REGION_PREFIX == 'hbase:meta,'
+ int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
+ META_REGION_PREFIX = new byte [len];
+ System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
+ META_REGION_PREFIX, 0, len);
+ }
+
+ ////////////////////////
+ // Reading operations //
+ ////////////////////////
+
+ /**
+ * Performs a full scan of hbase:meta, skipping regions from any
+ * tables in the specified set of disabled tables.
+ * @param hConnection connection we're using
+ * @param disabledTables set of disabled tables that will not be returned
+ * @return Returns a map of every region to its currently assigned server,
+ * according to META. If the region does not have an assignment it will have
+ * a null value in the map.
+ * @throws IOException
+ */
+ public static Map fullScan(
+ HConnection hConnection, final Set disabledTables)
+ throws IOException {
+ return fullScan(hConnection, disabledTables, false);
+ }
+
+ /**
+ * Performs a full scan of hbase:meta, skipping regions from any
+ * tables in the specified set of disabled tables.
+ * @param hConnection connection we're using
+ * @param disabledTables set of disabled tables that will not be returned
+ * @param excludeOfflinedSplitParents If true, do not include offlined split
+ * parents in the return.
+ * @return Returns a map of every region to its currently assigned server,
+ * according to META. If the region does not have an assignment it will have
+ * a null value in the map.
+ * @throws IOException
+ */
+ public static Map fullScan(
+ HConnection hConnection, final Set disabledTables,
+ final boolean excludeOfflinedSplitParents)
+ throws IOException {
+ final Map regions =
+ new TreeMap();
+ Visitor v = new Visitor() {
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ Pair region = HRegionInfo.getHRegionInfoAndServerName(r);
+ HRegionInfo hri = region.getFirst();
+ if (hri == null) return true;
+ if (hri.getTable() == null) return true;
+ if (disabledTables.contains(
+ hri.getTable())) return true;
+ // Are we to include split parents in the list?
+ if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+ regions.put(hri, region.getSecond());
+ return true;
+ }
+ };
+ fullScan(hConnection, v);
+ return regions;
+ }
+
+ /**
+ * Performs a full scan of a hbase:meta table.
+ * @return List of {@link Result}
+ * @throws IOException
+ */
+ public static List fullScanOfMeta(HConnection hConnection)
+ throws IOException {
+ CollectAllVisitor v = new CollectAllVisitor();
+ fullScan(hConnection, v, null);
+ return v.getResults();
+ }
+
+ /**
+ * Performs a full scan of hbase:meta.
+ * @param hConnection connection we're using
+ * @param visitor Visitor invoked against each row.
+ * @throws IOException
+ */
+ public static void fullScan(HConnection hConnection,
+ final Visitor visitor)
+ throws IOException {
+ fullScan(hConnection, visitor, null);
+ }
+
+ /**
+ * Performs a full scan of hbase:meta.
+ * @param hConnection connection we're using
+ * @return List of {@link Result}
+ * @throws IOException
+ */
+ public static List fullScan(HConnection hConnection)
+ throws IOException {
+ CollectAllVisitor v = new CollectAllVisitor();
+ fullScan(hConnection, v, null);
+ return v.getResults();
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param hConnection connection we're using to access table
+ * @param tableName Table to get an {@link HTable} against.
+ * @return An {@link HTable} for tableName
+ * @throws IOException
+ * @SuppressWarnings("deprecation")
+ */
+ private static HTable getHTable(final HConnection hConnection,
+ final TableName tableName)
+ throws IOException {
+ // We used to pass whole CatalogTracker in here, now we just pass in HConnection
+ if (hConnection == null || hConnection.isClosed()) {
+ throw new NullPointerException("No connection");
+ }
+ return new HTable(tableName, hConnection);
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param hConnection connection we're using to access Meta
+ * @return An {@link HTable} for hbase:meta
+ * @throws IOException
+ */
+ static HTable getMetaHTable(final HConnection hConnection)
+ throws IOException {
+ return getHTable(hConnection, TableName.META_TABLE_NAME);
+ }
+
+ /**
+ * @param t Table to use (will be closed when done).
+ * @param g Get to run
+ * @throws IOException
+ */
+ private static Result get(final HTable t, final Get g) throws IOException {
+ try {
+ return t.get(g);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Reads the location of the specified region
+ * @param hConnection connection we're using
+ * @param regionName region whose location we are after
+ * @return location of region as a {@link ServerName} or null if not found
+ * @throws IOException
+ */
+ static ServerName readRegionLocation(HConnection hConnection,
+ byte [] regionName)
+ throws IOException {
+ Pair pair = getRegion(hConnection, regionName);
+ return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
+ }
+
+ /**
+ * Gets the region info and assignment for the specified region.
+ * @param hConnection connection we're using
+ * @param regionName Region to lookup.
+ * @return Location and HRegionInfo for regionName
+ * @throws IOException
+ */
+ public static Pair getRegion(
+ HConnection hConnection, byte [] regionName)
+ throws IOException {
+ Get get = new Get(regionName);
+ get.addFamily(HConstants.CATALOG_FAMILY);
+ Result r = get(getMetaHTable(hConnection), get);
+ return (r == null || r.isEmpty())? null: HRegionInfo.getHRegionInfoAndServerName(r);
+ }
+
+ /**
+ * Gets the result in hbase:meta for the specified region.
+ * @param hConnection connection we're using
+ * @param regionName region we're looking for
+ * @return result of the specified region
+ * @throws IOException
+ */
+ public static Result getRegionResult(HConnection hConnection,
+ byte[] regionName) throws IOException {
+ Get get = new Get(regionName);
+ get.addFamily(HConstants.CATALOG_FAMILY);
+ return get(getMetaHTable(hConnection), get);
+ }
+
+ /**
+ * Get regions from the merge qualifier of the specified merged region
+ * @return null if it doesn't contain merge qualifier, else two merge regions
+ * @throws IOException
+ */
+ public static Pair getRegionsFromMergeQualifier(
+ HConnection hConnection, byte[] regionName) throws IOException {
+ Result result = getRegionResult(hConnection, regionName);
+ HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEA_QUALIFIER);
+ HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEB_QUALIFIER);
+ if (mergeA == null && mergeB == null) {
+ return null;
+ }
+ return new Pair(mergeA, mergeB);
+ }
+
+ /**
+ * Checks if the specified table exists. Looks at the hbase:meta table hosted on
+ * the specified server.
+ * @param hConnection connection we're using
+ * @param tableName table to check
+ * @return true if the table exists in meta, false if not
+ * @throws IOException
+ */
+ public static boolean tableExists(HConnection hConnection,
+ final TableName tableName)
+ throws IOException {
+ if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
+ // Catalog tables always exist.
+ return true;
+ }
+ // Make a version of CollectingVisitor that only collects the first result
+ CollectingVisitor visitor = new CollectingVisitor() {
+ private HRegionInfo current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ this.current =
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (this.current == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
+ return true;
+ }
+ if (!isInsideTable(this.current, tableName)) return false;
+ // Else call super and add this Result to the collection.
+ super.visit(r);
+ // Stop collecting regions from table after we get one.
+ return false;
+ }
+
+ @Override
+ void add(Result r) {
+ // Add the current HRI.
+ this.results.add(this.current);
+ }
+ };
+ fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+ // If visitor has results >= 1 then table exists.
+ return visitor.getResults().size() >= 1;
+ }
+
+ /**
+ * Gets all of the regions of the specified table.
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Ordered list of {@link HRegionInfo}.
+ * @throws IOException
+ */
+ public static List getTableRegions(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName)
+ throws IOException {
+ return getTableRegions(zkw, hConnection, tableName, false);
+ }
+
+ /**
+ * Gets all of the regions of the specified table.
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @param excludeOfflinedSplitParents If true, do not include offlined split
+ * parents in the return.
+ * @return Ordered list of {@link HRegionInfo}.
+ * @throws IOException
+ */
+ public static List getTableRegions(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName, final boolean excludeOfflinedSplitParents)
+ throws IOException {
+ List> result = null;
+ try {
+ result = getTableRegionsAndLocations(zkw, hConnection, tableName,
+ excludeOfflinedSplitParents);
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ return getListOfHRegionInfos(result);
+ }
+
+ static List getListOfHRegionInfos(final List> pairs) {
+ if (pairs == null || pairs.isEmpty()) return null;
+ List result = new ArrayList(pairs.size());
+ for (Pair pair: pairs) {
+ result.add(pair.getFirst());
+ }
+ return result;
+ }
+
+ /**
+ * @param current
+ * @param tableName
+ * @return True if current tablename is equal to
+ * tableName
+ */
+ static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
+ return tableName.equals(current.getTable());
+ }
+
+ /**
+ * @param tableName
+ * @return Place to start Scan in hbase:meta when passed a
+ * tableName; returns &lt;tableName&gt; &lt;,&gt; &lt;,&gt;
+ */
+ static byte [] getTableStartRowForMeta(TableName tableName) {
+ byte [] startRow = new byte[tableName.getName().length + 2];
+ System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
+ startRow[startRow.length - 2] = HConstants.DELIMITER;
+ startRow[startRow.length - 1] = HConstants.DELIMITER;
+ return startRow;
+ }
+
+ /**
+ * This method creates a Scan object that will only scan catalog rows that
+ * belong to the specified table. It doesn't specify any columns.
+ * This is a better alternative to just using a start row and scan until
+ * it hits a new table since that requires parsing the HRI to get the table
+ * name.
+ * @param tableName bytes of table's name
+ * @return configured Scan object
+ */
+ public static Scan getScanForTableName(TableName tableName) {
+ String strName = tableName.getNameAsString();
+ // Start key is just the table name with delimiters
+ byte[] startKey = Bytes.toBytes(strName + ",,");
+ // Stop key appends the smallest possible char to the table name
+ byte[] stopKey = Bytes.toBytes(strName + " ,,");
+
+ Scan scan = new Scan(startKey);
+ scan.setStopRow(stopKey);
+ return scan;
+ }
+
+ /**
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Return list of regioninfos and server.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public static List>
+ getTableRegionsAndLocations(ZooKeeperWatcher zkw,
+ HConnection hConnection, TableName tableName)
+ throws IOException, InterruptedException {
+ return getTableRegionsAndLocations(zkw, hConnection, tableName, true);
+ }
+
+ /**
+ * @param zkw zookeeper connection to access meta table
+ * @param hConnection connection we're using
+ * @param tableName table we're looking for
+ * @return Return list of regioninfos and server addresses.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public static List>
+ getTableRegionsAndLocations(final ZooKeeperWatcher zkw,
+ final HConnection hConnection, final TableName tableName,
+ final boolean excludeOfflinedSplitParents)
+ throws IOException, InterruptedException {
+ if (tableName.equals(TableName.META_TABLE_NAME)) {
+ // If meta, do a bit of special handling.
+ ServerName serverName;
+ serverName = new MetaTableLocator().getMetaRegionLocation(zkw);
+ List> list =
+ new ArrayList>();
+ list.add(new Pair(HRegionInfo.FIRST_META_REGIONINFO,
+ serverName));
+ return list;
+ }
+ // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
+ CollectingVisitor> visitor =
+ new CollectingVisitor>() {
+ private Pair current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ HRegionInfo hri =
+ HRegionInfo.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (hri == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
+ return true;
+ }
+ if (!isInsideTable(hri, tableName)) return false;
+ if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+ ServerName sn = HRegionInfo.getServerName(r);
+ // Populate this.current so available when we call #add
+ this.current = new Pair(hri, sn);
+ // Else call super and add this Result to the collection.
+ return super.visit(r);
+ }
+
+ @Override
+ void add(Result r) {
+ this.results.add(this.current);
+ }
+ };
+ fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
+ return visitor.getResults();
+ }
+
+ /**
+ * @param hConnection connection we're using
+ * @param serverName server whose regions we are interested in
+ * @return List of user regions installed on this server (does not include
+ * catalog regions).
+ * @throws IOException
+ */
+ public static NavigableMap
+ getServerUserRegions(HConnection hConnection, final ServerName serverName)
+ throws IOException {
+ final NavigableMap hris = new TreeMap();
+ // Fill the above hris map with entries from hbase:meta that have the passed
+ // servername.
+ CollectingVisitor v = new CollectingVisitor() {
+ @Override
+ void add(Result r) {
+ if (r == null || r.isEmpty()) return;
+ if (HRegionInfo.getHRegionInfo(r) == null) return;
+ ServerName sn = HRegionInfo.getServerName(r);
+ if (sn != null && sn.equals(serverName)) {
+ this.results.add(r);
+ }
+ }
+ };
+ fullScan(hConnection, v);
+ List results = v.getResults();
+ if (results != null && !results.isEmpty()) {
+ // Convert results to Map keyed by HRI
+ for (Result r: results) {
+ HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
+ if (hri != null) hris.put(hri, r);
+ }
+ }
+ return hris;
+ }
+
+ public static void fullScanMetaAndPrint(final HConnection hConnection)
+ throws IOException {
+ Visitor v = new Visitor() {
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
+ HRegionInfo hrim = HRegionInfo.getHRegionInfo(r);
+ LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
+ return true;
+ }
+ };
+ fullScan(hConnection, v);
+ }
+
+ /**
+ * Performs a full scan of a catalog table.
+ * @param hConnection connection we're using
+ * @param visitor Visitor invoked against each row.
+ * @param startrow Where to start the scan. Pass null if want to begin scan
+ * at first row.
+ * This method always scans hbase:meta.
+ * @throws IOException
+ */
+ public static void fullScan(HConnection hConnection,
+ final Visitor visitor, final byte [] startrow)
+ throws IOException {
+ Scan scan = new Scan();
+ if (startrow != null) scan.setStartRow(startrow);
+ if (startrow == null) {
+ int caching = hConnection.getConfiguration()
+ .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
+ scan.setCaching(caching);
+ }
+ scan.addFamily(HConstants.CATALOG_FAMILY);
+ HTable metaTable = getMetaHTable(hConnection);
+ ResultScanner scanner = null;
+ try {
+ scanner = metaTable.getScanner(scan);
+ Result data;
+ while((data = scanner.next()) != null) {
+ if (data.isEmpty()) continue;
+ // Break if visit returns false.
+ if (!visitor.visit(data)) break;
+ }
+ } finally {
+ if (scanner != null) scanner.close();
+ metaTable.close();
+ }
+ return;
+ }
+
+ /**
+ * Implementations 'visit' a catalog table row.
+ */
+ public interface Visitor {
+ /**
+ * Visit the catalog table row.
+ * @param r A row from catalog table
+ * @return True if we are to proceed scanning the table, else false if
+ * we are to stop now.
+ */
+ boolean visit(final Result r) throws IOException;
+ }
+
+ /**
+ * A {@link Visitor} that collects content out of passed {@link Result}.
+ */
+ static abstract class CollectingVisitor implements Visitor {
+ final List results = new ArrayList();
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ add(r);
+ return true;
+ }
+
+ abstract void add(Result r);
+
+ /**
+ * @return Collected results; wait till visits complete to collect all
+ * possible results
+ */
+ List getResults() {
+ return this.results;
+ }
+ }
+
+ /**
+ * Collects all returned.
+ */
+ static class CollectAllVisitor extends CollectingVisitor {
+ @Override
+ void add(Result r) {
+ this.results.add(r);
+ }
+ }
+
+ /**
+ * Count regions in hbase:meta for passed table.
+ * @param c
+ * @param tableName
+ * @return Count of regions in table tableName
+ * @throws IOException
+ */
+ public static int getRegionCount(final Configuration c, final String tableName) throws IOException {
+ HTable t = new HTable(c, tableName);
+ try {
+ return t.getRegionLocations().size();
+ } finally {
+ t.close();
+ }
+ }
+
+ ////////////////////////
+ // Editing operations //
+ ////////////////////////
+
+ /**
+ * Generates and returns a Put containing the region info for the catalog table
+ */
+ public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
+ throws IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addRegionInfo(put, regionInfo);
+ return put;
+ }
+
+ /**
+ * Generates and returns a Delete containing the region info for the catalog
+ * table
+ */
+ public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
+ if (regionInfo == null) {
+ throw new IllegalArgumentException("Can't make a delete for null region");
+ }
+ Delete delete = new Delete(regionInfo.getRegionName());
+ return delete;
+ }
+
+ /**
+ * Adds split daughters to the Put
+ */
+ public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
+ if (splitA != null) {
+ put.addImmutable(
+ HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
+ }
+ if (splitB != null) {
+ put.addImmutable(
+ HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
+ }
+ return put;
+ }
+
+ /**
+ * Put the passed p to the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param p Put to add to hbase:meta
+ * @throws IOException
+ */
+ static void putToMetaTable(final HConnection hConnection, final Put p)
+ throws IOException {
+ put(getMetaHTable(hConnection), p);
+ }
+
+ /**
+ * @param t Table to use (will be closed when done).
+ * @param p put to make
+ * @throws IOException
+ */
+ private static void put(final HTable t, final Put p) throws IOException {
+ try {
+ t.put(p);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Put the passed ps to the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param ps Puts to add to hbase:meta
+ * @throws IOException
+ */
+ public static void putsToMetaTable(final HConnection hConnection, final List ps)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.put(ps);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Delete the passed d from the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param d Delete to add to hbase:meta
+ * @throws IOException
+ */
+ static void deleteFromMetaTable(final HConnection hConnection, final Delete d)
+ throws IOException {
+ List dels = new ArrayList(1);
+ dels.add(d);
+ deleteFromMetaTable(hConnection, dels);
+ }
+
+ /**
+ * Delete the passed deletes from the hbase:meta table.
+ * @param hConnection connection we're using
+ * @param deletes Deletes to add to hbase:meta This list should support #remove.
+ * @throws IOException
+ */
+ public static void deleteFromMetaTable(final HConnection hConnection, final List deletes)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.delete(deletes);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Execute the passed mutations against hbase:meta table.
+ * @param hConnection connection we're using
+ * @param mutations Puts and Deletes to execute on hbase:meta
+ * @throws IOException
+ */
+ public static void mutateMetaTable(final HConnection hConnection,
+ final List mutations)
+ throws IOException {
+ HTable t = getMetaHTable(hConnection);
+ try {
+ t.batch(mutations);
+ } catch (InterruptedException e) {
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Adds a hbase:meta row for the specified new region.
+ * @param hConnection connection we're using
+ * @param regionInfo region information
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HConnection hConnection,
+ HRegionInfo regionInfo)
+ throws IOException {
+ putToMetaTable(hConnection, makePutFromRegionInfo(regionInfo));
+ LOG.info("Added " + regionInfo.getRegionNameAsString());
+ }
+
+ /**
+ * Adds a hbase:meta row for the specified new region to the given catalog table. The
+ * HTable is not flushed or closed.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
+ addRegionToMeta(meta, regionInfo, null, null);
+ }
+
+ /**
+ * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
+ * not add its daughters as different rows, but adds information about the daughters
+ * in the same row as the parent. Use
+ * {@link #splitRegion(org.apache.hadoop.hbase.client.HConnection,
+ * HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
+ * if you want to do that.
+ * @param meta the HTable for META
+ * @param regionInfo region information
+ * @param splitA first split daughter of the parent regionInfo
+ * @param splitB second split daughter of the parent regionInfo
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
+ HRegionInfo splitA, HRegionInfo splitB) throws IOException {
+ Put put = makePutFromRegionInfo(regionInfo);
+ addDaughtersToPut(put, splitA, splitB);
+ meta.put(put);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added " + regionInfo.getRegionNameAsString());
+ }
+ }
+
+ /**
+ * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
+ * not add its daughters as different rows, but adds information about the daughters
+ * in the same row as the parent. Use
+ * {@link #splitRegion(HConnection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
+ * if you want to do that.
+ * @param hConnection connection we're using
+ * @param regionInfo region information
+ * @param splitA first split daughter of the parent regionInfo
+ * @param splitB second split daughter of the parent regionInfo
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionToMeta(HConnection hConnection, HRegionInfo regionInfo,
+ HRegionInfo splitA, HRegionInfo splitB) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ addRegionToMeta(meta, regionInfo, splitA, splitB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Adds a hbase:meta row for each of the specified new regions.
+ * @param hConnection connection we're using
+ * @param regionInfos region information list
+ * @throws IOException if problem connecting or updating meta
+ */
+ public static void addRegionsToMeta(HConnection hConnection,
+ List regionInfos)
+ throws IOException {
+ List puts = new ArrayList();
+ for (HRegionInfo regionInfo : regionInfos) {
+ puts.add(makePutFromRegionInfo(regionInfo));
+ }
+ putsToMetaTable(hConnection, puts);
+ LOG.info("Added " + puts.size());
+ }
+
+ /**
+ * Adds a daughter region entry to meta.
+ * @param regionInfo the region to put
+ * @param sn the location of the region
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ */
+ public static void addDaughter(final HConnection hConnection,
+ final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
+ throws NotAllMetaRegionsOnlineException, IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addRegionInfo(put, regionInfo);
+ if (sn != null) {
+ addLocation(put, sn, openSeqNum);
+ }
+ putToMetaTable(hConnection, put);
+ LOG.info("Added daughter " + regionInfo.getEncodedName() +
+ (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
+ }
+
+ /**
+ * Merge the two regions into one in an atomic operation. Deletes the two
+ * merging regions in hbase:meta and adds the merged region with the information of
+ * two merging regions.
+ * @param hConnection connection we're using
+ * @param mergedRegion the merged region
+ * @param regionA
+ * @param regionB
+ * @param sn the location of the region
+ * @throws IOException
+ */
+ public static void mergeRegions(final HConnection hConnection,
+ HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
+ ServerName sn) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
+
+ // Put for parent
+ Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
+ putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
+ regionA.toByteArray());
+ putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
+ regionB.toByteArray());
+
+ // Deletes for merging regions
+ Delete deleteA = makeDeleteFromRegionInfo(regionA);
+ Delete deleteB = makeDeleteFromRegionInfo(regionB);
+
+ // The merged is a new region, openSeqNum = 1 is fine.
+ addLocation(putOfMerged, sn, 1);
+
+ byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+ + HConstants.DELIMITER);
+ multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Splits the region into two in an atomic operation. Offlines the parent
+ * region with the information that it is split into two, and also adds
+ * the daughter regions. Does not add the location information to the daughter
+ * regions since they are not open yet.
+ * @param hConnection connection we're using
+ * @param parent the parent region which is split
+ * @param splitA Split daughter region A
+ * @param splitB Split daughter region B
+ * @param sn the location of the region
+ */
+ public static void splitRegion(final HConnection hConnection,
+ HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
+ ServerName sn) throws IOException {
+ HTable meta = getMetaHTable(hConnection);
+ try {
+ HRegionInfo copyOfParent = new HRegionInfo(parent);
+ copyOfParent.setOffline(true);
+ copyOfParent.setSplit(true);
+
+ //Put for parent
+ Put putParent = makePutFromRegionInfo(copyOfParent);
+ addDaughtersToPut(putParent, splitA, splitB);
+
+ //Puts for daughters
+ Put putA = makePutFromRegionInfo(splitA);
+ Put putB = makePutFromRegionInfo(splitB);
+
+ addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
+ addLocation(putB, sn, 1);
+
+ byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
+ multiMutate(meta, tableRow, putParent, putA, putB);
+ } finally {
+ meta.close();
+ }
+ }
+
+ /**
+ * Performs an atomic multi-Mutate operation against the given table.
+ */
+ private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
+ CoprocessorRpcChannel channel = table.coprocessorService(row);
+ MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
+ for (Mutation mutation : mutations) {
+ if (mutation instanceof Put) {
+ mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
+ } else if (mutation instanceof Delete) {
+ mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
+ } else {
+ throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
+ + mutation.getClass().getName());
+ }
+ }
+
+ MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
+ MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
+ try {
+ service.mutateRows(null, mmrBuilder.build());
+ } catch (ServiceException ex) {
+ ProtobufUtil.toIOException(ex);
+ }
+ }
+
+
+ /**
+ * Updates the location of the specified hbase:meta region in ROOT to be the
+ * specified server hostname and startcode.
+ *
+ * Uses passed catalog tracker to get a connection to the server hosting
+ * ROOT and makes edits to that region.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ * @throws IOException
+ * @throws java.net.ConnectException Usually because the regionserver carrying hbase:meta
+ * is down.
+ * @throws NullPointerException Because no -ROOT- server connection
+ */
+ public static void updateMetaLocation(HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long openSeqNum)
+ throws IOException, ConnectException {
+ updateLocation(hConnection, regionInfo, sn, openSeqNum);
+ }
+
+ /**
+ * Updates the location of the specified region in hbase:meta to be the specified
+ * server hostname and startcode.
+ *
+ * Uses passed catalog tracker to get a connection to the server hosting
+ * hbase:meta and makes edits to that region.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @throws IOException
+ */
+ public static void updateRegionLocation(HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
+ throws IOException {
+ updateLocation(hConnection, regionInfo, sn, updateSeqNum);
+ }
+
+ /**
+ * Updates the location of the specified region to be the specified server.
+ *
+ * Connects to the specified server which should be hosting the specified
+ * catalog region name to perform the edit.
+ *
+ * @param hConnection connection we're using
+ * @param regionInfo region to update location of
+ * @param sn Server name
+ * @param openSeqNum the latest sequence number obtained when the region was open
+ * @throws IOException In particular could throw {@link java.net.ConnectException}
+ * if the server is down on other end.
+ */
+ private static void updateLocation(final HConnection hConnection,
+ HRegionInfo regionInfo, ServerName sn, long openSeqNum)
+ throws IOException {
+ Put put = new Put(regionInfo.getRegionName());
+ addLocation(put, sn, openSeqNum);
+ putToMetaTable(hConnection, put);
+ LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
+ " with server=" + sn);
+ }
+
+ /**
+ * Deletes the specified region from META.
+ * @param hConnection connection we're using
+ * @param regionInfo region to be deleted from META
+ * @throws IOException
+ */
+ public static void deleteRegion(HConnection hConnection,
+ HRegionInfo regionInfo)
+ throws IOException {
+ Delete delete = new Delete(regionInfo.getRegionName());
+ deleteFromMetaTable(hConnection, delete);
+ LOG.info("Deleted " + regionInfo.getRegionNameAsString());
+ }
+
+ /**
+ * Deletes the specified regions from META.
+ * @param hConnection connection we're using
+ * @param regionsInfo list of regions to be deleted from META
+ * @throws IOException
+ */
+ public static void deleteRegions(HConnection hConnection,
+ List regionsInfo) throws IOException {
+ List deletes = new ArrayList(regionsInfo.size());
+ for (HRegionInfo hri: regionsInfo) {
+ deletes.add(new Delete(hri.getRegionName()));
+ }
+ deleteFromMetaTable(hConnection, deletes);
+ LOG.info("Deleted " + regionsInfo);
+ }
+
+ /**
+ * Adds and Removes the specified regions from hbase:meta
+ * @param hConnection connection we're using
+ * @param regionsToRemove list of regions to be deleted from META
+ * @param regionsToAdd list of regions to be added to META
+ * @throws IOException
+ */
+ public static void mutateRegions(HConnection hConnection,
+ final List regionsToRemove,
+ final List regionsToAdd)
+ throws IOException {
+ List mutation = new ArrayList();
+ if (regionsToRemove != null) {
+ for (HRegionInfo hri: regionsToRemove) {
+ mutation.add(new Delete(hri.getRegionName()));
+ }
+ }
+ if (regionsToAdd != null) {
+ for (HRegionInfo hri: regionsToAdd) {
+ mutation.add(makePutFromRegionInfo(hri));
+ }
+ }
+ mutateMetaTable(hConnection, mutation);
+ if (regionsToRemove != null && regionsToRemove.size() > 0) {
+ LOG.debug("Deleted " + regionsToRemove);
+ }
+ if (regionsToAdd != null && regionsToAdd.size() > 0) {
+ LOG.debug("Added " + regionsToAdd);
+ }
+ }
+
+ /**
+ * Overwrites the specified regions from hbase:meta
+ * @param hConnection connection we're using
+ * @param regionInfos list of regions to be added to META
+ * @throws IOException
+ */
+ public static void overwriteRegions(HConnection hConnection,
+ List regionInfos) throws IOException {
+ deleteRegions(hConnection, regionInfos);
+ // Why sleep? This is the easiest way to ensure that the previous deletes does not
+ // eclipse the following puts, that might happen in the same ts from the server.
+ // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
+ // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
+ Threads.sleep(20);
+ addRegionsToMeta(hConnection, regionInfos);
+ LOG.info("Overwritten " + regionInfos);
+ }
+
+ /**
+ * Deletes merge qualifiers for the specified merged region.
+ * @param hConnection connection we're using
+ * @param mergedRegion
+ * @throws IOException
+ */
+ public static void deleteMergeQualifiers(HConnection hConnection,
+ final HRegionInfo mergedRegion) throws IOException {
+ Delete delete = new Delete(mergedRegion.getRegionName());
+ delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
+ delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
+ deleteFromMetaTable(hConnection, delete);
+ LOG.info("Deleted references in merged region "
+ + mergedRegion.getRegionNameAsString() + ", qualifier="
+ + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
+ + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
+ }
+
+ private static Put addRegionInfo(final Put p, final HRegionInfo hri)
+ throws IOException {
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ hri.toByteArray());
+ return p;
+ }
+
+ private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ Bytes.toBytes(sn.getHostAndPort()));
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ Bytes.toBytes(sn.getStartcode()));
+ p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
+ Bytes.toBytes(openSeqNum));
+ return p;
+ }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 5a2fbbd..fc8b72f 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -59,8 +59,7 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -127,7 +126,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
@@ -143,6 +141,7 @@ import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
@@ -232,30 +231,6 @@ public class HBaseAdmin implements Admin {
this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);
}
- /**
- * @return A new CatalogTracker instance; call {@link #cleanupCatalogTracker(CatalogTracker)}
- * to cleanup the returned catalog tracker.
- * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException
- * @throws IOException
- * @see #cleanupCatalogTracker(CatalogTracker)
- */
- private synchronized CatalogTracker getCatalogTracker()
- throws ZooKeeperConnectionException, IOException {
- CatalogTracker ct = null;
- try {
- ct = new CatalogTracker(this.conf);
- ct.start();
- } catch (InterruptedException e) {
- // Let it out as an IOE for now until we redo all so tolerate IEs
- throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
- }
- return ct;
- }
-
- private void cleanupCatalogTracker(final CatalogTracker ct) {
- ct.stop();
- }
-
@Override
public void abort(String why, Throwable e) {
// Currently does nothing but throw the passed message and exception
@@ -288,16 +263,8 @@ public class HBaseAdmin implements Admin {
* @return True if table exists already.
* @throws IOException
*/
- public boolean tableExists(final TableName tableName)
- throws IOException {
- boolean b = false;
- CatalogTracker ct = getCatalogTracker();
- try {
- b = MetaReader.tableExists(ct, tableName);
- } finally {
- cleanupCatalogTracker(ct);
- }
- return b;
+ public boolean tableExists(final TableName tableName) throws IOException {
+ return MetaTableAccessor.tableExists(connection, tableName);
}
public boolean tableExists(final byte[] tableName)
@@ -659,7 +626,7 @@ public class HBaseAdmin implements Admin {
for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
try {
HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
- Scan scan = MetaReader.getScanForTableName(tableName);
+ Scan scan = MetaTableAccessor.getScanForTableName(tableName);
scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
ScanRequest request = RequestConverter.buildScanRequest(
firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
@@ -1362,28 +1329,23 @@ public class HBaseAdmin implements Admin {
* @throws IOException if a remote or network exception occurs
*/
public void closeRegion(final byte [] regionname, final String serverName)
- throws IOException {
- CatalogTracker ct = getCatalogTracker();
- try {
- if (serverName != null) {
- Pair pair = MetaReader.getRegion(ct, regionname);
- if (pair == null || pair.getFirst() == null) {
- throw new UnknownRegionException(Bytes.toStringBinary(regionname));
- } else {
- closeRegion(ServerName.valueOf(serverName), pair.getFirst());
- }
+ throws IOException {
+ if (serverName != null) {
+ Pair pair = MetaTableAccessor.getRegion(connection, regionname);
+ if (pair == null || pair.getFirst() == null) {
+ throw new UnknownRegionException(Bytes.toStringBinary(regionname));
} else {
- Pair pair = MetaReader.getRegion(ct, regionname);
- if (pair == null) {
- throw new UnknownRegionException(Bytes.toStringBinary(regionname));
- } else if (pair.getSecond() == null) {
- throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
- } else {
- closeRegion(pair.getSecond(), pair.getFirst());
- }
+ closeRegion(ServerName.valueOf(serverName), pair.getFirst());
+ }
+ } else {
+ Pair pair = MetaTableAccessor.getRegion(connection, regionname);
+ if (pair == null) {
+ throw new UnknownRegionException(Bytes.toStringBinary(regionname));
+ } else if (pair.getSecond() == null) {
+ throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
+ } else {
+ closeRegion(pair.getSecond(), pair.getFirst());
}
- } finally {
- cleanupCatalogTracker(ct);
}
}
@@ -1477,28 +1439,23 @@ public class HBaseAdmin implements Admin {
*/
public void flush(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
- try {
- Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
- if (regionServerPair != null) {
- if (regionServerPair.getSecond() == null) {
- throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
- } else {
- flush(regionServerPair.getSecond(), regionServerPair.getFirst());
- }
+ Pair regionServerPair
+ = getRegion(tableNameOrRegionName);
+ if (regionServerPair != null) {
+ if (regionServerPair.getSecond() == null) {
+ throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
} else {
- final TableName tableName = checkTableExists(
- TableName.valueOf(tableNameOrRegionName), ct);
- if (isTableDisabled(tableName)) {
- LOG.info("Table is disabled: " + tableName.getNameAsString());
- return;
- }
- execProcedure("flush-table-proc", tableName.getNameAsString(),
- new HashMap());
+ flush(regionServerPair.getSecond(), regionServerPair.getFirst());
}
- } finally {
- cleanupCatalogTracker(ct);
+ } else {
+ final TableName tableName = checkTableExists(
+ TableName.valueOf(tableNameOrRegionName));
+ if (isTableDisabled(tableName)) {
+ LOG.info("Table is disabled: " + tableName.getNameAsString());
+ return;
+ }
+ execProcedure("flush-table-proc", tableName.getNameAsString(),
+ new HashMap());
}
}
@@ -1636,10 +1593,12 @@ public class HBaseAdmin implements Admin {
private void compact(final byte[] tableNameOrRegionName,
final byte[] columnFamily,final boolean major)
throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "hbase-admin-on-" + connection.toString(),
+ new ThrowableAbortable());
try {
Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -1648,10 +1607,10 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List> pairs =
- MetaReader.getTableRegionsAndLocations(ct,
- tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+ tableName);
for (Pair pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
@@ -1667,7 +1626,7 @@ public class HBaseAdmin implements Admin {
}
}
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
}
@@ -1964,10 +1923,12 @@ public class HBaseAdmin implements Admin {
*/
public void split(final byte[] tableNameOrRegionName,
final byte [] splitPoint) throws IOException, InterruptedException {
- CatalogTracker ct = getCatalogTracker();
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "hbase-admin-on-" + connection.toString(),
+ new ThrowableAbortable());
try {
Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -1976,10 +1937,10 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List> pairs =
- MetaReader.getTableRegionsAndLocations(ct,
- tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection,
+ tableName);
for (Pair pair: pairs) {
// May not be a server for a particular row
if (pair.getSecond() == null) continue;
@@ -1993,7 +1954,7 @@ public class HBaseAdmin implements Admin {
}
}
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
}
@@ -2045,19 +2006,18 @@ public class HBaseAdmin implements Admin {
/**
* @param tableNameOrRegionName Name of a table or name of a region.
- * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
* @return a pair of HRegionInfo and ServerName if tableNameOrRegionName is
- * a verified region name (we call {@link MetaReader#getRegion( CatalogTracker, byte[])}
+ * a verified region name (we call {@link
+ * org.apache.hadoop.hbase.catalog.MetaTableAccessor#getRegion(HConnection, byte[])}
* else null.
* Throw an exception if tableNameOrRegionName is null.
* @throws IOException
*/
- Pair getRegion(final byte[] tableNameOrRegionName,
- final CatalogTracker ct) throws IOException {
+ Pair getRegion(final byte[] tableNameOrRegionName) throws IOException {
if (tableNameOrRegionName == null) {
throw new IllegalArgumentException("Pass a table name or region name");
}
- Pair pair = MetaReader.getRegion(ct, tableNameOrRegionName);
+ Pair pair = MetaTableAccessor.getRegion(connection, tableNameOrRegionName);
if (pair == null) {
final AtomicReference> result =
new AtomicReference>(null);
@@ -2099,16 +2059,10 @@ public class HBaseAdmin implements Admin {
HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
}
- CatalogTracker ct = getCatalogTracker();
byte[] tmp = regionNameOrEncodedRegionName;
- try {
- Pair regionServerPair
- = getRegion(regionNameOrEncodedRegionName, ct);
- if (regionServerPair != null && regionServerPair.getFirst() != null) {
- tmp = regionServerPair.getFirst().getRegionName();
- }
- } finally {
- cleanupCatalogTracker(ct);
+ Pair regionServerPair = getRegion(regionNameOrEncodedRegionName);
+ if (regionServerPair != null && regionServerPair.getFirst() != null) {
+ tmp = regionServerPair.getFirst().getRegionName();
}
return tmp;
}
@@ -2116,16 +2070,13 @@ public class HBaseAdmin implements Admin {
/**
* Check if table exists or not
* @param tableName Name of a table.
- * @param ct A {@link CatalogTracker} instance (caller of this method usually has one).
* @return tableName instance
* @throws IOException if a remote or network exception occurs.
* @throws TableNotFoundException if table does not exist.
*/
- //TODO rename this method
- private TableName checkTableExists(
- final TableName tableName, CatalogTracker ct)
+ private TableName checkTableExists(final TableName tableName)
throws IOException {
- if (!MetaReader.tableExists(ct, tableName)) {
+ if (!MetaTableAccessor.tableExists(connection, tableName)) {
throw new TableNotFoundException(tableName);
}
return tableName;
@@ -2408,12 +2359,14 @@ public class HBaseAdmin implements Admin {
*/
public List getTableRegions(final TableName tableName)
throws IOException {
- CatalogTracker ct = getCatalogTracker();
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "hbase-admin-on-" + connection.toString(),
+ new ThrowableAbortable());
List Regions = null;
try {
- Regions = MetaReader.getTableRegions(ct, tableName, true);
+ Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
return Regions;
}
@@ -2524,10 +2477,11 @@ public class HBaseAdmin implements Admin {
public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
CompactionState state = CompactionState.NONE;
- CatalogTracker ct = getCatalogTracker();
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, "hbase-admin-on-" + connection.toString(),
+ new ThrowableAbortable());
try {
- Pair regionServerPair
- = getRegion(tableNameOrRegionName, ct);
+ Pair regionServerPair = getRegion(tableNameOrRegionName);
if (regionServerPair != null) {
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName));
@@ -2541,9 +2495,9 @@ public class HBaseAdmin implements Admin {
}
} else {
final TableName tableName =
- checkTableExists(TableName.valueOf(tableNameOrRegionName), ct);
+ checkTableExists(TableName.valueOf(tableNameOrRegionName));
List> pairs =
- MetaReader.getTableRegionsAndLocations(ct, tableName);
+ MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
for (Pair pair: pairs) {
if (pair.getFirst().isOffline()) continue;
if (pair.getSecond() == null) continue;
@@ -2592,7 +2546,7 @@ public class HBaseAdmin implements Admin {
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
} finally {
- cleanupCatalogTracker(ct);
+ zookeeper.close();
}
return state;
}
@@ -3433,4 +3387,20 @@ public class HBaseAdmin implements Admin {
public CoprocessorRpcChannel coprocessorService() {
return new MasterCoprocessorRpcChannel(connection);
}
+
+ /**
+ * Simple {@link Abortable}, throwing RuntimeException on abort.
+ */
+ private static class ThrowableAbortable implements Abortable {
+
+ @Override
+ public void abort(String why, Throwable e) {
+ throw new RuntimeException(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return true;
+ }
+ }
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 0305821..ebb39f4 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -49,7 +48,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
* connections are managed at a lower level.
*
* HConnections are used by {@link HTable} mostly but also by
- * {@link HBaseAdmin}, and {@link CatalogTracker}. HConnection instances can be shared. Sharing
+ * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}.
+ * HConnection instances can be shared. Sharing
* is usually what you want because rather than each HConnection instance
* having to do its own discovery of regions out on the cluster, instead, all
* clients get to share the one cache of locations. {@link HConnectionManager} does the
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 529b7f6..2bbbf0e 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -56,7 +56,7 @@ class ZooKeeperRegistry implements Registry {
if (LOG.isTraceEnabled()) {
LOG.trace("Looking up meta region location in ZK," + " connection=" + this);
}
- ServerName servername = MetaRegionTracker.blockUntilAvailable(zkw, hci.rpcTimeout);
+ ServerName servername = new MetaTableLocator().blockUntilAvailable(zkw, hci.rpcTimeout);
if (LOG.isTraceEnabled()) {
LOG.trace("Looked up meta region location, connection=" + this +
"; serverName=" + ((servername == null) ? "null" : servername));
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
deleted file mode 100644
index a3a7d02..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaRegionTracker.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Tracks the meta region server location node in zookeeper.
- * Meta region location is set by RegionServerServices.
- * This class has a watcher on the meta location and notices changes.
- */
-@InterfaceAudience.Private
-public class MetaRegionTracker extends ZooKeeperNodeTracker {
- /**
- * Creates a meta region location tracker.
- *
- * <p>After construction, use {@link #start} to kick off tracking.
- *
- * @param watcher
- * @param abortable
- */
- public MetaRegionTracker(ZooKeeperWatcher watcher, Abortable abortable) {
- super(watcher, watcher.metaServerZNode, abortable);
- }
-
- /**
- * Checks if the meta region location is available.
- * @return true if meta region location is available, false if not
- */
- public boolean isLocationAvailable() {
- return super.getData(true) != null;
- }
-
- /**
- * Gets the meta region location, if available. Does not block. Sets a watcher.
- * @return server name or null if we failed to get the data.
- * @throws InterruptedException
- */
- public ServerName getMetaRegionLocation() throws InterruptedException {
- try {
- return ServerName.parseFrom(super.getData(true));
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-
- /**
- * Gets the meta region location, if available. Does not block. Does not set
- * a watcher (In this regard it differs from {@link #getMetaRegionLocation}.
- * @param zkw
- * @return server name or null if we failed to get the data.
- * @throws KeeperException
- */
- public static ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw)
- throws KeeperException {
- try {
- return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
- } catch (DeserializationException e) {
- throw ZKUtil.convert(e);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- return null;
- }
- }
-
- /**
- * Gets the meta region location, if available, and waits for up to the
- * specified timeout if not immediately available.
- * Given the zookeeper notification could be delayed, we will try to
- * get the latest data.
- * @param timeout maximum time to wait, in millis
- * @return server name for server hosting meta region formatted as per
- * {@link ServerName}, or null if none available
- * @throws InterruptedException if interrupted while waiting
- */
- public ServerName waitMetaRegionLocation(long timeout)
- throws InterruptedException {
- if (false == checkIfBaseNodeAvailable()) {
- String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
- + "There could be a mismatch with the one configured in the master.";
- LOG.error(errorMsg);
- throw new IllegalArgumentException(errorMsg);
- }
- try {
- return ServerName.parseFrom(super.blockUntilAvailable(timeout, true));
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-
- /**
- * Sets the location of hbase:meta in ZooKeeper to the
- * specified server address.
- * @param zookeeper zookeeper reference
- * @param location The server hosting hbase:meta
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void setMetaLocation(ZooKeeperWatcher zookeeper,
- final ServerName location)
- throws KeeperException {
- LOG.info("Setting hbase:meta region location in ZooKeeper as " + location);
- // Make the MetaRegionServer pb and then get its bytes and save this as
- // the znode content.
- byte [] data = toByteArray(location);
- try {
- ZKUtil.createAndWatch(zookeeper, zookeeper.metaServerZNode, data);
- } catch(KeeperException.NodeExistsException nee) {
- LOG.debug("META region location already existed, updated location");
- ZKUtil.setData(zookeeper, zookeeper.metaServerZNode, data);
- }
- }
-
- /**
- * Build up the znode content.
- * @param sn What to put into the znode.
- * @return The content of the meta-region-server znode
- */
- static byte [] toByteArray(final ServerName sn) {
- // ZNode content is a pb message preceded by some pb magic.
- HBaseProtos.ServerName pbsn =
- HBaseProtos.ServerName.newBuilder()
- .setHostName(sn.getHostname())
- .setPort(sn.getPort())
- .setStartCode(sn.getStartcode())
- .build();
-
- ZooKeeperProtos.MetaRegionServer pbrsr =
- ZooKeeperProtos.MetaRegionServer.newBuilder()
- .setServer(pbsn)
- .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
- .build();
- return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
- }
-
- /**
- * Deletes the location of hbase:meta in ZooKeeper.
- * @param zookeeper zookeeper reference
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void deleteMetaLocation(ZooKeeperWatcher zookeeper)
- throws KeeperException {
- LOG.info("Unsetting hbase:meta region location in ZooKeeper");
- try {
- // Just delete the node. Don't need any watches.
- ZKUtil.deleteNode(zookeeper, zookeeper.metaServerZNode);
- } catch(KeeperException.NoNodeException nne) {
- // Has already been deleted
- }
- }
-
- /**
- * Wait until the meta region is available.
- * @param zkw
- * @param timeout
- * @return ServerName or null if we timed out.
- * @throws InterruptedException
- */
- public static ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
- final long timeout)
- throws InterruptedException {
- byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.metaServerZNode, timeout);
- if (data == null) return null;
- try {
- return ServerName.parseFrom(data);
- } catch (DeserializationException e) {
- LOG.warn("Failed parse", e);
- return null;
- }
- }
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
new file mode 100644
index 0000000..f1d17c2
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -0,0 +1,404 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import com.google.common.base.Stopwatch;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.zookeeper.KeeperException;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.NoRouteToHostException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
+
+/**
+ * Utility class to perform operation (get/wait for/verify/set/delete) on znode in ZooKeeper
+ * which keeps hbase:meta region server location.
+ *
+ * Stateless class with a bunch of static methods. Doesn't manage resources passed in
+ * (e.g. HConnection, ZooKeeperWatcher etc).
+ *
+ * Meta region location is set by RegionServerServices.
+ * This class doesn't use ZK watchers, rather accesses ZK directly.
+ *
+ * This class is stateless. The only reason it's not made a non-instantiable util class
+ * with a collection of static methods is that it'd be rather hard to mock properly in tests.
+ *
+ * TODO: rewrite using RPC calls to master to find out about hbase:meta.
+ */
+@InterfaceAudience.Private
+public class MetaTableLocator {
+ private static final Log LOG = LogFactory.getLog(MetaTableLocator.class);
+
+ static final byte [] META_REGION_NAME =
+ HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
+
+ // only needed to allow non-timeout infinite waits to stop when cluster shuts down
+ private volatile boolean stopped = false;
+
+ /**
+ * Checks if the meta region location is available.
+ * @return true if meta region location is available, false if not
+ */
+ public boolean isLocationAvailable(ZooKeeperWatcher zkw) {
+ try {
+ return ZKUtil.getData(zkw, zkw.metaServerZNode) != null;
+ } catch(KeeperException e) {
+ LOG.error("ZK error trying to get hbase:meta from ZooKeeper", e);
+ return false;
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return false;
+ }
+ }
+
+ /**
+ * Gets the meta region location, if available. Does not block.
+ * @param zkw zookeeper connection to use
+ * @return server name or null if we failed to get the data.
+ */
+ public ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw) {
+ try {
+ try {
+ return ServerName.parseFrom(ZKUtil.getData(zkw, zkw.metaServerZNode));
+ } catch (DeserializationException e) {
+ throw ZKUtil.convert(e);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return null;
+ }
+ } catch (KeeperException ke) {
+ return null;
+ }
+ }
+
+ /**
+ * Gets the meta region location, if available, and waits for up to the
+ * specified timeout if not immediately available.
+ * Given the zookeeper notification could be delayed, we will try to
+ * get the latest data.
+ * @param timeout maximum time to wait, in millis
+ * @return server name for server hosting meta region formatted as per
+ * {@link ServerName}, or null if none available
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public ServerName waitMetaRegionLocation(ZooKeeperWatcher zkw, long timeout)
+ throws InterruptedException, NotAllMetaRegionsOnlineException {
+ try {
+ if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
+ String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
+ + "There could be a mismatch with the one configured in the master.";
+ LOG.error(errorMsg);
+ throw new IllegalArgumentException(errorMsg);
+ }
+ } catch (KeeperException e) {
+ throw new IllegalStateException("KeeperException while trying to check baseZNode:", e);
+ }
+ ServerName sn = blockUntilAvailable(zkw, timeout);
+
+ if (sn == null) {
+ throw new NotAllMetaRegionsOnlineException("Timed out; " + timeout + "ms");
+ }
+
+ return sn;
+ }
+
+ /**
+ * Waits indefinitely for availability of hbase:meta. Used during
+ * cluster startup. Does not verify meta, just that something has been
+ * set up in zk.
+ * @see #waitMetaRegionLocation(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher, long)
+ * @throws InterruptedException if interrupted while waiting
+ */
+ public void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws InterruptedException {
+ Stopwatch stopwatch = new Stopwatch().start();
+ while (!stopped) {
+ try {
+ if (waitMetaRegionLocation(zkw, 100) != null) break;
+ long sleepTime = stopwatch.elapsedMillis();
+ // +1 in case sleepTime=0
+ if ((sleepTime + 1) % 10000 == 0) {
+ LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
+ }
+ } catch (NotAllMetaRegionsOnlineException e) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("hbase:meta still not available, sleeping and retrying." +
+ " Reason: " + e.getMessage());
+ }
+ }
+ }
+ }
+
+ /**
+ * Verify hbase:meta is deployed and accessible.
+ * @param timeout How long to wait on zk for meta address (passed through to
+ * the internal call to {@link #getMetaServerConnection}.
+ * @return True if the hbase:meta location is healthy.
+ * @throws java.io.IOException
+ * @throws InterruptedException
+ */
+ public boolean verifyMetaRegionLocation(HConnection hConnection,
+ ZooKeeperWatcher zkw, final long timeout)
+ throws InterruptedException, IOException {
+ AdminProtos.AdminService.BlockingInterface service = null;
+ try {
+ service = getMetaServerConnection(hConnection, zkw, timeout);
+ } catch (NotAllMetaRegionsOnlineException e) {
+ // Pass
+ } catch (ServerNotRunningYetException e) {
+ // Pass -- remote server is not up so can't be carrying root
+ } catch (UnknownHostException e) {
+ // Pass -- server name doesn't resolve so it can't be assigned anything.
+ } catch (RegionServerStoppedException e) {
+ // Pass -- server name sends us to a server that is dying or already dead.
+ }
+ return (service == null)? false:
+ verifyRegionLocation(service,
+ getMetaRegionLocation(zkw), META_REGION_NAME);
+ }
+
+ /**
+ * Verify we can connect to hostingServer and that its carrying
+ * regionName.
+ * @param hostingServer Interface to the server hosting regionName
+ * @param address The servername that goes with the metaServer
+ * Interface. Used logging.
+ * @param regionName The regionname we are interested in.
+ * @return True if we were able to verify the region located at other side of
+ * the Interface.
+ * @throws IOException
+ */
+ // TODO: We should be able to get the ServerName from the AdminProtocol
+ // rather than have to pass it in. Its made awkward by the fact that the
+ // HRI is likely a proxy against remote server so the getServerName needs
+ // to be fixed to go to a local method or to a cache before we can do this.
+ private boolean verifyRegionLocation(AdminService.BlockingInterface hostingServer,
+ final ServerName address, final byte [] regionName)
+ throws IOException {
+ if (hostingServer == null) {
+ LOG.info("Passed hostingServer is null");
+ return false;
+ }
+ Throwable t;
+ try {
+ // Try and get regioninfo from the hosting server.
+ return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null;
+ } catch (ConnectException e) {
+ t = e;
+ } catch (RetriesExhaustedException e) {
+ t = e;
+ } catch (RemoteException e) {
+ IOException ioe = e.unwrapRemoteException();
+ t = ioe;
+ } catch (IOException e) {
+ Throwable cause = e.getCause();
+ if (cause != null && cause instanceof EOFException) {
+ t = cause;
+ } else if (cause != null && cause.getMessage() != null
+ && cause.getMessage().contains("Connection reset")) {
+ t = cause;
+ } else {
+ t = e;
+ }
+ }
+ LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
+ " at address=" + address + ", exception=" + t);
+ return false;
+ }
+
+ /**
+ * Gets a connection to the server hosting meta, as reported by ZooKeeper,
+ * waiting up to the specified timeout for availability.
+ * <p>WARNING: Does not retry. Use an {@link org.apache.hadoop.hbase.client.HTable} instead.
+ * @param timeout How long to wait on meta location
+ * @return connection to server hosting meta
+ * @throws InterruptedException
+ * @throws NotAllMetaRegionsOnlineException if timed out waiting
+ * @throws IOException
+ */
+ private AdminService.BlockingInterface getMetaServerConnection(HConnection hConnection,
+ ZooKeeperWatcher zkw, long timeout)
+ throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
+ return getCachedConnection(hConnection, waitMetaRegionLocation(zkw, timeout));
+ }
+
+ /**
+ * @param sn ServerName to get a connection against.
+ * @return The AdminProtocol we got when we connected to sn
+ * May have come from cache, may not be good, may have been setup by this
+ * invocation, or may be null.
+ * @throws IOException
+ */
+ @SuppressWarnings("deprecation")
+ private static AdminService.BlockingInterface getCachedConnection(HConnection hConnection,
+ ServerName sn)
+ throws IOException {
+ if (sn == null) {
+ return null;
+ }
+ AdminService.BlockingInterface service = null;
+ try {
+ service = hConnection.getAdmin(sn);
+ } catch (RetriesExhaustedException e) {
+ if (e.getCause() != null && e.getCause() instanceof ConnectException) {
+ // Catch this; presume it means the cached connection has gone bad.
+ } else {
+ throw e;
+ }
+ } catch (SocketTimeoutException e) {
+ LOG.debug("Timed out connecting to " + sn);
+ } catch (NoRouteToHostException e) {
+ LOG.debug("Connecting to " + sn, e);
+ } catch (SocketException e) {
+ LOG.debug("Exception connecting to " + sn);
+ } catch (UnknownHostException e) {
+ LOG.debug("Unknown host exception connecting to " + sn);
+ } catch (RpcClient.FailedServerException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Server " + sn + " is in failed server list.");
+ }
+ } catch (IOException ioe) {
+ Throwable cause = ioe.getCause();
+ if (ioe instanceof ConnectException) {
+ // Catch. Connect refused.
+ } else if (cause != null && cause instanceof EOFException) {
+ // Catch. Other end disconnected us.
+ } else if (cause != null && cause.getMessage() != null &&
+ cause.getMessage().toLowerCase().contains("connection reset")) {
+ // Catch. Connection reset.
+ } else {
+ throw ioe;
+ }
+
+ }
+ return service;
+ }
+
+ /**
+ * Sets the location of hbase:meta in ZooKeeper to the
+ * specified server address.
+ * @param zookeeper zookeeper reference
+ * @param location The server hosting hbase:meta
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public static void setMetaLocation(ZooKeeperWatcher zookeeper,
+ final ServerName location)
+ throws KeeperException {
+ LOG.info("Setting hbase:meta region location in ZooKeeper as " + location);
+ // Make the MetaRegionServer pb and then get its bytes and save this as
+ // the znode content.
+ byte [] data = toByteArray(location);
+ try {
+ ZKUtil.createAndWatch(zookeeper, zookeeper.metaServerZNode, data);
+ } catch(KeeperException.NodeExistsException nee) {
+ LOG.debug("META region location already existed, updated location");
+ ZKUtil.setData(zookeeper, zookeeper.metaServerZNode, data);
+ }
+ }
+
+ /**
+ * Build up the znode content.
+ * @param sn What to put into the znode.
+ * @return The content of the meta-region-server znode
+ */
+ private static byte [] toByteArray(final ServerName sn) {
+ // ZNode content is a pb message preceded by some pb magic.
+ HBaseProtos.ServerName pbsn =
+ HBaseProtos.ServerName.newBuilder()
+ .setHostName(sn.getHostname())
+ .setPort(sn.getPort())
+ .setStartCode(sn.getStartcode())
+ .build();
+
+ ZooKeeperProtos.MetaRegionServer pbrsr =
+ ZooKeeperProtos.MetaRegionServer.newBuilder()
+ .setServer(pbsn)
+ .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
+ .build();
+ return ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
+ }
+
+ /**
+ * Deletes the location of hbase:meta in ZooKeeper.
+ * @param zookeeper zookeeper reference
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public void deleteMetaLocation(ZooKeeperWatcher zookeeper)
+ throws KeeperException {
+ LOG.info("Unsetting hbase:meta region location in ZooKeeper");
+ try {
+ // Just delete the node. Don't need any watches.
+ ZKUtil.deleteNode(zookeeper, zookeeper.metaServerZNode);
+ } catch(KeeperException.NoNodeException nne) {
+ // Has already been deleted
+ }
+ }
+
+ /**
+ * Wait until the meta region is available.
+ * @param zkw zookeeper connection to use
+ * @param timeout maximum time to wait, in millis
+ * @return ServerName or null if we timed out.
+ * @throws InterruptedException
+ */
+ public ServerName blockUntilAvailable(final ZooKeeperWatcher zkw,
+ final long timeout)
+ throws InterruptedException {
+ byte [] data = ZKUtil.blockUntilAvailable(zkw, zkw.metaServerZNode, timeout);
+ if (data == null) return null;
+ try {
+ return ServerName.parseFrom(data);
+ } catch (DeserializationException e) {
+ LOG.warn("Failed parse", e);
+ return null;
+ }
+ }
+
+ /**
+ * Stop working.
+ * Interrupts any ongoing waits.
+ */
+ public void stop() {
+ if (!stopped) {
+ LOG.debug("Stopping MetaTableLocator");
+ stopped = true;
+ }
+ }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index a68d8c9..54b720b 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent;
@@ -1590,7 +1589,7 @@ public class ZKUtil {
zkw.backupMasterAddressesZNode)) {
sb.append("\n ").append(child);
}
- sb.append("\nRegion server holding hbase:meta: " + MetaRegionTracker.getMetaRegionLocation(zkw));
+ sb.append("\nRegion server holding hbase:meta: " + new MetaTableLocator().getMetaRegionLocation(zkw));
sb.append("\nRegion servers:");
for (String child : listChildrenNoWatch(zkw, zkw.rsZNode)) {
sb.append("\n ").append(child);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
deleted file mode 100644
index 89fa0a9..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.ConnectException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Writes region and assignment information to hbase:meta.
- * TODO: Put MetaReader and MetaEditor together; doesn't make sense having
- * them distinct. see HBASE-3475.
- */
-@InterfaceAudience.Private
-public class MetaEditor {
- // TODO: Strip CatalogTracker from this class. Its all over and in the end
- // its only used to get its Configuration so we can get associated
- // Connection.
- private static final Log LOG = LogFactory.getLog(MetaEditor.class);
-
- /**
- * Generates and returns a Put containing the region into for the catalog table
- */
- public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
- throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addRegionInfo(put, regionInfo);
- return put;
- }
-
- /**
- * Generates and returns a Delete containing the region info for the catalog
- * table
- */
- public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
- if (regionInfo == null) {
- throw new IllegalArgumentException("Can't make a delete for null region");
- }
- Delete delete = new Delete(regionInfo.getRegionName());
- return delete;
- }
-
- /**
- * Adds split daughters to the Put
- */
- public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
- if (splitA != null) {
- put.addImmutable(
- HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
- }
- if (splitB != null) {
- put.addImmutable(
- HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
- }
- return put;
- }
-
- /**
- * Put the passed p to the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param p Put to add to hbase:meta
- * @throws IOException
- */
- static void putToMetaTable(final CatalogTracker ct, final Put p)
- throws IOException {
- put(MetaReader.getMetaHTable(ct), p);
- }
-
- /**
- * Put the passed p to a catalog table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param p Put to add
- * @throws IOException
- */
- static void putToCatalogTable(final CatalogTracker ct, final Put p)
- throws IOException {
- put(MetaReader.getCatalogHTable(ct), p);
- }
-
- /**
- * @param t Table to use (will be closed when done).
- * @param p
- * @throws IOException
- */
- private static void put(final HTable t, final Put p) throws IOException {
- try {
- t.put(p);
- } finally {
- t.close();
- }
- }
-
- /**
- * Put the passed ps to the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param ps Put to add to hbase:meta
- * @throws IOException
- */
- public static void putsToMetaTable(final CatalogTracker ct, final List ps)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.put(ps);
- } finally {
- t.close();
- }
- }
-
- /**
- * Delete the passed d from the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param d Delete to add to hbase:meta
- * @throws IOException
- */
- static void deleteFromMetaTable(final CatalogTracker ct, final Delete d)
- throws IOException {
- List dels = new ArrayList(1);
- dels.add(d);
- deleteFromMetaTable(ct, dels);
- }
-
- /**
- * Delete the passed deletes from the hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param deletes Deletes to add to hbase:meta This list should support #remove.
- * @throws IOException
- */
- public static void deleteFromMetaTable(final CatalogTracker ct, final List deletes)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.delete(deletes);
- } finally {
- t.close();
- }
- }
-
- /**
- * Execute the passed mutations against hbase:meta table.
- * @param ct CatalogTracker on whose back we will ride the edit.
- * @param mutations Puts and Deletes to execute on hbase:meta
- * @throws IOException
- */
- public static void mutateMetaTable(final CatalogTracker ct, final List mutations)
- throws IOException {
- HTable t = MetaReader.getMetaHTable(ct);
- try {
- t.batch(mutations);
- } catch (InterruptedException e) {
- InterruptedIOException ie = new InterruptedIOException(e.getMessage());
- ie.initCause(e);
- throw ie;
- } finally {
- t.close();
- }
- }
-
- /**
- * Adds a hbase:meta row for the specified new region.
- * @param regionInfo region information
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(CatalogTracker catalogTracker,
- HRegionInfo regionInfo)
- throws IOException {
- putToMetaTable(catalogTracker, makePutFromRegionInfo(regionInfo));
- LOG.info("Added " + regionInfo.getRegionNameAsString());
- }
-
- /**
- * Adds a hbase:meta row for the specified new region to the given catalog table. The
- * HTable is not flushed or closed.
- * @param meta the HTable for META
- * @param regionInfo region information
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo) throws IOException {
- addRegionToMeta(meta, regionInfo, null, null);
- }
-
- /**
- * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
- * not add its daughter's as different rows, but adds information about the daughters
- * in the same row as the parent. Use
- * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
- * if you want to do that.
- * @param meta the HTable for META
- * @param regionInfo region information
- * @param splitA first split daughter of the parent regionInfo
- * @param splitB second split daughter of the parent regionInfo
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(HTable meta, HRegionInfo regionInfo,
- HRegionInfo splitA, HRegionInfo splitB) throws IOException {
- Put put = makePutFromRegionInfo(regionInfo);
- addDaughtersToPut(put, splitA, splitB);
- meta.put(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Added " + regionInfo.getRegionNameAsString());
- }
- }
-
- /**
- * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this does
- * not add its daughter's as different rows, but adds information about the daughters
- * in the same row as the parent. Use
- * {@link #splitRegion(CatalogTracker, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
- * if you want to do that.
- * @param catalogTracker CatalogTracker on whose back we will ride the edit.
- * @param regionInfo region information
- * @param splitA first split daughter of the parent regionInfo
- * @param splitB second split daughter of the parent regionInfo
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionToMeta(CatalogTracker catalogTracker, HRegionInfo regionInfo,
- HRegionInfo splitA, HRegionInfo splitB) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- addRegionToMeta(meta, regionInfo, splitA, splitB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Adds a hbase:meta row for each of the specified new regions.
- * @param catalogTracker CatalogTracker
- * @param regionInfos region information list
- * @throws IOException if problem connecting or updating meta
- */
- public static void addRegionsToMeta(CatalogTracker catalogTracker,
- List regionInfos)
- throws IOException {
- List puts = new ArrayList();
- for (HRegionInfo regionInfo : regionInfos) {
- puts.add(makePutFromRegionInfo(regionInfo));
- }
- putsToMetaTable(catalogTracker, puts);
- LOG.info("Added " + puts.size());
- }
-
- /**
- * Adds a daughter region entry to meta.
- * @param regionInfo the region to put
- * @param sn the location of the region
- * @param openSeqNum the latest sequence number obtained when the region was open
- */
- public static void addDaughter(final CatalogTracker catalogTracker,
- final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
- throws NotAllMetaRegionsOnlineException, IOException {
- Put put = new Put(regionInfo.getRegionName());
- addRegionInfo(put, regionInfo);
- if (sn != null) {
- addLocation(put, sn, openSeqNum);
- }
- putToMetaTable(catalogTracker, put);
- LOG.info("Added daughter " + regionInfo.getEncodedName() +
- (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
- }
-
- /**
- * Merge the two regions into one in an atomic operation. Deletes the two
- * merging regions in hbase:meta and adds the merged region with the information of
- * two merging regions.
- * @param catalogTracker the catalog tracker
- * @param mergedRegion the merged region
- * @param regionA
- * @param regionB
- * @param sn the location of the region
- * @throws IOException
- */
- public static void mergeRegions(final CatalogTracker catalogTracker,
- HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
- ServerName sn) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
-
- // Put for parent
- Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
- putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
- regionA.toByteArray());
- putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
- regionB.toByteArray());
-
- // Deletes for merging regions
- Delete deleteA = makeDeleteFromRegionInfo(regionA);
- Delete deleteB = makeDeleteFromRegionInfo(regionB);
-
- // The merged is a new region, openSeqNum = 1 is fine.
- addLocation(putOfMerged, sn, 1);
-
- byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
- + HConstants.DELIMITER);
- multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Splits the region into two in an atomic operation. Offlines the parent
- * region with the information that it is split into two, and also adds
- * the daughter regions. Does not add the location information to the daughter
- * regions since they are not open yet.
- * @param catalogTracker the catalog tracker
- * @param parent the parent region which is split
- * @param splitA Split daughter region A
- * @param splitB Split daughter region A
- * @param sn the location of the region
- */
- public static void splitRegion(final CatalogTracker catalogTracker,
- HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
- ServerName sn) throws IOException {
- HTable meta = MetaReader.getMetaHTable(catalogTracker);
- try {
- HRegionInfo copyOfParent = new HRegionInfo(parent);
- copyOfParent.setOffline(true);
- copyOfParent.setSplit(true);
-
- //Put for parent
- Put putParent = makePutFromRegionInfo(copyOfParent);
- addDaughtersToPut(putParent, splitA, splitB);
-
- //Puts for daughters
- Put putA = makePutFromRegionInfo(splitA);
- Put putB = makePutFromRegionInfo(splitB);
-
- addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
- addLocation(putB, sn, 1);
-
- byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
- multiMutate(meta, tableRow, putParent, putA, putB);
- } finally {
- meta.close();
- }
- }
-
- /**
- * Performs an atomic multi-Mutate operation against the given table.
- */
- private static void multiMutate(HTable table, byte[] row, Mutation... mutations) throws IOException {
- CoprocessorRpcChannel channel = table.coprocessorService(row);
- MutateRowsRequest.Builder mmrBuilder = MutateRowsRequest.newBuilder();
- for (Mutation mutation : mutations) {
- if (mutation instanceof Put) {
- mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, mutation));
- } else if (mutation instanceof Delete) {
- mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, mutation));
- } else {
- throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
- + mutation.getClass().getName());
- }
- }
-
- MultiRowMutationService.BlockingInterface service =
- MultiRowMutationService.newBlockingStub(channel);
- try {
- service.mutateRows(null, mmrBuilder.build());
- } catch (ServiceException ex) {
- ProtobufUtil.toIOException(ex);
- }
- }
-
-
- /**
- * Updates the location of the specified hbase:meta region in ROOT to be the
- * specified server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * ROOT and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException
- * @throws ConnectException Usually because the regionserver carrying hbase:meta
- * is down.
- * @throws NullPointerException Because no -ROOT- server connection
- */
- public static void updateMetaLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException, ConnectException {
- updateLocation(catalogTracker, regionInfo, sn, openSeqNum);
- }
-
- /**
- * Updates the location of the specified region in hbase:meta to be the specified
- * server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * hbase:meta and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @throws IOException
- */
- public static void updateRegionLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
- throws IOException {
- updateLocation(catalogTracker, regionInfo, sn, updateSeqNum);
- }
-
- /**
- * Updates the location of the specified region to be the specified server.
- *
- * Connects to the specified server which should be hosting the specified
- * catalog region name to perform the edit.
- *
- * @param catalogTracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException In particular could throw {@link java.net.ConnectException}
- * if the server is down on other end.
- */
- private static void updateLocation(final CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addLocation(put, sn, openSeqNum);
- putToCatalogTable(catalogTracker, put);
- LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
- " with server=" + sn);
- }
-
- /**
- * Deletes the specified region from META.
- * @param catalogTracker
- * @param regionInfo region to be deleted from META
- * @throws IOException
- */
- public static void deleteRegion(CatalogTracker catalogTracker,
- HRegionInfo regionInfo)
- throws IOException {
- Delete delete = new Delete(regionInfo.getRegionName());
- deleteFromMetaTable(catalogTracker, delete);
- LOG.info("Deleted " + regionInfo.getRegionNameAsString());
- }
-
- /**
- * Deletes the specified regions from META.
- * @param catalogTracker
- * @param regionsInfo list of regions to be deleted from META
- * @throws IOException
- */
- public static void deleteRegions(CatalogTracker catalogTracker,
- List regionsInfo) throws IOException {
- List deletes = new ArrayList(regionsInfo.size());
- for (HRegionInfo hri: regionsInfo) {
- deletes.add(new Delete(hri.getRegionName()));
- }
- deleteFromMetaTable(catalogTracker, deletes);
- LOG.info("Deleted " + regionsInfo);
- }
-
- /**
- * Adds and Removes the specified regions from hbase:meta
- * @param catalogTracker
- * @param regionsToRemove list of regions to be deleted from META
- * @param regionsToAdd list of regions to be added to META
- * @throws IOException
- */
- public static void mutateRegions(CatalogTracker catalogTracker,
- final List regionsToRemove, final List regionsToAdd)
- throws IOException {
- List mutation = new ArrayList();
- if (regionsToRemove != null) {
- for (HRegionInfo hri: regionsToRemove) {
- mutation.add(new Delete(hri.getRegionName()));
- }
- }
- if (regionsToAdd != null) {
- for (HRegionInfo hri: regionsToAdd) {
- mutation.add(makePutFromRegionInfo(hri));
- }
- }
- mutateMetaTable(catalogTracker, mutation);
- if (regionsToRemove != null && regionsToRemove.size() > 0) {
- LOG.debug("Deleted " + regionsToRemove);
- }
- if (regionsToAdd != null && regionsToAdd.size() > 0) {
- LOG.debug("Added " + regionsToAdd);
- }
- }
-
- /**
- * Overwrites the specified regions from hbase:meta
- * @param catalogTracker
- * @param regionInfos list of regions to be added to META
- * @throws IOException
- */
- public static void overwriteRegions(CatalogTracker catalogTracker,
- List regionInfos) throws IOException {
- deleteRegions(catalogTracker, regionInfos);
- // Why sleep? This is the easiest way to ensure that the previous deletes does not
- // eclipse the following puts, that might happen in the same ts from the server.
- // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
- // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
- Threads.sleep(20);
- addRegionsToMeta(catalogTracker, regionInfos);
- LOG.info("Overwritten " + regionInfos);
- }
-
- /**
- * Deletes merge qualifiers for the specified merged region.
- * @param catalogTracker
- * @param mergedRegion
- * @throws IOException
- */
- public static void deleteMergeQualifiers(CatalogTracker catalogTracker,
- final HRegionInfo mergedRegion) throws IOException {
- Delete delete = new Delete(mergedRegion.getRegionName());
- delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
- delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
- deleteFromMetaTable(catalogTracker, delete);
- LOG.info("Deleted references in merged region "
- + mergedRegion.getRegionNameAsString() + ", qualifier="
- + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
- + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
- }
-
- private static Put addRegionInfo(final Put p, final HRegionInfo hri)
- throws IOException {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- hri.toByteArray());
- return p;
- }
-
- private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(sn.getHostAndPort()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
- Bytes.toBytes(sn.getStartcode()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
- Bytes.toBytes(openSeqNum));
- return p;
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
index f623c4b..37eb5b3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationConvertingToPB.java
@@ -23,10 +23,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor.Visitor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.MasterServices;
@@ -69,13 +70,13 @@ public class MetaMigrationConvertingToPB {
HRegionInfo hri = parseFrom(hriBytes);
// Now make a put to write back to meta.
- Put p = MetaEditor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
// Now migrate info:splitA and info:splitB if they are not null
migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);
- MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
+ MetaTableAccessor.putToMetaTable(this.services.getShortCircuitConnection(), p);
if (LOG.isDebugEnabled()) {
LOG.debug("Migrated " + Bytes.toString(p.getRow()));
}
@@ -128,7 +129,7 @@ public class MetaMigrationConvertingToPB {
*/
public static long updateMetaIfNecessary(final MasterServices services)
throws IOException {
- if (isMetaTableUpdated(services.getCatalogTracker())) {
+ if (isMetaTableUpdated(services.getShortCircuitConnection())) {
LOG.info("META already up-to date with PB serialization");
return 0;
}
@@ -150,18 +151,18 @@ public class MetaMigrationConvertingToPB {
static long updateMeta(final MasterServices masterServices) throws IOException {
LOG.info("Starting update of META");
ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
- MetaReader.fullScan(masterServices.getCatalogTracker(), v);
+ MetaTableAccessor.fullScan(masterServices.getShortCircuitConnection(), v);
LOG.info("Finished update of META. Total rows updated:" + v.numMigratedRows);
return v.numMigratedRows;
}
/**
- * @param catalogTracker the catalog tracker
+ * @param hConnection connection to be used
* @return True if the meta table has been migrated.
* @throws IOException
*/
- static boolean isMetaTableUpdated(final CatalogTracker catalogTracker) throws IOException {
- List results = MetaReader.fullScanOfMeta(catalogTracker);
+ static boolean isMetaTableUpdated(final HConnection hConnection) throws IOException {
+ List results = MetaTableAccessor.fullScanOfMeta(hConnection);
if (results == null || results.isEmpty()) {
LOG.info("hbase:meta doesn't have any entries to update.");
return true;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 7e7ba76..b8d7dfa 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@@ -212,7 +212,7 @@ public class TableMapReduceUtil {
MutationSerialization.class.getName(), ResultSerialization.class.getName());
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
@@ -278,7 +278,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, JobConf job)
throws IOException {
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@@ -293,7 +293,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumMapTasks(String table, JobConf job)
throws IOException {
- int regions = MetaReader.getRegionCount(HBaseConfiguration.create(job), table);
+ int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table);
if (job.getNumMapTasks() > regions)
job.setNumMapTasks(regions);
}
@@ -308,7 +308,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, JobConf job)
throws IOException {
- job.setNumReduceTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+ job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
}
/**
@@ -321,7 +321,7 @@ public class TableMapReduceUtil {
*/
public static void setNumMapTasks(String table, JobConf job)
throws IOException {
- job.setNumMapTasks(MetaReader.getRegionCount(HBaseConfiguration.create(job), table));
+ job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table));
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 03bc4f0..3e2b261 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -637,7 +637,7 @@ public class TableMapReduceUtil {
job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class);
- int regions = MetaReader.getRegionCount(conf, table);
+ int regions = MetaTableAccessor.getRegionCount(conf, table);
if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions);
}
@@ -662,7 +662,7 @@ public class TableMapReduceUtil {
*/
public static void limitNumReduceTasks(String table, Job job)
throws IOException {
- int regions = MetaReader.getRegionCount(job.getConfiguration(), table);
+ int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table);
if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions);
}
@@ -677,7 +677,7 @@ public class TableMapReduceUtil {
*/
public static void setNumReduceTasks(String table, Job job)
throws IOException {
- job.setNumReduceTasks(MetaReader.getRegionCount(job.getConfiguration(), table));
+ job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table));
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 87433d3..2278032 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -57,8 +57,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
@@ -96,7 +95,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -134,8 +133,6 @@ public class AssignmentManager extends ZooKeeperListener {
private boolean shouldAssignRegionsWithFavoredNodes;
- private CatalogTracker catalogTracker;
-
private LoadBalancer balancer;
private final MetricsAssignmentManager metricsAssignmentManager;
@@ -251,20 +248,19 @@ public class AssignmentManager extends ZooKeeperListener {
*
* @param server
* @param serverManager
- * @param catalogTracker
+ * @param metaRegionLocator
* @param service
* @throws KeeperException
* @throws IOException
*/
public AssignmentManager(Server server, ServerManager serverManager,
- CatalogTracker catalogTracker, final LoadBalancer balancer,
+ final LoadBalancer balancer,
final ExecutorService service, MetricsMaster metricsMaster,
final TableLockManager tableLockManager) throws KeeperException,
IOException, CoordinatedStateException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
- this.catalogTracker = catalogTracker;
this.executorService = service;
this.regionStateStore = new RegionStateStore(server);
this.regionsToReopen = Collections.synchronizedMap
@@ -399,7 +395,8 @@ public class AssignmentManager extends ZooKeeperListener {
public Pair getReopenStatus(TableName tableName)
throws IOException {
List hris =
- MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
+ MetaTableAccessor.getTableRegions(this.watcher, this.server.getShortCircuitConnection(),
+ tableName, true);
Integer pending = 0;
for (HRegionInfo hri : hris) {
String name = hri.getEncodedName();
@@ -754,7 +751,7 @@ public class AssignmentManager extends ZooKeeperListener {
if (regionInfo.isMetaRegion()) {
// If it's meta region, reset the meta location.
// So that master knows the right meta region server.
- MetaRegionTracker.setMetaLocation(watcher, sn);
+ MetaTableLocator.setMetaLocation(watcher, sn);
} else {
// No matter the previous server is online or offline,
// we need to reset the last region server of the region.
@@ -1124,7 +1121,8 @@ public class AssignmentManager extends ZooKeeperListener {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
- FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
+ FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
+ this.server.getShortCircuitConnection());
}
/**
@@ -1147,7 +1145,8 @@ public class AssignmentManager extends ZooKeeperListener {
} else {
try {
byte [] name = rt.getRegionName();
- Pair p = MetaReader.getRegion(catalogTracker, name);
+ Pair p = MetaTableAccessor.getRegion(
+ this.server.getShortCircuitConnection(), name);
regionInfo = p.getFirst();
} catch (IOException e) {
LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
@@ -1930,13 +1929,15 @@ public class AssignmentManager extends ZooKeeperListener {
final HRegionInfo region, final ServerName sn) {
try {
if (region.isMetaRegion()) {
- ServerName server = catalogTracker.getMetaLocation();
+ ServerName server = this.server.getMetaTableLocator().
+ getMetaRegionLocation(this.server.getZooKeeper());
return regionStates.isServerDeadAndNotProcessed(server);
}
while (!server.isStopped()) {
try {
- catalogTracker.waitForMeta();
- Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
+ this.server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
+ Result r = MetaTableAccessor.getRegionResult(server.getShortCircuitConnection(),
+ region.getRegionName());
if (r == null || r.isEmpty()) return false;
ServerName server = HRegionInfo.getServerName(r);
return regionStates.isServerDeadAndNotProcessed(server);
@@ -2549,7 +2550,7 @@ public class AssignmentManager extends ZooKeeperListener {
* @throws KeeperException
*/
public void assignMeta() throws KeeperException {
- MetaRegionTracker.deleteMetaLocation(this.watcher);
+ this.server.getMetaTableLocator().deleteMetaLocation(this.watcher);
assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
@@ -2719,7 +2720,7 @@ public class AssignmentManager extends ZooKeeperListener {
ZooKeeperProtos.Table.State.ENABLING);
// Region assignment from META
- List results = MetaReader.fullScan(this.catalogTracker);
+ List results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection());
// Get any new but slow to checkin region server that joined the cluster
Set onlineServers = serverManager.getOnlineServers().keySet();
// Map of offline servers and their regions to be returned
@@ -2783,7 +2784,7 @@ public class AssignmentManager extends ZooKeeperListener {
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
- new DisableTableHandler(this.server, tableName, catalogTracker,
+ new DisableTableHandler(this.server, tableName,
this, tableLockManager, true).prepare().process();
}
}
@@ -2810,7 +2811,7 @@ public class AssignmentManager extends ZooKeeperListener {
// enableTable in sync way during master startup,
// no need to invoke coprocessor
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
- catalogTracker, this, tableLockManager, true);
+ this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 59bc01e..81a0e1a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -40,8 +40,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result;
@@ -199,7 +198,8 @@ public class CatalogJanitor extends Chore {
+ " from fs because merged region no longer holds references");
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
- MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
+ MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(),
+ mergedRegion);
return true;
}
return false;
@@ -331,7 +331,7 @@ public class CatalogJanitor extends Chore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
- MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
+ MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent);
result = true;
}
return result;
@@ -403,9 +403,9 @@ public class CatalogJanitor extends Chore {
throws IOException {
// Get merge regions if it is a merged region and already has merge
// qualifier
- Pair mergeRegions = MetaReader
- .getRegionsFromMergeQualifier(this.services.getCatalogTracker(),
- region.getRegionName());
+ Pair mergeRegions = MetaTableAccessor
+ .getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(),
+ region.getRegionName());
if (mergeRegions == null
|| (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
// It doesn't have merge qualifier, no need to clean
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c92c020..8b9b275 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -64,7 +64,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
@@ -117,6 +118,7 @@ import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -409,7 +411,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
this.loadBalancerTracker.start();
this.assignmentManager = new AssignmentManager(this, serverManager,
- this.catalogTracker, this.balancer, this.service, this.metricsMaster,
+ this.balancer, this.service, this.metricsMaster,
this.tableLockManager);
zooKeeper.registerListenerFirst(assignmentManager);
@@ -482,8 +484,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.serverManager = createServerManager(this, this);
- // Now we have the cluster ID, start catalog tracker
- startCatalogTracker();
+ metaTableLocator = new MetaTableLocator();
+ shortCircuitConnection = createShortCircuitConnection();
// Invalidate all write locks held previously
this.tableLockManager.reapWriteLocks();
@@ -523,7 +525,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.fileSystemManager.removeStaleRecoveringRegionsFromZK(previouslyFailedServers);
// log splitting for hbase:meta server
- ServerName oldMetaServerLocation = this.catalogTracker.getMetaLocation();
+ ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
splitMetaLogBeforeAssignment(oldMetaServerLocation);
// Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
@@ -575,8 +577,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// Update meta with new PB serialization if required. i.e migrate all HRI to PB serialization
// in meta. This must happen before we assign all user regions or else the assignment will
// fail.
- org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB
- .updateMetaIfNecessary(this);
+ MetaMigrationConvertingToPB.updateMetaIfNecessary(this);
// Fix up assignment manager status
status.setStatus("Starting assignment manager");
@@ -675,8 +676,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO);
boolean rit = this.assignmentManager
.processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
- boolean metaRegionLocation = this.catalogTracker.verifyMetaRegionLocation(timeout);
- ServerName currentMetaServer = this.catalogTracker.getMetaLocation();
+ boolean metaRegionLocation = metaTableLocator.verifyMetaRegionLocation(
+ this.getShortCircuitConnection(), this.getZooKeeper(), timeout);
+ ServerName currentMetaServer = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
if (!metaRegionLocation) {
// Meta location is not verified. It should be in transition, or offline.
// We will wait for it to be assigned in enableSSHandWaitForMeta below.
@@ -724,7 +726,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
enableServerShutdownHandler(assigned != 0);
LOG.info("hbase:meta assigned=" + assigned + ", rit=" + rit +
- ", location=" + catalogTracker.getMetaLocation());
+ ", location=" + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
status.setStatus("META assigned.");
}
@@ -764,7 +766,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
if (waitForMeta) {
- this.catalogTracker.waitForMeta();
+ metaTableLocator.waitMetaRegionLocation(this.getZooKeeper());
// Above check waits for general meta availability but this does not
// guarantee that the transition has completed
this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO);
@@ -1408,7 +1410,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
this.service.submit(new EnableTableHandler(this, tableName,
- catalogTracker, assignmentManager, tableLockManager, false).prepare());
+ assignmentManager, tableLockManager, false).prepare());
if (cpHost != null) {
cpHost.postEnableTable(tableName);
}
@@ -1422,7 +1424,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
this.service.submit(new DisableTableHandler(this, tableName,
- catalogTracker, assignmentManager, tableLockManager, false).prepare());
+ assignmentManager, tableLockManager, false).prepare());
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
@@ -1484,7 +1486,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (isCatalogTable(tableName)) {
throw new IOException("Can't modify catalog tables");
}
- if (!MetaReader.tableExists(getCatalogTracker(), tableName)) {
+ if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) {
throw new TableNotFoundException(tableName);
}
if (!getAssignmentManager().getTableStateManager().
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 153ffcb..f98eef7 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -47,19 +47,19 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.zookeeper.KeeperException;
/**
@@ -679,8 +679,9 @@ public class MasterFileSystem {
throws IOException {
if (!this.master.isStopped()) {
try {
- this.master.getCatalogTracker().waitForMeta();
- return MetaReader.getServerUserRegions(this.master.getCatalogTracker(), serverName);
+ master.getMetaTableLocator().waitMetaRegionLocation(master.getZooKeeper());
+ return MetaTableAccessor.getServerUserRegions(master.getShortCircuitConnection(),
+ serverName);
} catch (InterruptedException e) {
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 635523d..d055506 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
@@ -1086,7 +1086,7 @@ public class MasterRpcServices extends RSRpcServices
try {
master.checkInitialized();
Pair pair =
- MetaReader.getRegion(master.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {
@@ -1217,7 +1217,7 @@ public class MasterRpcServices extends RSRpcServices
+ " actual: " + type);
}
Pair pair =
- MetaReader.getRegion(master.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
index 7a27489..75def7b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
/**
* The servlet responsible for rendering the index page of the
@@ -87,12 +88,7 @@ public class MasterStatusServlet extends HttpServlet {
}
private ServerName getMetaLocationOrNull(HMaster master) {
- try {
- return (master.getCatalogTracker() == null) ? null : master.getCatalogTracker().getMetaLocation();
- } catch (InterruptedException e) {
- LOG.warn("Unable to get meta location", e);
- return null;
- }
+ return master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper());
}
private Map getFragmentationInfo(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 848ce7a..282641e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -47,9 +47,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -141,7 +141,7 @@ public class RegionPlacementMaintainer {
public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot()
throws IOException {
SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot =
- new SnapshotOfRegionAssignmentFromMeta(new CatalogTracker(this.conf));
+ new SnapshotOfRegionAssignmentFromMeta(HConnectionManager.getConnection(conf));
currentAssignmentShapshot.initialize();
return currentAssignmentShapshot;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
index e0f07ff..38aa364 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
@@ -29,8 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
@@ -56,7 +55,6 @@ public class RegionStateStore {
private volatile boolean initialized;
private final boolean noPersistence;
- private final CatalogTracker catalogTracker;
private final Server server;
/**
@@ -109,7 +107,6 @@ public class RegionStateStore {
// No need to persist if using ZK but not migrating
noPersistence = ConfigUtil.useZKForAssignment(conf)
&& !conf.getBoolean("hbase.assignment.usezk.migrating", false);
- catalogTracker = server.getCatalogTracker();
this.server = server;
initialized = false;
}
@@ -123,7 +120,7 @@ public class RegionStateStore {
}
if (metaRegion == null) {
metaTable = new HTable(TableName.META_TABLE_NAME,
- catalogTracker.getConnection());
+ server.getShortCircuitConnection());
}
}
initialized = true;
@@ -199,7 +196,7 @@ public class RegionStateStore {
if (metaRegion != null) {
LOG.info("Meta region shortcut failed", t);
metaTable = new HTable(TableName.META_TABLE_NAME,
- catalogTracker.getConnection());
+ server.getShortCircuitConnection());
metaRegion = null;
}
}
@@ -216,11 +213,11 @@ public class RegionStateStore {
void splitRegion(HRegionInfo p,
HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
- MetaEditor.splitRegion(catalogTracker, p, a, b, sn);
+ MetaTableAccessor.splitRegion(server.getShortCircuitConnection(), p, a, b, sn);
}
void mergeRegions(HRegionInfo p,
HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
- MetaEditor.mergeRegions(catalogTracker, p, a, b, sn);
+ MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), p, a, b, sn);
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 85677af..9e8a659 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -783,7 +783,7 @@ public class RegionStates {
try {
Pair p =
- MetaReader.getRegion(server.getCatalogTracker(), regionName);
+ MetaTableAccessor.getRegion(server.getShortCircuitConnection(), regionName);
HRegionInfo hri = p == null ? null : p.getFirst();
if (hri != null) {
createRegionState(hri);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index b98c860..ed04b51 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor.Visitor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
@@ -54,7 +54,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
private static final Log LOG = LogFactory.getLog(SnapshotOfRegionAssignmentFromMeta.class
.getName());
- private CatalogTracker tracker;
+ private final HConnection hConnection;
/** the table name to region map */
private final Map> tableToRegionMap;
@@ -71,13 +71,13 @@ public class SnapshotOfRegionAssignmentFromMeta {
private final Set disabledTables;
private final boolean excludeOfflinedSplitParents;
- public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker) {
- this(tracker, new HashSet(), false);
+ public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection) {
+ this(hConnection, new HashSet(), false);
}
- public SnapshotOfRegionAssignmentFromMeta(CatalogTracker tracker, Set disabledTables,
+ public SnapshotOfRegionAssignmentFromMeta(HConnection hConnection, Set disabledTables,
boolean excludeOfflinedSplitParents) {
- this.tracker = tracker;
+ this.hConnection = hConnection;
tableToRegionMap = new HashMap>();
regionToRegionServerMap = new HashMap();
regionServerToRegionMap = new HashMap>();
@@ -94,7 +94,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " +
"snappshot");
- // TODO: at some point this code could live in the MetaReader
+ // TODO: at some point this code could live in the MetaTableAccessor
Visitor v = new Visitor() {
@Override
public boolean visit(Result result) throws IOException {
@@ -133,7 +133,7 @@ public class SnapshotOfRegionAssignmentFromMeta {
}
};
// Scan hbase:meta to pick up user regions
- MetaReader.fullScan(tracker, v);
+ MetaTableAccessor.fullScan(hConnection, v);
//regionToRegionServerMap = regions;
LOG.info("Finished to scan the hbase:meta for the current region assignment" +
"snapshot");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index e8e5a28..b32c777 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.NamespaceExistException;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZKNamespaceManager;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
@@ -80,8 +80,8 @@ public class TableNamespaceManager {
}
public void start() throws IOException {
- if (!MetaReader.tableExists(masterServices.getCatalogTracker(),
- TableName.NAMESPACE_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(),
+ TableName.NAMESPACE_TABLE_NAME)) {
LOG.info("Namespace table not found. Creating...");
createNamespaceTable(masterServices);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 2a138d3..31a42d8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.RackManager;
@@ -91,7 +91,7 @@ public class FavoredNodeAssignmentHelper {
*/
public static void updateMetaWithFavoredNodesInfo(
Map> regionToFavoredNodes,
- CatalogTracker catalogTracker) throws IOException {
+ HConnection hConnection) throws IOException {
List puts = new ArrayList();
for (Map.Entry> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
@@ -99,7 +99,7 @@ public class FavoredNodeAssignmentHelper {
puts.add(put);
}
}
- MetaEditor.putsToMetaTable(catalogTracker, puts);
+ MetaTableAccessor.putsToMetaTable(hConnection, puts);
LOG.info("Added " + puts.size() + " regions in META");
}
@@ -141,7 +141,7 @@ public class FavoredNodeAssignmentHelper {
throws IOException {
Put put = null;
if (favoredNodeList != null) {
- put = MetaEditor.makePutFromRegionInfo(regionInfo);
+ put = MetaTableAccessor.makePutFromRegionInfo(regionInfo);
byte[] favoredNodes = getFavoredNodes(favoredNodeList);
put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER,
EnvironmentEdgeManager.currentTimeMillis(), favoredNodes);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index a2730c5..67d73df 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -73,7 +73,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
List plans = new ArrayList();
//perform a scan of the meta to get the latest updates (if any)
SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
- new SnapshotOfRegionAssignmentFromMeta(super.services.getCatalogTracker());
+ new SnapshotOfRegionAssignmentFromMeta(super.services.getShortCircuitConnection());
try {
snaphotOfRegionAssignment.initialize();
} catch (IOException ie) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 6ddd0e7..10d1518 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -35,9 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -51,6 +49,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
/**
* Handler to create a table.
@@ -62,7 +61,6 @@ public class CreateTableHandler extends EventHandler {
protected final HTableDescriptor hTableDescriptor;
protected final Configuration conf;
private final AssignmentManager assignmentManager;
- private final CatalogTracker catalogTracker;
private final TableLockManager tableLockManager;
private final HRegionInfo [] newRegions;
private final TableLock tableLock;
@@ -76,7 +74,6 @@ public class CreateTableHandler extends EventHandler {
this.hTableDescriptor = hTableDescriptor;
this.conf = conf;
this.newRegions = newRegions;
- this.catalogTracker = masterServices.getCatalogTracker();
this.assignmentManager = masterServices.getAssignmentManager();
this.tableLockManager = masterServices.getTableLockManager();
@@ -84,12 +81,12 @@ public class CreateTableHandler extends EventHandler {
, EventType.C_M_CREATE_TABLE.toString());
}
- public CreateTableHandler prepare()
- throws NotAllMetaRegionsOnlineException, TableExistsException, IOException {
+ public CreateTableHandler prepare() throws IOException {
int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
// Need hbase:meta availability to create a table
try {
- if (catalogTracker.waitForMeta(timeout) == null) {
+ if (server.getMetaTableLocator().waitMetaRegionLocation(
+ server.getZooKeeper(), timeout) == null) {
throw new NotAllMetaRegionsOnlineException();
}
} catch (InterruptedException e) {
@@ -104,7 +101,7 @@ public class CreateTableHandler extends EventHandler {
boolean success = false;
try {
TableName tableName = this.hTableDescriptor.getTableName();
- if (MetaReader.tableExists(catalogTracker, tableName)) {
+ if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
throw new TableExistsException(tableName);
}
@@ -239,7 +236,7 @@ public class CreateTableHandler extends EventHandler {
if (regionInfos != null && regionInfos.size() > 0) {
// 4. Add regions to META
- addRegionsToMeta(this.catalogTracker, regionInfos);
+ addRegionsToMeta(regionInfos);
// 5. Trigger immediate assignment of the regions in round-robin fashion
ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
@@ -281,8 +278,8 @@ public class CreateTableHandler extends EventHandler {
/**
* Add the specified set of regions to the hbase:meta table.
*/
- protected void addRegionsToMeta(final CatalogTracker ct, final List<HRegionInfo> regionInfos)
+ protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
throws IOException {
- MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
+ MetaTableAccessor.addRegionsToMeta(this.server.getShortCircuitConnection(), regionInfos);
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index 8874ff1..668c2f9 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -133,7 +133,7 @@ public class DeleteTableHandler extends TableEventHandler {
throws IOException, CoordinatedStateException {
// 1. Remove regions from META
LOG.debug("Deleting regions from META");
- MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
+ MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);
// -----------------------------------------------------------------------
// NOTE: At this point we still have data on disk, but nothing in hbase:meta
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index f9f6147..c652712 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
@@ -56,17 +55,14 @@ public class DisableTableHandler extends EventHandler {
private final TableName tableName;
private final AssignmentManager assignmentManager;
private final TableLockManager tableLockManager;
- private final CatalogTracker catalogTracker;
private final boolean skipTableStateCheck;
private TableLock tableLock;
- public DisableTableHandler(Server server, TableName tableName,
- CatalogTracker catalogTracker, AssignmentManager assignmentManager,
+ public DisableTableHandler(Server server, TableName tableName, AssignmentManager assignmentManager,
TableLockManager tableLockManager, boolean skipTableStateCheck) {
super(server, EventType.C_M_DISABLE_TABLE);
this.tableName = tableName;
this.assignmentManager = assignmentManager;
- this.catalogTracker = catalogTracker;
this.tableLockManager = tableLockManager;
this.skipTableStateCheck = skipTableStateCheck;
}
@@ -84,7 +80,7 @@ public class DisableTableHandler extends EventHandler {
boolean success = false;
try {
// Check if table exists
- if (!MetaReader.tableExists(catalogTracker, tableName)) {
+ if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
throw new TableNotFoundException(tableName);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index 6fb2302..cb623ea 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -33,8 +33,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -58,16 +57,14 @@ public class EnableTableHandler extends EventHandler {
private final TableName tableName;
private final AssignmentManager assignmentManager;
private final TableLockManager tableLockManager;
- private final CatalogTracker catalogTracker;
private boolean skipTableStateCheck = false;
private TableLock tableLock;
public EnableTableHandler(Server server, TableName tableName,
- CatalogTracker catalogTracker, AssignmentManager assignmentManager,
- TableLockManager tableLockManager, boolean skipTableStateCheck) {
+ AssignmentManager assignmentManager, TableLockManager tableLockManager,
+ boolean skipTableStateCheck) {
super(server, EventType.C_M_ENABLE_TABLE);
this.tableName = tableName;
- this.catalogTracker = catalogTracker;
this.assignmentManager = assignmentManager;
this.tableLockManager = tableLockManager;
this.skipTableStateCheck = skipTableStateCheck;
@@ -83,7 +80,7 @@ public class EnableTableHandler extends EventHandler {
boolean success = false;
try {
// Check if table exists
- if (!MetaReader.tableExists(catalogTracker, tableName)) {
+ if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
// retainAssignment is true only during recovery. In normal case it is false
if (!this.skipTableStateCheck) {
throw new TableNotFoundException(tableName);
@@ -181,8 +178,9 @@ public class EnableTableHandler extends EventHandler {
ServerManager serverManager = ((HMaster)this.server).getServerManager();
// Get the regions of this table. We're done when all listed
// tables are onlined.
- List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations = MetaReader
- .getTableRegionsAndLocations(this.catalogTracker, tableName, true);
+ List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations = MetaTableAccessor
+ .getTableRegionsAndLocations(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), tableName, true);
int countOfRegionsInTable = tableRegionsAndLocations.size();
Map<HRegionInfo, ServerName> regionsToAssign =
regionsToAssignWithServerName(tableRegionsAndLocations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
index 11a4fb2..a24e387 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
@@ -91,7 +91,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
verifyAndAssignMetaWithRetries();
- } else if (!this.services.getCatalogTracker().isMetaLocationAvailable()) {
+ } else if (!server.getMetaTableLocator().isLocationAvailable(this.server.getZooKeeper())) {
// the meta location as per master is null. This could happen in case when meta assignment
// in previous run failed, while meta znode has been updated to null. We should try to
// assign the meta again.
@@ -154,14 +154,16 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
throws InterruptedException, IOException, KeeperException {
long timeout = this.server.getConfiguration().
getLong("hbase.catalog.verification.timeout", 1000);
- if (!this.server.getCatalogTracker().verifyMetaRegionLocation(timeout)) {
+ if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getShortCircuitConnection(),
+ this.server.getZooKeeper(), timeout)) {
this.services.getAssignmentManager().assignMeta();
- } else if (serverName.equals(server.getCatalogTracker().getMetaLocation())) {
+ } else if (serverName.equals(server.getMetaTableLocator().getMetaRegionLocation(
+ this.server.getZooKeeper()))) {
throw new IOException("hbase:meta is onlined on the dead server "
+ serverName);
} else {
LOG.info("Skip assigning hbase:meta, because it is online on the "
- + server.getCatalogTracker().getMetaLocation());
+ + server.getMetaTableLocator().getMetaRegionLocation(this.server.getZooKeeper()));
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index 50e09ad..80b835f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -143,7 +143,7 @@ public class ServerShutdownHandler extends EventHandler {
// Wait on meta to come online; we need it to progress.
// TODO: Best way to hold strictly here? We should build this retry logic
- // into the MetaReader operations themselves.
+ // into the MetaTableAccessor operations themselves.
// TODO: Is the reading of hbase:meta necessary when the Master has state of
// cluster in its head? It should be possible to do without reading hbase:meta
// in all but one case. On split, the RS updates the hbase:meta
@@ -159,10 +159,10 @@ public class ServerShutdownHandler extends EventHandler {
Set<HRegionInfo> hris = null;
while (!this.server.isStopped()) {
try {
- this.server.getCatalogTracker().waitForMeta();
+ server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
// Skip getting user regions if the server is stopped.
if (!this.server.isStopped()) {
- hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
+ hris = MetaTableAccessor.getServerUserRegions(this.server.getShortCircuitConnection(),
this.serverName).keySet();
}
break;
@@ -226,7 +226,7 @@ public class ServerShutdownHandler extends EventHandler {
Lock lock = am.acquireRegionLock(encodedName);
try {
RegionState rit = regionStates.getRegionTransitionState(hri);
- if (processDeadRegion(hri, am, server.getCatalogTracker())) {
+ if (processDeadRegion(hri, am)) {
ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
// If this region is in transition on the dead server, it must be
@@ -327,12 +327,11 @@ public class ServerShutdownHandler extends EventHandler {
* disabling or if the region has a partially completed split.
* @param hri
* @param assignmentManager
- * @param catalogTracker
* @return Returns true if specified region should be assigned, false if not.
* @throws IOException
*/
public static boolean processDeadRegion(HRegionInfo hri,
- AssignmentManager assignmentManager, CatalogTracker catalogTracker)
+ AssignmentManager assignmentManager)
throws IOException {
boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable());
if (!tablePresent) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index ed34875..db3db69 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
@@ -125,8 +125,8 @@ public abstract class TableEventHandler extends EventHandler {
tableName);
List<HRegionInfo> hris =
- MetaReader.getTableRegions(this.server.getCatalogTracker(),
- tableName);
+ MetaTableAccessor.getTableRegions(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), tableName);
handleTableOperation(hris);
if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
getAssignmentManager().getTableStateManager().isTableState(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
index fce86d1..1dce3f2 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -124,7 +124,8 @@ public class TruncateTableHandler extends DeleteTableHandler {
}
// 4. Add regions to META
- MetaEditor.addRegionsToMeta(masterServices.getCatalogTracker(), regionInfos);
+ MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
+ regionInfos);
// 5. Trigger immediate assignment of the regions in round-robin fashion
ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index d902c58..b8dbcd3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterServices;
@@ -139,10 +138,10 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
}
@Override
- protected void addRegionsToMeta(final CatalogTracker ct, final List<HRegionInfo> regionInfos)
+ protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
throws IOException {
- super.addRegionsToMeta(ct, regionInfos);
- metaChanges.updateMetaParentRegions(ct, regionInfos);
+ super.addRegionsToMeta(regionInfos);
+ metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), regionInfos);
}
@Override
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index d5d9993..ce3d6b7 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
@@ -149,8 +149,8 @@ public final class MasterSnapshotVerifier {
* @throws IOException if we can't reach hbase:meta or read the files from the FS
*/
private void verifyRegions(final SnapshotManifest manifest) throws IOException {
- List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
- tableName);
+ List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(
+ this.services.getZooKeeper(), this.services.getShortCircuitConnection(), tableName);
Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
if (regionManifests == null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 2b97505..f71f2ef 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.executor.EventType;
@@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
@Override
protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
- CatalogTracker catalogTracker = masterServices.getCatalogTracker();
+ HConnection conn = masterServices.getShortCircuitConnection();
FileSystem fs = fileSystemManager.getFileSystem();
Path rootDir = fileSystemManager.getRootDir();
TableName tableName = hTableDescriptor.getTableName();
@@ -149,7 +149,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// that are not correct after the restore.
List<HRegionInfo> hrisToRemove = new LinkedList<HRegionInfo>();
if (metaChanges.hasRegionsToRemove()) hrisToRemove.addAll(metaChanges.getRegionsToRemove());
- MetaEditor.deleteRegions(catalogTracker, hrisToRemove);
+ MetaTableAccessor.deleteRegions(conn, hrisToRemove);
// 4.2 Add the new set of regions to META
//
@@ -159,11 +159,11 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// in the snapshot folder.
hris.clear();
if (metaChanges.hasRegionsToAdd()) hris.addAll(metaChanges.getRegionsToAdd());
- MetaEditor.addRegionsToMeta(catalogTracker, hris);
+ MetaTableAccessor.addRegionsToMeta(conn, hris);
if (metaChanges.hasRegionsToRestore()) {
- MetaEditor.overwriteRegions(catalogTracker, metaChanges.getRegionsToRestore());
+ MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore());
}
- metaChanges.updateMetaParentRegions(catalogTracker, hris);
+ metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), hris);
// At this point the restore is complete. Next step is enabling the table.
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index ddedf9f..0853afb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -694,7 +694,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation
- if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) {
+ if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) {
if (master.getAssignmentManager().getTableStateManager().isTableState(
TableName.valueOf(fsSnapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
throw new UnsupportedOperationException("Table '" +
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 4d6182b..2ba729d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -168,8 +168,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
monitor.rethrowException();
List<Pair<HRegionInfo, ServerName>> regionsAndLocations =
- MetaReader.getTableRegionsAndLocations(this.server.getCatalogTracker(),
- snapshotTable, false);
+ MetaTableAccessor.getTableRegionsAndLocations(this.server.getZooKeeper(),
+ this.server.getShortCircuitConnection(), snapshotTable, false);
// run the snapshot
snapshotRegions(regionsAndLocations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index b054813..9b1e3e6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -431,8 +431,8 @@ public class NamespaceUpgrade implements Tool {
newRegionDir);
}
}
- meta.put(MetaEditor.makePutFromRegionInfo(newRegionInfo));
- meta.delete(MetaEditor.makeDeleteFromRegionInfo(oldRegionInfo));
+ meta.put(MetaTableAccessor.makePutFromRegionInfo(newRegionInfo));
+ meta.delete(MetaTableAccessor.makeDeleteFromRegionInfo(oldRegionInfo));
}
} finally {
meta.flushcache();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index 16456c3..1025289 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -124,8 +124,9 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
List<Pair<HRegionInfo, ServerName>> regionsAndLocations = null;
try {
regionsAndLocations =
- MetaReader.getTableRegionsAndLocations(this.master.getCatalogTracker(),
- TableName.valueOf(desc.getInstance()), false);
+ MetaTableAccessor.getTableRegionsAndLocations(this.master.getZooKeeper(),
+ this.master.getShortCircuitConnection(),
+ TableName.valueOf(desc.getInstance()), false);
} catch (InterruptedException e1) {
String msg = "Failed to get regions for '" + desc.getInstance() + "'";
LOG.error(msg);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 84f986b..653a3b6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -72,8 +72,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -135,7 +134,7 @@ import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RecoveringRegionWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -182,8 +181,20 @@ public class HRegionServer extends HasThread implements
protected HeapMemoryManager hMemManager;
- // catalog tracker
- protected CatalogTracker catalogTracker;
+ /*
+ * Short-circuit (ie. bypassing RPC layer) HConnection to this Server
+ * to be used internally for miscellaneous needs. Initialized at the server startup
+ * and closed when server shuts down. Clients must never close it explicitly.
+ */
+ protected HConnection shortCircuitConnection;
+
+ /*
+ * Long-living meta table locator, which is created when the server is started and stopped
+ * when server shuts down. References to this locator shall be used to perform according
+ * operations in EventHandlers. Primary reason for this decision is to make it mockable
+ * for tests.
+ */
+ protected MetaTableLocator metaTableLocator;
// Watch if a region is out of recovering state from ZooKeeper
@SuppressWarnings("unused")
@@ -540,14 +551,13 @@ public class HRegionServer extends HasThread implements
}
/**
- * Create CatalogTracker.
+ * Create wrapped short-circuit connection to this server.
* In its own method so can intercept and mock it over in tests.
* @throws IOException
*/
- protected CatalogTracker createCatalogTracker() throws IOException {
- HConnection conn = ConnectionUtils.createShortCircuitHConnection(
+ protected HConnection createShortCircuitConnection() throws IOException {
+ return ConnectionUtils.createShortCircuitHConnection(
HConnectionManager.getConnection(conf), serverName, rpcServices, rpcServices);
- return new CatalogTracker(zooKeeper, conf, conn, this);
}
/**
@@ -593,7 +603,7 @@ public class HRegionServer extends HasThread implements
* Bring up connection to zk ensemble and then wait until a master for this
* cluster and then after that, wait until cluster 'up' flag has been set.
* This is the order in which master does things.
- * Finally put up a catalog tracker.
+ * Finally open long-living server short-circuit connection.
* @throws IOException
* @throws InterruptedException
*/
@@ -622,8 +632,8 @@ public class HRegionServer extends HasThread implements
this.abort("Failed to retrieve Cluster ID",e);
}
- // Now we have the cluster ID, start catalog tracker
- startCatalogTracker();
+ shortCircuitConnection = createShortCircuitConnection();
+ metaTableLocator = new MetaTableLocator();
// watch for snapshots and other procedures
try {
@@ -696,17 +706,6 @@ public class HRegionServer extends HasThread implements
}
/**
- * Create and start the catalog tracker if not already done.
- */
- protected synchronized void startCatalogTracker()
- throws IOException, InterruptedException {
- if (catalogTracker == null) {
- catalogTracker = createCatalogTracker();
- catalogTracker.start();
- }
- }
-
- /**
* The HRegionServer sticks in this loop until closed.
*/
@Override
@@ -847,9 +846,18 @@ public class HRegionServer extends HasThread implements
closeUserRegions(abortRequested);
LOG.info("stopping server " + this.serverName);
}
- // Interrupt catalog tracker here in case any regions being opened out in
- // handlers are stuck waiting on meta.
- if (this.catalogTracker != null) this.catalogTracker.stop();
+
+ // so callers waiting for meta without timeout can stop
+ metaTableLocator.stop();
+ if (this.shortCircuitConnection != null && !shortCircuitConnection.isClosed()) {
+ try {
+ this.shortCircuitConnection.close();
+ } catch (IOException e) {
+ // Although the {@link Closeable} interface throws an {@link
+ // IOException}, in reality, the implementation would never do that.
+ LOG.error("Attempt to close server's short circuit HConnection failed.", e);
+ }
+ }
// Closing the compactSplit thread before closing meta regions
if (!this.killed && containsMetaTableRegions()) {
@@ -1628,8 +1636,13 @@ public class HRegionServer extends HasThread implements
}
@Override
- public CatalogTracker getCatalogTracker() {
- return this.catalogTracker;
+ public HConnection getShortCircuitConnection() {
+ return this.shortCircuitConnection;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
+ return this.metaTableLocator;
}
@Override
@@ -1656,7 +1669,7 @@ public class HRegionServer extends HasThread implements
}
@Override
- public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
+ public void postOpenDeployTasks(final HRegion r)
throws KeeperException, IOException {
rpcServices.checkOpen();
LOG.info("Post open deploy tasks for " + r.getRegionNameAsString());
@@ -1678,9 +1691,9 @@ public class HRegionServer extends HasThread implements
// Update ZK, or META
if (r.getRegionInfo().isMetaRegion()) {
- MetaRegionTracker.setMetaLocation(getZooKeeper(), serverName);
+ MetaTableLocator.setMetaLocation(getZooKeeper(), serverName);
} else if (useZKForAssignment) {
- MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
+ MetaTableAccessor.updateRegionLocation(shortCircuitConnection, r.getRegionInfo(),
this.serverName, openSeqNum);
}
if (!useZKForAssignment && !reportRegionStateTransition(
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4d133c3..3932ede 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
@@ -1205,8 +1205,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
// See HBASE-5094. Cross check with hbase:meta if still this RS is owning
// the region.
- Pair p = MetaReader.getRegion(
- regionServer.catalogTracker, region.getRegionName());
+ Pair p = MetaTableAccessor.getRegion(
+ regionServer.getShortCircuitConnection(), region.getRegionName());
if (regionServer.serverName.equals(p.getSecond())) {
Boolean closing = regionServer.regionsInTransitionInRS.get(region.getEncodedNameAsBytes());
// Map regionsInTransitionInRSOnly has an entry for a region only if the region
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 8be66e0..0fa0988 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -33,10 +33,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -325,10 +324,10 @@ public class RegionMergeTransaction {
// rollback
if (!testing && useCoordinationForAssignment) {
if (metaEntries.isEmpty()) {
- MetaEditor.mergeRegions(server.getCatalogTracker(), mergedRegion.getRegionInfo(), region_a
+ MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), mergedRegion.getRegionInfo(), region_a
.getRegionInfo(), region_b.getRegionInfo(), server.getServerName());
} else {
- mergeRegionsAndPutMetaEntries(server.getCatalogTracker(), mergedRegion.getRegionInfo(),
+ mergeRegionsAndPutMetaEntries(server.getShortCircuitConnection(), mergedRegion.getRegionInfo(),
region_a.getRegionInfo(), region_b.getRegionInfo(), server.getServerName(), metaEntries);
}
} else if (services != null && !useCoordinationForAssignment) {
@@ -343,11 +342,11 @@ public class RegionMergeTransaction {
return mergedRegion;
}
- private void mergeRegionsAndPutMetaEntries(CatalogTracker catalogTracker,
+ private void mergeRegionsAndPutMetaEntries(HConnection hConnection,
HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB, ServerName serverName,
List metaEntries) throws IOException {
prepareMutationsForMerge(mergedRegion, regionA, regionB, serverName, metaEntries);
- MetaEditor.mutateMetaTable(catalogTracker, metaEntries);
+ MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
}
public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
@@ -355,13 +354,13 @@ public class RegionMergeTransaction {
HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
// Put for parent
- Put putOfMerged = MetaEditor.makePutFromRegionInfo(copyOfMerged);
+ Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged);
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray());
putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray());
mutations.add(putOfMerged);
// Deletes for merging regions
- Delete deleteA = MetaEditor.makeDeleteFromRegionInfo(regionA);
- Delete deleteB = MetaEditor.makeDeleteFromRegionInfo(regionB);
+ Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA);
+ Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB);
mutations.add(deleteA);
mutations.add(deleteB);
// The merged is a new region, openSeqNum = 1 is fine.
@@ -579,7 +578,7 @@ public class RegionMergeTransaction {
if (services != null) {
try {
if (useCoordinationForAssignment) {
- services.postOpenDeployTasks(merged, server.getCatalogTracker());
+ services.postOpenDeployTasks(merged);
} else if (!services.reportRegionStateTransition(TransitionCode.MERGED,
mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
throw new IOException("Failed to report merged region to master: "
@@ -753,8 +752,8 @@ public class RegionMergeTransaction {
if (services == null) return false;
// Get merge regions if it is a merged region and already has merge
// qualifier
- Pair mergeRegions = MetaReader
- .getRegionsFromMergeQualifier(services.getCatalogTracker(), regionName);
+ Pair mergeRegions = MetaTableAccessor
+ .getRegionsFromMergeQualifier(services.getShortCircuitConnection(), regionName);
if (mergeRegions != null &&
(mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
// It has merge qualifier
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 0ad1f29..63dd003 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -25,7 +25,6 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager;
@@ -73,11 +72,10 @@ public interface RegionServerServices
* regionserver
*
* @param r Region to open.
- * @param ct Instance of {@link CatalogTracker}
* @throws KeeperException
* @throws IOException
*/
- void postOpenDeployTasks(final HRegion r, final CatalogTracker ct)
+ void postOpenDeployTasks(final HRegion r)
throws KeeperException, IOException;
/**
@@ -117,11 +115,6 @@ public interface RegionServerServices
ExecutorService getExecutorService();
/**
- * @return The RegionServer's CatalogTracker
- */
- CatalogTracker getCatalogTracker();
-
- /**
* @return set of recovering regions on the hosting region server
*/
Map getRecoveringRegions();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index e8754d6..147e012 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -277,11 +277,11 @@ public class SplitTransaction {
// and assign the parent region.
if (!testing && useZKForAssignment) {
if (metaEntries == null || metaEntries.isEmpty()) {
- MetaEditor.splitRegion(server.getCatalogTracker(),
- parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
- daughterRegions.getSecond().getRegionInfo(), server.getServerName());
+ MetaTableAccessor.splitRegion(server.getShortCircuitConnection(),
+ parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
+ daughterRegions.getSecond().getRegionInfo(), server.getServerName());
} else {
- offlineParentInMetaAndputMetaEntries(server.getCatalogTracker(),
+ offlineParentInMetaAndputMetaEntries(server.getShortCircuitConnection(),
parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
.getSecond().getRegionInfo(), server.getServerName(), metaEntries);
}
@@ -418,7 +418,7 @@ public class SplitTransaction {
try {
if (useZKForAssignment) {
// add 2nd daughter first (see HBASE-4335)
- services.postOpenDeployTasks(b, server.getCatalogTracker());
+ services.postOpenDeployTasks(b);
} else if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
parent.getRegionInfo(), hri_a, hri_b)) {
throw new IOException("Failed to report split region to master: "
@@ -427,7 +427,7 @@ public class SplitTransaction {
// Should add it to OnlineRegions
services.addToOnlineRegions(b);
if (useZKForAssignment) {
- services.postOpenDeployTasks(a, server.getCatalogTracker());
+ services.postOpenDeployTasks(a);
}
services.addToOnlineRegions(a);
} catch (KeeperException ke) {
@@ -482,7 +482,7 @@ public class SplitTransaction {
return regions;
}
- private void offlineParentInMetaAndputMetaEntries(CatalogTracker catalogTracker,
+ private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
ServerName serverName, List metaEntries) throws IOException {
List mutations = metaEntries;
@@ -491,19 +491,19 @@ public class SplitTransaction {
copyOfParent.setSplit(true);
//Put for parent
- Put putParent = MetaEditor.makePutFromRegionInfo(copyOfParent);
- MetaEditor.addDaughtersToPut(putParent, splitA, splitB);
+ Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
+ MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
mutations.add(putParent);
//Puts for daughters
- Put putA = MetaEditor.makePutFromRegionInfo(splitA);
- Put putB = MetaEditor.makePutFromRegionInfo(splitB);
+ Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
+ Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
addLocation(putB, serverName, 1);
mutations.add(putA);
mutations.add(putB);
- MetaEditor.mutateMetaTable(catalogTracker, mutations);
+ MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index d0005bc..a216dba 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -297,9 +297,7 @@ public class OpenRegionHandler extends EventHandler {
/**
* Thread to run region post open tasks. Call {@link #getException()} after
* the thread finishes to check for exceptions running
- * {@link RegionServerServices#postOpenDeployTasks(
- * HRegion, org.apache.hadoop.hbase.catalog.CatalogTracker)}
- * .
+ * {@link RegionServerServices#postOpenDeployTasks(HRegion)}
*/
static class PostOpenDeployTasksThread extends Thread {
private Throwable exception = null;
@@ -320,8 +318,7 @@ public class OpenRegionHandler extends EventHandler {
public void run() {
try {
- this.services.postOpenDeployTasks(this.region,
- this.server.getCatalogTracker());
+ this.services.postOpenDeployTasks(this.region);
} catch (IOException e) {
server.abort("Exception running postOpenDeployTasks; region=" +
this.region.getRegionInfo().getEncodedName(), e);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index cdb50fa..0408231 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -28,9 +28,10 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -149,7 +150,12 @@ public class ReplicationSyncUp extends Configured implements Tool {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
+ return null;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
return null;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 977a403..30175b8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
@@ -1119,8 +1119,8 @@ public class AccessController extends BaseRegionObserver
@Override
public void postStartMaster(ObserverContext ctx)
throws IOException {
- if (!MetaReader.tableExists(ctx.getEnvironment().getMasterServices().getCatalogTracker(),
- AccessControlLists.ACL_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices()
+ .getShortCircuitConnection(), AccessControlLists.ACL_TABLE_NAME)) {
// initialize the ACL storage table
AccessControlLists.init(ctx.getEnvironment().getMasterServices());
} else {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index a152fcc..26a7881 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
@@ -215,7 +215,7 @@ public class VisibilityController extends BaseRegionObserver implements MasterOb
public void postStartMaster(ObserverContext ctx) throws IOException {
// Need to create the new system table for labels here
MasterServices master = ctx.getEnvironment().getMasterServices();
- if (!MetaReader.tableExists(master.getCatalogTracker(), LABELS_TABLE_NAME)) {
+ if (!MetaTableAccessor.tableExists(master.getShortCircuitConnection(), LABELS_TABLE_NAME)) {
HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
labelsColumn.setBloomFilterType(BloomType.NONE);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index d8d8642..a02250d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
-import java.io.InterruptedIOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
@@ -45,8 +44,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionM
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -253,7 +252,7 @@ public class RestoreSnapshotHelper {
/**
* Returns the list of new regions added during the on-disk restore.
* The caller is responsible to add the regions to META.
- * e.g MetaEditor.addRegionsToMeta(...)
+ * e.g MetaTableAccessor.addRegionsToMeta(...)
* @return the list of regions to add to META
*/
public List getRegionsToAdd() {
@@ -286,7 +285,7 @@ public class RestoreSnapshotHelper {
/**
* Returns the list of regions removed during the on-disk restore.
* The caller is responsible to remove the regions from META.
- * e.g. MetaEditor.deleteRegions(...)
+ * e.g. MetaTableAccessor.deleteRegions(...)
* @return the list of regions to remove from META
*/
public List getRegionsToRemove() {
@@ -315,7 +314,7 @@ public class RestoreSnapshotHelper {
regionsToRestore.add(hri);
}
- public void updateMetaParentRegions(final CatalogTracker catalogTracker,
+ public void updateMetaParentRegions(HConnection hConnection,
final List regionInfos) throws IOException {
if (regionInfos == null || parentsMap.isEmpty()) return;
@@ -346,9 +345,9 @@ public class RestoreSnapshotHelper {
}
LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters);
- MetaEditor.addRegionToMeta(catalogTracker, regionInfo,
- regionsByName.get(daughters.getFirst()),
- regionsByName.get(daughters.getSecond()));
+ MetaTableAccessor.addRegionToMeta(hConnection, regionInfo,
+ regionsByName.get(daughters.getFirst()),
+ regionsByName.get(daughters.getSecond()));
}
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index d0c84b3..202c94e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -71,7 +71,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.io.IOUtils;
@@ -1136,7 +1136,7 @@ public class HBaseFsck extends Configured {
// add the row directly to meta.
HbckInfo hi = his.iterator().next();
HRegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
- Put p = MetaEditor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
puts.add(p);
}
}
@@ -1517,7 +1517,7 @@ public class HBaseFsck extends Configured {
ZooKeeperWatcher zkw = createZooKeeperWatcher();
ServerName sn = null;
try {
- sn = MetaRegionTracker.getMetaRegionLocation(zkw);
+ sn = new MetaTableLocator().getMetaRegionLocation(zkw);
} finally {
zkw.close();
}
@@ -1617,7 +1617,7 @@ public class HBaseFsck extends Configured {
HRegionInfo hri = new HRegionInfo(hi.metaEntry);
hri.setOffline(false);
hri.setSplit(false);
- Put p = MetaEditor.makePutFromRegionInfo(hri);
+ Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
mutations.add(p);
meta.mutateRow(mutations);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index d985299..053831a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
@@ -175,7 +175,7 @@ public class HBaseFsckRepair {
public static void fixMetaHoleOnline(Configuration conf,
HRegionInfo hri) throws IOException {
HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
meta.close();
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 80bf475..5d69827 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnectable;
@@ -331,7 +331,7 @@ class HMerge {
}
newRegion.getRegionInfo().setOffline(true);
- MetaEditor.addRegionToMeta(table, newRegion.getRegionInfo());
+ MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());
if(LOG.isDebugEnabled()) {
LOG.debug("updated columns in row: "
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 7ed1530..4ecb75a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.NoServerForRegionException;
@@ -388,7 +388,7 @@ public class RegionSplitter {
// NOTE: createTable is synchronous on the table, but not on the regions
int onlineRegions = 0;
while (onlineRegions < splitCount) {
- onlineRegions = MetaReader.getRegionCount(conf, tableName);
+ onlineRegions = MetaTableAccessor.getRegionCount(conf, tableName);
LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
if (onlineRegions < splitCount) {
Thread.sleep(10 * 1000); // sleep
diff --git hbase-server/src/main/resources/hbase-webapps/master/table.jsp hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 1f579e6..d2949ee 100644
--- hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -30,6 +30,7 @@
import="org.apache.hadoop.hbase.ServerLoad"
import="org.apache.hadoop.hbase.RegionLoad"
import="org.apache.hadoop.hbase.master.HMaster"
+ import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"
@@ -40,10 +41,11 @@
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
Configuration conf = master.getConfiguration();
HBaseAdmin hbadmin = new HBaseAdmin(conf);
+ MetaTableLocator metaTableLocator = new MetaTableLocator();
String fqtn = request.getParameter("name");
HTable table = new HTable(conf, fqtn);
String tableHeader = "Table Regions
| Name | Region Server | Start Key | End Key | Requests |
";
- ServerName rl = master.getCatalogTracker().getMetaLocation();
+ ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
Map frags = null;
@@ -195,7 +197,7 @@
<%
// NOTE: Presumes one meta region only.
HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
- ServerName metaLocation = master.getCatalogTracker().waitForMeta(1);
+ ServerName metaLocation = metaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), 1);
for (int i = 0; i < 1; i++) {
String url = "//" + metaLocation.getHostname() + ":" + master.getRegionServerInfoPort(metaLocation) + "/";
%>
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 3824294..9c5dec3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Waiter.Predicate;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -1973,7 +1973,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(table.getName(),
startKeys[i], startKeys[j]);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
newRegions.add(hri);
count++;
}
@@ -2025,7 +2025,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i],
startKeys[j]);
- MetaEditor.addRegionToMeta(meta, hri);
+ MetaTableAccessor.addRegionToMeta(meta, hri);
newRegions.add(hri);
}
@@ -2039,7 +2039,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @throws IOException When reading the rows fails.
*/
public List getMetaTableRows() throws IOException {
- // TODO: Redo using MetaReader class
+ // TODO: Redo using MetaTableAccessor class
HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
List rows = new ArrayList();
ResultScanner s = t.getScanner(new Scan());
@@ -2059,7 +2059,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @throws IOException When reading the rows fails.
*/
public List getMetaTableRows(TableName tableName) throws IOException {
- // TODO: Redo using MetaReader.
+ // TODO: Redo using MetaTableAccessor.
HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
List rows = new ArrayList();
ResultScanner s = t.getScanner(new Scan());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index ec47547..14e8d1f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -26,7 +26,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ServerNonceManager;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
@@ -92,7 +93,7 @@ class MockRegionServerServices implements RegionServerServices {
}
@Override
- public void postOpenDeployTasks(HRegion r, CatalogTracker ct)
+ public void postOpenDeployTasks(HRegion r)
throws KeeperException, IOException {
addToOnlineRegions(r);
}
@@ -110,7 +111,7 @@ class MockRegionServerServices implements RegionServerServices {
public void setRpcServer(RpcServerInterface rpc) {
this.rpcServer = rpc;
}
-
+
@Override
public ConcurrentSkipListMap getRegionsInTransitionInRS() {
return rit;
@@ -127,7 +128,12 @@ class MockRegionServerServices implements RegionServerServices {
}
@Override
- public CatalogTracker getCatalogTracker() {
+ public HConnection getShortCircuitConnection() {
+ return null;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
index 2755735..db087ff 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -94,7 +93,6 @@ public class TestDrainingServer {
final ServerName SERVERNAME_A = ServerName.valueOf("mockserver_a.org", 1000, 8000);
final ServerName SERVERNAME_B = ServerName.valueOf("mockserver_b.org", 1001, 8000);
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
- CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
@@ -138,7 +136,7 @@ public class TestDrainingServer {
Mockito.when(master.getServerManager()).thenReturn(serverManager);
- am = new AssignmentManager(server, serverManager, catalogTracker,
+ am = new AssignmentManager(server, serverManager,
balancer, startupMasterExecutor("mockExecutorService"), null, null);
Mockito.when(master.getAssignmentManager()).thenReturn(am);
@@ -165,7 +163,6 @@ public class TestDrainingServer {
public void testAssignmentManagerDoesntUseDrainedServerWithBulkAssign() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
- CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class);
AssignmentManager am;
final HMaster master = Mockito.mock(HMaster.class);
final Server server = Mockito.mock(Server.class);
@@ -243,7 +240,7 @@ public class TestDrainingServer {
drainedServers.add(SERVERNAME_C);
drainedServers.add(SERVERNAME_D);
- am = new AssignmentManager(server, serverManager, catalogTracker,
+ am = new AssignmentManager(server, serverManager,
balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
Mockito.when(master.getAssignmentManager()).thenReturn(am);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 0b0e290..81c5e5e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -29,8 +29,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -98,13 +97,9 @@ public class TestRegionRebalancing {
admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS,
1, HBaseTestingUtility.KEYS.length));
this.table = new HTable(UTIL.getConfiguration(), this.desc.getTableName());
- CatalogTracker ct = new CatalogTracker(UTIL.getConfiguration());
- ct.start();
- try {
- MetaReader.fullScanMetaAndPrint(ct);
- } finally {
- ct.stop();
- }
+
+ MetaTableAccessor.fullScanMetaAndPrint(admin.getConnection());
+
assertEquals("Test table should have right number of regions",
HBaseTestingUtility.KEYS.length,
this.table.getStartKeys().length);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
deleted file mode 100644
index caeafbd..0000000
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.ConnectException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.util.Progressable;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * Test {@link CatalogTracker}
- */
-@Category(MediumTests.class)
-public class TestCatalogTracker {
- private static final Log LOG = LogFactory.getLog(TestCatalogTracker.class);
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
- private static final ServerName SN =
- ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
- private ZooKeeperWatcher watcher;
- private Abortable abortable;
-
- @BeforeClass public static void beforeClass() throws Exception {
- // Set this down so tests run quicker
- UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
- UTIL.startMiniZKCluster();
- }
-
- @AfterClass public static void afterClass() throws IOException {
- UTIL.getZkCluster().shutdown();
- }
-
- @Before public void before() throws IOException {
- this.abortable = new Abortable() {
- @Override
- public void abort(String why, Throwable e) {
- LOG.info(why, e);
- }
-
- @Override
- public boolean isAborted() {
- return false;
- }
- };
- this.watcher = new ZooKeeperWatcher(UTIL.getConfiguration(),
- this.getClass().getSimpleName(), this.abortable, true);
- }
-
- @After public void after() {
- try {
- // Clean out meta location or later tests will be confused... they presume
- // start fresh in zk.
- MetaRegionTracker.deleteMetaLocation(this.watcher);
- } catch (KeeperException e) {
- LOG.warn("Unable to delete hbase:meta location", e);
- }
-
- // Clear out our doctored connection or could mess up subsequent tests.
- HConnectionManager.deleteConnection(UTIL.getConfiguration());
-
- this.watcher.close();
- }
-
- private CatalogTracker constructAndStartCatalogTracker(final HConnection c)
- throws IOException, InterruptedException {
- CatalogTracker ct = new CatalogTracker(this.watcher, UTIL.getConfiguration(),
- c, this.abortable);
- ct.start();
- return ct;
- }
-
- /**
- * Test that we get notification if hbase:meta moves.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- */
- @Test public void testThatIfMETAMovesWeAreNotified()
- throws IOException, InterruptedException, KeeperException {
- HConnection connection = Mockito.mock(HConnection.class);
- constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher,
- ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
- }
-
- /**
- * Test interruptable while blocking wait on meta.
- * @throws IOException
- * @throws ServiceException
- * @throws InterruptedException
- */
- @Test public void testInterruptWaitOnMeta()
- throws IOException, InterruptedException, ServiceException {
- final ClientProtos.ClientService.BlockingInterface client =
- Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
- HConnection connection = mockConnection(null, client);
-
- Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
- thenReturn(GetResponse.newBuilder().build());
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ServerName meta = ct.getMetaLocation();
- Assert.assertNull(meta);
- Thread t = new Thread() {
- @Override
- public void run() {
- try {
- ct.waitForMeta();
- } catch (InterruptedException e) {
- throw new RuntimeException("Interrupted", e);
- }
- }
- };
- t.start();
- while (!t.isAlive())
- Threads.sleep(1);
- Threads.sleep(1);
- assertTrue(t.isAlive());
- ct.stop();
- // Join the thread... should exit shortly.
- t.join();
- }
-
- private void testVerifyMetaRegionLocationWithException(Exception ex)
- throws IOException, InterruptedException, KeeperException, ServiceException {
- // Mock an ClientProtocol.
- final ClientProtos.ClientService.BlockingInterface implementation =
- Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
- HConnection connection = mockConnection(null, implementation);
-
- // If a 'get' is called on mocked interface, throw connection refused.
- Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
- thenThrow(new ServiceException(ex));
- // Now start up the catalogtracker with our doctored Connection.
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher, SN);
- long timeout = UTIL.getConfiguration().
- getLong("hbase.catalog.verification.timeout", 1000);
- Assert.assertFalse(ct.verifyMetaRegionLocation(timeout));
- }
-
- /**
- * Test we survive a connection refused {@link ConnectException}
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testGetMetaServerConnectionFails()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
- }
-
- /**
- * Test that verifyMetaRegionLocation properly handles getting a
- * ServerNotRunningException. See HBASE-4470.
- * Note this doesn't check the exact exception thrown in the
- * HBASE-4470 as there it is thrown from getHConnection() and
- * here it is thrown from get() -- but those are both called
- * from the same function anyway, and this way is less invasive than
- * throwing from getHConnection would be.
- *
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testVerifyMetaRegionServerNotRunning()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
- }
-
- /**
- * Test get of meta region fails properly if nothing to connect to.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- * @throws ServiceException
- */
- @Test
- public void testVerifyMetaRegionLocationFails()
- throws IOException, InterruptedException, KeeperException, ServiceException {
- HConnection connection = Mockito.mock(HConnection.class);
- ServiceException connectException =
- new ServiceException(new ConnectException("Connection refused"));
- final AdminProtos.AdminService.BlockingInterface implementation =
- Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
- Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
- (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
- Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
- thenReturn(implementation);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
-
- MetaRegionTracker.setMetaLocation(this.watcher,
- ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
- Assert.assertFalse(ct.verifyMetaRegionLocation(100));
- }
-
- @Test (expected = NotAllMetaRegionsOnlineException.class)
- public void testTimeoutWaitForMeta()
- throws IOException, InterruptedException {
- HConnection connection = Mockito.mock(HConnection.class);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ct.waitForMeta(100);
- }
-
- /**
- * Test waiting on meat w/ no timeout specified.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
- */
- @Test public void testNoTimeoutWaitForMeta()
- throws IOException, InterruptedException, KeeperException {
- HConnection connection = Mockito.mock(HConnection.class);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ServerName hsa = ct.getMetaLocation();
- Assert.assertNull(hsa);
-
- // Now test waiting on meta location getting set.
- Thread t = new WaitOnMetaThread(ct);
- startWaitAliveThenWaitItLives(t, 1);
- // Set a meta location.
- hsa = setMetaLocation();
- // Join the thread... should exit shortly.
- t.join();
- // Now meta is available.
- Assert.assertTrue(ct.getMetaLocation().equals(hsa));
- }
-
- private ServerName setMetaLocation() throws KeeperException {
- MetaRegionTracker.setMetaLocation(this.watcher, SN);
- return SN;
- }
-
- /**
- * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
- * want to pass a mocked HRS; can be null.
- * @param client A mocked ClientProtocol instance, can be null
- * @return Mock up a connection that returns a {@link Configuration} when
- * {@link HConnection#getConfiguration()} is called, a 'location' when
- * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
- * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
- * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
- * {@link ClientProtos.ClientService.BlockingInterface} instance when
- * {@link HConnection#getClient(ServerName)} is called (Be sure to call
- * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
- * when done with this mocked Connection.
- * @throws IOException
- */
- private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
- final ClientProtos.ClientService.BlockingInterface client)
- throws IOException {
- HConnection connection =
- HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
- Mockito.doNothing().when(connection).close();
- // Make it so we return any old location when asked.
- final HRegionLocation anyLocation =
- new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
- Mockito.when(connection.getRegionLocation((TableName) Mockito.any(),
- (byte[]) Mockito.any(), Mockito.anyBoolean())).
- thenReturn(anyLocation);
- Mockito.when(connection.locateRegion((TableName) Mockito.any(),
- (byte[]) Mockito.any())).
- thenReturn(anyLocation);
- if (admin != null) {
- // If a call to getHRegionConnection, return this implementation.
- Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).
- thenReturn(admin);
- }
- if (client != null) {
- // If a call to getClient, return this implementation.
- Mockito.when(connection.getClient(Mockito.any(ServerName.class))).
- thenReturn(client);
- }
- return connection;
- }
-
- /**
- * @return A mocked up Result that fakes a Get on a row in the
- * hbase:meta table.
- * @throws IOException
- */
- private Result getMetaTableRowResult() throws IOException {
- return MetaMockingUtil.getMetaTableRowResult(HRegionInfo.FIRST_META_REGIONINFO, SN);
- }
-
- private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
- t.start();
- while(!t.isAlive()) {
- // Wait
- }
- // Wait one second.
- Threads.sleep(ms);
- Assert.assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
- }
-
- class CountingProgressable implements Progressable {
- final AtomicInteger counter = new AtomicInteger(0);
- @Override
- public void progress() {
- this.counter.incrementAndGet();
- }
- }
-
- /**
- * Wait on META.
- */
- class WaitOnMetaThread extends Thread {
- final CatalogTracker ct;
-
- WaitOnMetaThread(final CatalogTracker ct) {
- super("WaitOnMeta");
- this.ct = ct;
- }
-
- @Override
- public void run() {
- try {
- doWaiting();
- } catch (InterruptedException e) {
- throw new RuntimeException("Failed wait", e);
- }
- LOG.info("Exiting " + getName());
- }
-
- void doWaiting() throws InterruptedException {
- try {
- while (this.ct.waitForMeta(100) == null);
- } catch (NotAllMetaRegionsOnlineException e) {
- // Ignore.
- }
- }
- }
-
-}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
index 5b6cb03..b5671ff 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -182,9 +183,9 @@ public class TestMetaMigrationConvertingToPB {
public void testMetaUpdatedFlagInROOT() throws Exception {
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
boolean metaUpdated = MetaMigrationConvertingToPB.
- isMetaTableUpdated(master.getCatalogTracker());
+ isMetaTableUpdated(master.getShortCircuitConnection());
assertEquals(true, metaUpdated);
- verifyMetaRowsAreUpdated(master.getCatalogTracker());
+ verifyMetaRowsAreUpdated(master.getShortCircuitConnection());
}
@Test
@@ -202,26 +203,24 @@ public class TestMetaMigrationConvertingToPB {
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(),
regionNames);
- CatalogTracker ct =
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
+ HConnection masterHConnection =
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getShortCircuitConnection();
// Erase the current version of root meta for this test.
- undoVersionInRoot(ct);
- MetaReader.fullScanMetaAndPrint(ct);
+ undoVersionInRoot();
+ MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testMetaMigration");
long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
TEST_UTIL.getHBaseCluster().getMaster());
- MetaReader.fullScanMetaAndPrint(ct);
+ MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
// Should be one entry only and it should be for the table we just added.
assertEquals(regionNames.length, numMigratedRows);
// Assert that the flag in ROOT is updated to reflect the correct status
- boolean metaUpdated =
- MetaMigrationConvertingToPB.isMetaTableUpdated(
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
+ boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertEquals(true, metaUpdated);
- verifyMetaRowsAreUpdated(ct);
+ verifyMetaRowsAreUpdated(masterHConnection);
}
/**
@@ -249,12 +248,12 @@ public class TestMetaMigrationConvertingToPB {
// Create 10 Legacy regions.
createMultiRegionsWithWritableSerialization(conf,
htd.getTableName().getName(), 10);
- CatalogTracker ct =
- TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker();
+ HConnection masterHConnection =
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getShortCircuitConnection();
// Erase the current version of root meta for this test.
- undoVersionInRoot(ct);
+ undoVersionInRoot();
- MetaReader.fullScanMetaAndPrint(ct);
+ MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI");
long numMigratedRows =
@@ -263,11 +262,10 @@ public class TestMetaMigrationConvertingToPB {
assertEquals(numMigratedRows, 10);
// Assert that the flag in ROOT is updated to reflect the correct status
- boolean metaUpdated = MetaMigrationConvertingToPB.
- isMetaTableUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker());
+ boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
assertEquals(true, metaUpdated);
- verifyMetaRowsAreUpdated(ct);
+ verifyMetaRowsAreUpdated(masterHConnection);
LOG.info("END testMasterCrashDuringMetaMigration");
}
@@ -275,9 +273,9 @@ public class TestMetaMigrationConvertingToPB {
/**
* Verify that every hbase:meta row is updated
*/
- void verifyMetaRowsAreUpdated(CatalogTracker catalogTracker)
+ void verifyMetaRowsAreUpdated(HConnection hConnection)
throws IOException {
- List results = MetaReader.fullScan(catalogTracker);
+ List results = MetaTableAccessor.fullScan(hConnection);
assertTrue(results.size() >= REGION_COUNT);
for (Result result : results) {
@@ -301,7 +299,7 @@ public class TestMetaMigrationConvertingToPB {
}
/** Changes the version of hbase:meta to 0 to simulate 0.92 and 0.94 clusters*/
- private void undoVersionInRoot(CatalogTracker ct) throws IOException {
+ private void undoVersionInRoot() throws IOException {
Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
@@ -424,7 +422,7 @@ public class TestMetaMigrationConvertingToPB {
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
- Put put = MetaEditor.makePutFromRegionInfo(hri);
+ Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
put.setDurability(Durability.SKIP_WAL);
meta.put(put);
LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
deleted file mode 100644
index 8597916..0000000
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test {@link MetaReader}, {@link MetaEditor}.
- */
-@Category(MediumTests.class)
-public class TestMetaReaderEditor {
- private static final Log LOG = LogFactory.getLog(TestMetaReaderEditor.class);
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
- private static CatalogTracker CT;
-
- @BeforeClass public static void beforeClass() throws Exception {
- UTIL.startMiniCluster(3);
-
- Configuration c = new Configuration(UTIL.getConfiguration());
- // Tests to 4 retries every 5 seconds. Make it try every 1 second so more
- // responsive. 1 second is default as is ten retries.
- c.setLong("hbase.client.pause", 1000);
- c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
- CT = new CatalogTracker(c);
- CT.start();
- }
-
- @AfterClass public static void afterClass() throws Exception {
- CT.stop();
- UTIL.shutdownMiniCluster();
- }
-
- /**
- * Does {@link MetaReader#getRegion(CatalogTracker, byte[])} and a write
- * against hbase:meta while its hosted server is restarted to prove our retrying
- * works.
- * @throws IOException
- * @throws InterruptedException
- */
- @Test public void testRetrying()
- throws IOException, InterruptedException {
- final TableName name =
- TableName.valueOf("testRetrying");
- LOG.info("Started " + name);
- HTable t = UTIL.createTable(name, HConstants.CATALOG_FAMILY);
- int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
- // Test it works getting a region from just made user table.
- final List regions =
- testGettingTableRegions(CT, name, regionCount);
- MetaTask reader = new MetaTask(CT, "reader") {
- @Override
- void metaTask() throws Throwable {
- testGetRegion(this.ct, regions.get(0));
- LOG.info("Read " + regions.get(0).getEncodedName());
- }
- };
- MetaTask writer = new MetaTask(CT, "writer") {
- @Override
- void metaTask() throws Throwable {
- MetaEditor.addRegionToMeta(this.ct, regions.get(0));
- LOG.info("Wrote " + regions.get(0).getEncodedName());
- }
- };
- reader.start();
- writer.start();
-
- // We're gonna check how it takes. If it takes too long, we will consider
- // it as a fail. We can't put that in the @Test tag as we want to close
- // the threads nicely
- final long timeOut = 180000;
- long startTime = System.currentTimeMillis();
-
- try {
- // Make sure reader and writer are working.
- assertTrue(reader.isProgressing());
- assertTrue(writer.isProgressing());
-
- // Kill server hosting meta -- twice . See if our reader/writer ride over the
- // meta moves. They'll need to retry.
- for (int i = 0; i < 2; i++) {
- LOG.info("Restart=" + i);
- UTIL.ensureSomeRegionServersAvailable(2);
- int index = -1;
- do {
- index = UTIL.getMiniHBaseCluster().getServerWithMeta();
- } while (index == -1 &&
- startTime + timeOut < System.currentTimeMillis());
-
- if (index != -1){
- UTIL.getMiniHBaseCluster().abortRegionServer(index);
- UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
- }
- }
-
- assertTrue("reader: " + reader.toString(), reader.isProgressing());
- assertTrue("writer: " + writer.toString(), writer.isProgressing());
- } catch (IOException e) {
- throw e;
- } finally {
- reader.stop = true;
- writer.stop = true;
- reader.join();
- writer.join();
- t.close();
- }
- long exeTime = System.currentTimeMillis() - startTime;
- assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut);
- }
-
- /**
- * Thread that runs a MetaReader/MetaEditor task until asked stop.
- */
- abstract static class MetaTask extends Thread {
- boolean stop = false;
- int count = 0;
- Throwable t = null;
- final CatalogTracker ct;
-
- MetaTask(final CatalogTracker ct, final String name) {
- super(name);
- this.ct = ct;
- }
-
- @Override
- public void run() {
- try {
- while(!this.stop) {
- LOG.info("Before " + this.getName()+ ", count=" + this.count);
- metaTask();
- this.count += 1;
- LOG.info("After " + this.getName() + ", count=" + this.count);
- Thread.sleep(100);
- }
- } catch (Throwable t) {
- LOG.info(this.getName() + " failed", t);
- this.t = t;
- }
- }
-
- boolean isProgressing() throws InterruptedException {
- int currentCount = this.count;
- while(currentCount == this.count) {
- if (!isAlive()) return false;
- if (this.t != null) return false;
- Thread.sleep(10);
- }
- return true;
- }
-
- @Override
- public String toString() {
- return "count=" + this.count + ", t=" +
- (this.t == null? "null": this.t.toString());
- }
-
- abstract void metaTask() throws Throwable;
- }
-
- @Test public void testGetRegionsCatalogTables()
- throws IOException, InterruptedException {
- List regions =
- MetaReader.getTableRegions(CT, TableName.META_TABLE_NAME);
- assertTrue(regions.size() >= 1);
- assertTrue(MetaReader.getTableRegionsAndLocations(CT,
- TableName.META_TABLE_NAME).size() >= 1);
- }
-
- @Test public void testTableExists() throws IOException {
- final TableName name =
- TableName.valueOf("testTableExists");
- assertFalse(MetaReader.tableExists(CT, name));
- UTIL.createTable(name, HConstants.CATALOG_FAMILY);
- assertTrue(MetaReader.tableExists(CT, name));
- HBaseAdmin admin = UTIL.getHBaseAdmin();
- admin.disableTable(name);
- admin.deleteTable(name);
- assertFalse(MetaReader.tableExists(CT, name));
- assertTrue(MetaReader.tableExists(CT,
- TableName.META_TABLE_NAME));
- }
-
- @Test public void testGetRegion() throws IOException, InterruptedException {
- final String name = "testGetRegion";
- LOG.info("Started " + name);
- // Test get on non-existent region.
- Pair pair =
- MetaReader.getRegion(CT, Bytes.toBytes("nonexistent-region"));
- assertNull(pair);
- LOG.info("Finished " + name);
- }
-
- // Test for the optimization made in HBASE-3650
- @Test public void testScanMetaForTable()
- throws IOException, InterruptedException {
- final TableName name =
- TableName.valueOf("testScanMetaForTable");
- LOG.info("Started " + name);
-
- /** Create 2 tables
- - testScanMetaForTable
- - testScanMetaForTablf
- **/
-
- UTIL.createTable(name, HConstants.CATALOG_FAMILY);
- // name that is +1 greater than the first one (e+1=f)
- TableName greaterName =
- TableName.valueOf("testScanMetaForTablf");
- UTIL.createTable(greaterName, HConstants.CATALOG_FAMILY);
-
- // Now make sure we only get the regions from 1 of the tables at a time
-
- assertEquals(1, MetaReader.getTableRegions(CT, name).size());
- assertEquals(1, MetaReader.getTableRegions(CT, greaterName).size());
- }
-
- private static List testGettingTableRegions(final CatalogTracker ct,
- final TableName name, final int regionCount)
- throws IOException, InterruptedException {
- List regions = MetaReader.getTableRegions(ct, name);
- assertEquals(regionCount, regions.size());
- Pair pair =
- MetaReader.getRegion(ct, regions.get(0).getRegionName());
- assertEquals(regions.get(0).getEncodedName(),
- pair.getFirst().getEncodedName());
- return regions;
- }
-
- private static void testGetRegion(final CatalogTracker ct,
- final HRegionInfo region)
- throws IOException, InterruptedException {
- Pair pair =
- MetaReader.getRegion(ct, region.getRegionName());
- assertEquals(region.getEncodedName(),
- pair.getFirst().getEncodedName());
- }
-
-}
-
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
deleted file mode 100644
index 6ae0ecd..0000000
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.catalog;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.NavigableMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * Test MetaReader/Editor but without spinning up a cluster.
- * We mock regionserver back and forth (we do spin up a zk cluster).
- */
-@Category(MediumTests.class)
-public class TestMetaReaderEditorNoCluster {
- private static final Log LOG = LogFactory.getLog(TestMetaReaderEditorNoCluster.class);
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
- private static final Abortable ABORTABLE = new Abortable() {
- boolean aborted = false;
- @Override
- public void abort(String why, Throwable e) {
- LOG.info(why, e);
- this.aborted = true;
- throw new RuntimeException(e);
- }
- @Override
- public boolean isAborted() {
- return this.aborted;
- }
- };
-
- @Before
- public void before() throws Exception {
- UTIL.startMiniZKCluster();
- }
-
- @After
- public void after() throws IOException {
- UTIL.shutdownMiniZKCluster();
- }
-
- @Test
- public void testGetHRegionInfo() throws IOException {
- assertNull(HRegionInfo.getHRegionInfo(new Result()));
-
- List kvs = new ArrayList();
- Result r = Result.create(kvs);
- assertNull(HRegionInfo.getHRegionInfo(r));
-
- byte [] f = HConstants.CATALOG_FAMILY;
- // Make a key value that doesn't have the expected qualifier.
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
- HConstants.SERVER_QUALIFIER, f));
- r = Result.create(kvs);
- assertNull(HRegionInfo.getHRegionInfo(r));
- // Make a key that does not have a regioninfo value.
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
- HConstants.REGIONINFO_QUALIFIER, f));
- HRegionInfo hri = HRegionInfo.getHRegionInfo(Result.create(kvs));
- assertTrue(hri == null);
- // OK, give it what it expects
- kvs.clear();
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
- HConstants.REGIONINFO_QUALIFIER,
- HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
- hri = HRegionInfo.getHRegionInfo(Result.create(kvs));
- assertNotNull(hri);
- assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
- }
-
- /**
- * Test that MetaReader will ride over server throwing
- * "Server not running" IOEs.
- * @see @link {https://issues.apache.org/jira/browse/HBASE-3446}
- * @throws IOException
- * @throws InterruptedException
- */
- @Test
- public void testRideOverServerNotRunning()
- throws IOException, InterruptedException, ServiceException {
- // Need a zk watcher.
- ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(),
- this.getClass().getSimpleName(), ABORTABLE, true);
- // This is a servername we use in a few places below.
- ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
-
- HConnection connection;
- CatalogTracker ct = null;
- try {
- // Mock an ClientProtocol. Our mock implementation will fail a few
- // times when we go to open a scanner.
- final ClientProtos.ClientService.BlockingInterface implementation =
- Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
- // When scan called throw IOE 'Server not running' a few times
- // before we return a scanner id. Whats WEIRD is that these
- // exceptions do not show in the log because they are caught and only
- // printed if we FAIL. We eventually succeed after retry so these don't
- // show. We will know if they happened or not because we will ask
- // mockito at the end of this test to verify that scan was indeed
- // called the wanted number of times.
- List kvs = new ArrayList();
- final byte [] rowToVerify = Bytes.toBytes("rowToVerify");
- kvs.add(new KeyValue(rowToVerify,
- HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
- kvs.add(new KeyValue(rowToVerify,
- HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(sn.getHostAndPort())));
- kvs.add(new KeyValue(rowToVerify,
- HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
- Bytes.toBytes(sn.getStartcode())));
- final List cellScannables = new ArrayList(1);
- cellScannables.add(Result.create(kvs));
- final ScanResponse.Builder builder = ScanResponse.newBuilder();
- for (CellScannable result : cellScannables) {
- builder.addCellsPerResult(((Result)result).size());
- }
- Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
- .thenThrow(new ServiceException("Server not running (1 of 3)"))
- .thenThrow(new ServiceException("Server not running (2 of 3)"))
- .thenThrow(new ServiceException("Server not running (3 of 3)"))
- .thenReturn(ScanResponse.newBuilder().setScannerId(1234567890L).build())
- .thenAnswer(new Answer() {
- public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
- ((PayloadCarryingRpcController) invocation.getArguments()[0]).setCellScanner(CellUtil
- .createCellScanner(cellScannables));
- return builder.build();
- }
- }).thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
- // Associate a spied-upon HConnection with UTIL.getConfiguration. Need
- // to shove this in here first so it gets picked up all over; e.g. by
- // HTable.
- connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
- // Fix the location lookup so it 'works' though no network. First
- // make an 'any location' object.
- final HRegionLocation anyLocation =
- new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
- // Return the any location object when locateRegion is called in HTable
- // constructor and when its called by ServerCallable (it uses getRegionLocation).
- // The ugly format below comes of 'Important gotcha on spying real objects!' from
- // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
- Mockito.doReturn(anyLocation).
- when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any());
- Mockito.doReturn(anyLocation).
- when(connection).getRegionLocation((TableName) Mockito.any(),
- (byte[]) Mockito.any(), Mockito.anyBoolean());
-
- // Now shove our HRI implementation into the spied-upon connection.
- Mockito.doReturn(implementation).
- when(connection).getClient(Mockito.any(ServerName.class));
-
- // Now start up the catalogtracker with our doctored Connection.
- ct = new CatalogTracker(zkw, null, connection, ABORTABLE);
- ct.start();
- // Scan meta for user tables and verify we got back expected answer.
- NavigableMap hris = MetaReader.getServerUserRegions(ct, sn);
- assertEquals(1, hris.size());
- assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
- assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
- // Finally verify that scan was called four times -- three times
- // with exception and then on 4th, 5th and 6th attempt we succeed
- Mockito.verify(implementation, Mockito.times(6)).
- scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any());
- } finally {
- if (ct != null) ct.stop();
- HConnectionManager.deleteConnection(UTIL.getConfiguration());
- zkw.close();
- }
- }
-}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessor.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessor.java
new file mode 100644
index 0000000..06f61ef
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessor.java
@@ -0,0 +1,283 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test {@link MetaTableAccessor}.
+ */
+@Category(MediumTests.class)
+public class TestMetaTableAccessor {
+ private static final Log LOG = LogFactory.getLog(TestMetaTableAccessor.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static HConnection hConnection;
+
+ @BeforeClass public static void beforeClass() throws Exception {
+ UTIL.startMiniCluster(3);
+
+ Configuration c = new Configuration(UTIL.getConfiguration());
+ // Tests to 4 retries every 5 seconds. Make it try every 1 second so more
+ // responsive. 1 second is default as is ten retries.
+ c.setLong("hbase.client.pause", 1000);
+ c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
+ hConnection = HConnectionManager.getConnection(c);
+ }
+
+ @AfterClass public static void afterClass() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ /**
+ * Does {@link MetaTableAccessor#getRegion(org.apache.hadoop.hbase.client.HConnection,
+ * byte[])} and a write
+ * against hbase:meta while its hosted server is restarted to prove our retrying
+ * works.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test public void testRetrying()
+ throws IOException, InterruptedException {
+ final TableName name =
+ TableName.valueOf("testRetrying");
+ LOG.info("Started " + name);
+ HTable t = UTIL.createTable(name, HConstants.CATALOG_FAMILY);
+ int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
+ // Test it works getting a region from just made user table.
+ final List regions =
+ testGettingTableRegions(hConnection, name, regionCount);
+ MetaTask reader = new MetaTask(hConnection, "reader") {
+ @Override
+ void metaTask() throws Throwable {
+ testGetRegion(hConnection, regions.get(0));
+ LOG.info("Read " + regions.get(0).getEncodedName());
+ }
+ };
+ MetaTask writer = new MetaTask(hConnection, "writer") {
+ @Override
+ void metaTask() throws Throwable {
+ MetaTableAccessor.addRegionToMeta(hConnection, regions.get(0));
+ LOG.info("Wrote " + regions.get(0).getEncodedName());
+ }
+ };
+ reader.start();
+ writer.start();
+
+ // We're gonna check how it takes. If it takes too long, we will consider
+ // it as a fail. We can't put that in the @Test tag as we want to close
+ // the threads nicely
+ final long timeOut = 180000;
+ long startTime = System.currentTimeMillis();
+
+ try {
+ // Make sure reader and writer are working.
+ assertTrue(reader.isProgressing());
+ assertTrue(writer.isProgressing());
+
+ // Kill server hosting meta -- twice . See if our reader/writer ride over the
+ // meta moves. They'll need to retry.
+ for (int i = 0; i < 2; i++) {
+ LOG.info("Restart=" + i);
+ UTIL.ensureSomeRegionServersAvailable(2);
+ int index = -1;
+ do {
+ index = UTIL.getMiniHBaseCluster().getServerWithMeta();
+ } while (index == -1 &&
+ startTime + timeOut < System.currentTimeMillis());
+
+ if (index != -1){
+ UTIL.getMiniHBaseCluster().abortRegionServer(index);
+ UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
+ }
+ }
+
+ assertTrue("reader: " + reader.toString(), reader.isProgressing());
+ assertTrue("writer: " + writer.toString(), writer.isProgressing());
+ } catch (IOException e) {
+ throw e;
+ } finally {
+ reader.stop = true;
+ writer.stop = true;
+ reader.join();
+ writer.join();
+ t.close();
+ }
+ long exeTime = System.currentTimeMillis() - startTime;
+ assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut);
+ }
+
+ /**
+ * Thread that runs a MetaTableAccessor task until asked stop.
+ */
+ abstract static class MetaTask extends Thread {
+ boolean stop = false;
+ int count = 0;
+ Throwable t = null;
+ final HConnection hConnection;
+
+ MetaTask(final HConnection hConnection, final String name) {
+ super(name);
+ this.hConnection = hConnection;
+ }
+
+ @Override
+ public void run() {
+ try {
+ while(!this.stop) {
+ LOG.info("Before " + this.getName()+ ", count=" + this.count);
+ metaTask();
+ this.count += 1;
+ LOG.info("After " + this.getName() + ", count=" + this.count);
+ Thread.sleep(100);
+ }
+ } catch (Throwable t) {
+ LOG.info(this.getName() + " failed", t);
+ this.t = t;
+ }
+ }
+
+ boolean isProgressing() throws InterruptedException {
+ int currentCount = this.count;
+ while(currentCount == this.count) {
+ if (!isAlive()) return false;
+ if (this.t != null) return false;
+ Thread.sleep(10);
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "count=" + this.count + ", t=" +
+ (this.t == null? "null": this.t.toString());
+ }
+
+ abstract void metaTask() throws Throwable;
+ }
+
+ @Test public void testGetRegionsCatalogTables()
+ throws IOException, InterruptedException {
+ List regions =
+ MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, TableName.META_TABLE_NAME);
+ assertTrue(regions.size() >= 1);
+ assertTrue(MetaTableAccessor.getTableRegionsAndLocations(UTIL.getZooKeeperWatcher(),
+ hConnection,TableName.META_TABLE_NAME).size() >= 1);
+ }
+
+ @Test public void testTableExists() throws IOException {
+ final TableName name =
+ TableName.valueOf("testTableExists");
+ assertFalse(MetaTableAccessor.tableExists(hConnection, name));
+ UTIL.createTable(name, HConstants.CATALOG_FAMILY);
+ assertTrue(MetaTableAccessor.tableExists(hConnection, name));
+ HBaseAdmin admin = UTIL.getHBaseAdmin();
+ admin.disableTable(name);
+ admin.deleteTable(name);
+ assertFalse(MetaTableAccessor.tableExists(hConnection, name));
+ assertTrue(MetaTableAccessor.tableExists(hConnection,
+ TableName.META_TABLE_NAME));
+ }
+
+ @Test public void testGetRegion() throws IOException, InterruptedException {
+ final String name = "testGetRegion";
+ LOG.info("Started " + name);
+ // Test get on non-existent region.
+ Pair pair =
+ MetaTableAccessor.getRegion(hConnection, Bytes.toBytes("nonexistent-region"));
+ assertNull(pair);
+ LOG.info("Finished " + name);
+ }
+
+ // Test for the optimization made in HBASE-3650
+ @Test public void testScanMetaForTable()
+ throws IOException, InterruptedException {
+ final TableName name =
+ TableName.valueOf("testScanMetaForTable");
+ LOG.info("Started " + name);
+
+ /** Create 2 tables
+ - testScanMetaForTable
+ - testScanMetaForTablf
+ **/
+
+ UTIL.createTable(name, HConstants.CATALOG_FAMILY);
+ // name that is +1 greater than the first one (e+1=f)
+ TableName greaterName =
+ TableName.valueOf("testScanMetaForTablf");
+ UTIL.createTable(greaterName, HConstants.CATALOG_FAMILY);
+
+ // Now make sure we only get the regions from 1 of the tables at a time
+
+ assertEquals(1, MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, name).size());
+ assertEquals(1, MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, greaterName).size());
+ }
+
+ private static List testGettingTableRegions(final HConnection hConnection,
+ final TableName name, final int regionCount)
+ throws IOException, InterruptedException {
+ List regions = MetaTableAccessor.getTableRegions(UTIL.getZooKeeperWatcher(),
+ hConnection, name);
+ assertEquals(regionCount, regions.size());
+ Pair pair =
+ MetaTableAccessor.getRegion(hConnection, regions.get(0).getRegionName());
+ assertEquals(regions.get(0).getEncodedName(),
+ pair.getFirst().getEncodedName());
+ return regions;
+ }
+
+ private static void testGetRegion(final HConnection hConnection,
+ final HRegionInfo region)
+ throws IOException, InterruptedException {
+ Pair pair =
+ MetaTableAccessor.getRegion(hConnection, region.getRegionName());
+ assertEquals(region.getEncodedName(),
+ pair.getFirst().getEncodedName());
+ }
+
+}
+
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessorNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessorNoCluster.java
new file mode 100644
index 0000000..9db56eb
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableAccessorNoCluster.java
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.NavigableMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Test MetaTableAccessor but without spinning up a cluster.
+ * We mock regionserver back and forth (we do spin up a zk cluster).
+ */
+@Category(MediumTests.class)
+public class TestMetaTableAccessorNoCluster {
+ private static final Log LOG = LogFactory.getLog(TestMetaTableAccessorNoCluster.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final Abortable ABORTABLE = new Abortable() {
+ boolean aborted = false;
+ @Override
+ public void abort(String why, Throwable e) {
+ LOG.info(why, e);
+ this.aborted = true;
+ throw new RuntimeException(e);
+ }
+ @Override
+ public boolean isAborted() {
+ return this.aborted;
+ }
+ };
+
+ @Before
+ public void before() throws Exception {
+ UTIL.startMiniZKCluster();
+ }
+
+ @After
+ public void after() throws IOException {
+ UTIL.shutdownMiniZKCluster();
+ }
+
+ @Test
+ public void testGetHRegionInfo() throws IOException {
+ assertNull(HRegionInfo.getHRegionInfo(new Result()));
+
+ List kvs = new ArrayList();
+ Result r = Result.create(kvs);
+ assertNull(HRegionInfo.getHRegionInfo(r));
+
+ byte [] f = HConstants.CATALOG_FAMILY;
+ // Make a key value that doesn't have the expected qualifier.
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+ HConstants.SERVER_QUALIFIER, f));
+ r = Result.create(kvs);
+ assertNull(HRegionInfo.getHRegionInfo(r));
+ // Make a key that does not have a regioninfo value.
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+ HConstants.REGIONINFO_QUALIFIER, f));
+ HRegionInfo hri = HRegionInfo.getHRegionInfo(Result.create(kvs));
+ assertTrue(hri == null);
+ // OK, give it what it expects
+ kvs.clear();
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
+ HConstants.REGIONINFO_QUALIFIER,
+ HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
+ hri = HRegionInfo.getHRegionInfo(Result.create(kvs));
+ assertNotNull(hri);
+ assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
+ }
+
+ /**
+ * Test that MetaTableAccessor will ride over server throwing
+ * "Server not running" IOEs.
+ * @see @link {https://issues.apache.org/jira/browse/HBASE-3446}
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testRideOverServerNotRunning()
+ throws IOException, InterruptedException, ServiceException {
+ // Need a zk watcher.
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(),
+ this.getClass().getSimpleName(), ABORTABLE, true);
+ // This is a servername we use in a few places below.
+ ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
+
+ HConnection connection;
+ try {
+ // Mock an ClientProtocol. Our mock implementation will fail a few
+ // times when we go to open a scanner.
+ final ClientProtos.ClientService.BlockingInterface implementation =
+ Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+ // When scan called throw IOE 'Server not running' a few times
+ // before we return a scanner id. Whats WEIRD is that these
+ // exceptions do not show in the log because they are caught and only
+ // printed if we FAIL. We eventually succeed after retry so these don't
+ // show. We will know if they happened or not because we will ask
+ // mockito at the end of this test to verify that scan was indeed
+ // called the wanted number of times.
+ List kvs = new ArrayList();
+ final byte [] rowToVerify = Bytes.toBytes("rowToVerify");
+ kvs.add(new KeyValue(rowToVerify,
+ HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
+ kvs.add(new KeyValue(rowToVerify,
+ HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ Bytes.toBytes(sn.getHostAndPort())));
+ kvs.add(new KeyValue(rowToVerify,
+ HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ Bytes.toBytes(sn.getStartcode())));
+ final List cellScannables = new ArrayList(1);
+ cellScannables.add(Result.create(kvs));
+ final ScanResponse.Builder builder = ScanResponse.newBuilder();
+ for (CellScannable result : cellScannables) {
+ builder.addCellsPerResult(((Result)result).size());
+ }
+ Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
+ .thenThrow(new ServiceException("Server not running (1 of 3)"))
+ .thenThrow(new ServiceException("Server not running (2 of 3)"))
+ .thenThrow(new ServiceException("Server not running (3 of 3)"))
+ .thenReturn(ScanResponse.newBuilder().setScannerId(1234567890L).build())
+ .thenAnswer(new Answer() {
+ public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
+ ((PayloadCarryingRpcController) invocation.getArguments()[0]).setCellScanner(CellUtil
+ .createCellScanner(cellScannables));
+ return builder.build();
+ }
+ }).thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
+ // Associate a spied-upon HConnection with UTIL.getConfiguration. Need
+ // to shove this in here first so it gets picked up all over; e.g. by
+ // HTable.
+ connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
+ // Fix the location lookup so it 'works' though no network. First
+ // make an 'any location' object.
+ final HRegionLocation anyLocation =
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
+ // Return the any location object when locateRegion is called in HTable
+ // constructor and when its called by ServerCallable (it uses getRegionLocation).
+ // The ugly format below comes of 'Important gotcha on spying real objects!' from
+ // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
+ Mockito.doReturn(anyLocation).
+ when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any());
+ Mockito.doReturn(anyLocation).
+ when(connection).getRegionLocation((TableName) Mockito.any(),
+ (byte[]) Mockito.any(), Mockito.anyBoolean());
+
+ // Now shove our HRI implementation into the spied-upon connection.
+ Mockito.doReturn(implementation).
+ when(connection).getClient(Mockito.any(ServerName.class));
+
+ // Scan meta for user tables and verify we got back expected answer.
+ NavigableMap hris =
+ MetaTableAccessor.getServerUserRegions(connection, sn);
+ assertEquals(1, hris.size());
+ assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
+ assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
+ // Finally verify that scan was called four times -- three times
+ // with exception and then on 4th, 5th and 6th attempt we succeed
+ Mockito.verify(implementation, Mockito.times(6)).
+ scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any());
+ } finally {
+ HConnectionManager.deleteConnection(UTIL.getConfiguration());
+ zkw.close();
+ }
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableLocator.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableLocator.java
new file mode 100644
index 0000000..be1fcc3
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaTableLocator.java
@@ -0,0 +1,347 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.catalog;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.util.Progressable;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * Test {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
+ */
+@Category(MediumTests.class)
+public class TestMetaTableLocator {
+ private static final Log LOG = LogFactory.getLog(TestMetaTableLocator.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final ServerName SN =
+ ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
+ private ZooKeeperWatcher watcher;
+ private Abortable abortable;
+
+ @BeforeClass public static void beforeClass() throws Exception {
+ // Set this down so tests run quicker
+ UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
+ UTIL.startMiniZKCluster();
+ }
+
+ @AfterClass public static void afterClass() throws IOException {
+ UTIL.getZkCluster().shutdown();
+ }
+
+ @Before public void before() throws IOException {
+ this.abortable = new Abortable() {
+ @Override
+ public void abort(String why, Throwable e) {
+ LOG.info(why, e);
+ }
+
+ @Override
+ public boolean isAborted() {
+ return false;
+ }
+ };
+ this.watcher = new ZooKeeperWatcher(UTIL.getConfiguration(),
+ this.getClass().getSimpleName(), this.abortable, true);
+ }
+
+ @After public void after() {
+ try {
+ // Clean out meta location or later tests will be confused... they presume
+ // start fresh in zk.
+ new MetaTableLocator().deleteMetaLocation(this.watcher);
+ } catch (KeeperException e) {
+ LOG.warn("Unable to delete hbase:meta location", e);
+ }
+
+ // Clear out our doctored connection or could mess up subsequent tests.
+ HConnectionManager.deleteConnection(UTIL.getConfiguration());
+
+ this.watcher.close();
+ }
+
+ /**
+ * Test interruptable while blocking wait on meta.
+ * @throws IOException
+ * @throws ServiceException
+ * @throws InterruptedException
+ */
+ @Test public void testInterruptWaitOnMeta()
+ throws IOException, InterruptedException, ServiceException {
+ final ClientProtos.ClientService.BlockingInterface client =
+ Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+
+ Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
+ thenReturn(GetResponse.newBuilder().build());
+
+ final MetaTableLocator mtl = new MetaTableLocator();
+ ServerName meta = new MetaTableLocator().getMetaRegionLocation(this.watcher);
+ Assert.assertNull(meta);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ mtl.waitMetaRegionLocation(watcher);
+ } catch (InterruptedException e) {
+ throw new RuntimeException("Interrupted", e);
+ }
+ }
+ };
+ t.start();
+ while (!t.isAlive())
+ Threads.sleep(1);
+ Threads.sleep(1);
+ assertTrue(t.isAlive());
+ mtl.stop();
+ // Join the thread... should exit shortly.
+ t.join();
+ }
+
+ private void testVerifyMetaRegionLocationWithException(Exception ex)
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ // Mock a ClientProtocol.
+ final ClientProtos.ClientService.BlockingInterface implementation =
+ Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+ HConnection connection = mockConnection(null, implementation);
+
+ // If a 'get' is called on mocked interface, throw connection refused.
+ Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
+ thenThrow(new ServiceException(ex));
+
+ MetaTableLocator.setMetaLocation(this.watcher, SN);
+ long timeout = UTIL.getConfiguration().
+ getLong("hbase.catalog.verification.timeout", 1000);
+ Assert.assertFalse(new MetaTableLocator().verifyMetaRegionLocation(
+ connection, watcher, timeout));
+ }
+
+ /**
+ * Test we survive a connection refused {@link ConnectException}
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testGetMetaServerConnectionFails()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
+ }
+
+ /**
+ * Test that verifyMetaRegionLocation properly handles getting a
+ * ServerNotRunningException. See HBASE-4470.
+ * Note this doesn't check the exact exception thrown in the
+ * HBASE-4470 as there it is thrown from getHConnection() and
+ * here it is thrown from get() -- but those are both called
+ * from the same function anyway, and this way is less invasive than
+ * throwing from getHConnection would be.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testVerifyMetaRegionServerNotRunning()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
+ }
+
+ /**
+ * Test get of meta region fails properly if nothing to connect to.
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ * @throws ServiceException
+ */
+ @Test
+ public void testVerifyMetaRegionLocationFails()
+ throws IOException, InterruptedException, KeeperException, ServiceException {
+ HConnection connection = Mockito.mock(HConnection.class);
+ ServiceException connectException =
+ new ServiceException(new ConnectException("Connection refused"));
+ final AdminProtos.AdminService.BlockingInterface implementation =
+ Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
+ Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
+ (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
+ Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
+ thenReturn(implementation);
+
+ MetaTableLocator.setMetaLocation(this.watcher,
+ ServerName.valueOf("example.com", 1234, System.currentTimeMillis()));
+ Assert.assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
+ }
+
+ @Test (expected = NotAllMetaRegionsOnlineException.class)
+ public void testTimeoutWaitForMeta()
+ throws IOException, InterruptedException {
+ new MetaTableLocator().waitMetaRegionLocation(watcher, 100);
+ }
+
+ /**
+ * Test waiting on meta w/ no timeout specified.
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws KeeperException
+ */
+ @Test public void testNoTimeoutWaitForMeta()
+ throws IOException, InterruptedException, KeeperException {
+ final MetaTableLocator mtl = new MetaTableLocator();
+ ServerName hsa = mtl.getMetaRegionLocation(watcher);
+ Assert.assertNull(hsa);
+
+ // Now test waiting on meta location getting set.
+ Thread t = new WaitOnMetaThread();
+ startWaitAliveThenWaitItLives(t, 1);
+ // Set a meta location.
+ hsa = setMetaLocation();
+ // Join the thread... should exit shortly.
+ t.join();
+ // Now meta is available.
+ Assert.assertTrue(mtl.getMetaRegionLocation(watcher).equals(hsa));
+ }
+
+ private ServerName setMetaLocation() throws KeeperException {
+ MetaTableLocator.setMetaLocation(this.watcher, SN);
+ return SN;
+ }
+
+ /**
+ * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
+ * want to pass a mocked HRS; can be null.
+ * @param client A mocked ClientProtocol instance, can be null
+ * @return Mock up a connection that returns a {@link Configuration} when
+ * {@link HConnection#getConfiguration()} is called, a 'location' when
+ * {@link HConnection#getRegionLocation(TableName, byte[], boolean)} is called,
+ * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
+ * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
+ * {@link ClientProtos.ClientService.BlockingInterface} instance when
+ * {@link HConnection#getClient(ServerName)} is called (Be sure to call
+ * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
+ * when done with this mocked Connection.
+ * @throws IOException
+ */
+ private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
+ final ClientProtos.ClientService.BlockingInterface client)
+ throws IOException {
+ HConnection connection =
+ HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+ Mockito.doNothing().when(connection).close();
+ // Make it so we return any old location when asked.
+ final HRegionLocation anyLocation =
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
+ Mockito.when(connection.getRegionLocation((TableName) Mockito.any(),
+ (byte[]) Mockito.any(), Mockito.anyBoolean())).
+ thenReturn(anyLocation);
+ Mockito.when(connection.locateRegion((TableName) Mockito.any(),
+ (byte[]) Mockito.any())).
+ thenReturn(anyLocation);
+ if (admin != null) {
+ // If a call to getAdmin, return this implementation.
+ Mockito.when(connection.getAdmin(Mockito.any(ServerName.class))).
+ thenReturn(admin);
+ }
+ if (client != null) {
+ // If a call to getClient, return this implementation.
+ Mockito.when(connection.getClient(Mockito.any(ServerName.class))).
+ thenReturn(client);
+ }
+ return connection;
+ }
+
+ private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
+ t.start();
+ while(!t.isAlive()) {
+ // Wait
+ }
+ // Wait one second.
+ Threads.sleep(ms);
+ Assert.assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
+ }
+
+ /**
+ * Wait on META.
+ */
+ class WaitOnMetaThread extends Thread {
+
+ WaitOnMetaThread() {
+ super("WaitOnMeta");
+ }
+
+ @Override
+ public void run() {
+ try {
+ doWaiting();
+ } catch (InterruptedException e) {
+ throw new RuntimeException("Failed wait", e);
+ }
+ LOG.info("Exiting " + getName());
+ }
+
+ void doWaiting() throws InterruptedException {
+ try {
+ while (new MetaTableLocator().waitMetaRegionLocation(watcher, 100) == null);
+ } catch (NotAllMetaRegionsOnlineException e) {
+ //Ignore
+ }
+ }
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index 54b751d..caf3c61 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -1764,18 +1763,13 @@ public class TestAdmin {
final byte [] nameBytes = Bytes.toBytes(name);
HTable t = TEST_UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
TEST_UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
- CatalogTracker ct = new CatalogTracker(TEST_UTIL.getConfiguration());
- ct.start();
- try {
- HRegionLocation regionLocation = t.getRegionLocation("mmm");
- HRegionInfo region = regionLocation.getRegionInfo();
- byte[] regionName = region.getRegionName();
- Pair pair = admin.getRegion(regionName, ct);
- assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
- pair = admin.getRegion(region.getEncodedNameAsBytes(), ct);
- assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
- } finally {
- ct.stop();
- }
+
+ HRegionLocation regionLocation = t.getRegionLocation("mmm");
+ HRegionInfo region = regionLocation.getRegionInfo();
+ byte[] regionName = region.getRegionName();
+ Pair pair = admin.getRegion(regionName);
+ assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
+ pair = admin.getRegion(region.getEncodedNameAsBytes());
+ assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName()));
}
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
index 4e5a656..192ef19 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
@@ -40,8 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.apache.hadoop.hbase.util.Threads;
@@ -50,6 +49,7 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+
@Category(MediumTests.class)
public class TestMetaScanner {
final Log LOG = LogFactory.getLog(getClass());
@@ -127,8 +127,6 @@ public class TestMetaScanner {
TableName.valueOf("testConcurrentMetaScannerAndCatalogJanitor");
final byte[] FAMILY = Bytes.toBytes("family");
TEST_UTIL.createTable(TABLENAME, FAMILY);
- final CatalogTracker catalogTracker = mock(CatalogTracker.class);
- when(catalogTracker.getConnection()).thenReturn(TEST_UTIL.getHBaseAdmin().getConnection());
class RegionMetaSplitter extends StoppableImplementation implements Runnable {
Random random = new Random();
@@ -169,7 +167,8 @@ public class TestMetaScanner {
Bytes.toBytes(midKey),
end);
- MetaEditor.splitRegion(catalogTracker, parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0));
+ MetaTableAccessor.splitRegion(TEST_UTIL.getHBaseAdmin().getConnection(),
+ parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0));
Threads.sleep(random.nextInt(200));
} catch (Throwable e) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
index e971e4f..7fbd45f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LargeTests;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
@@ -165,7 +165,7 @@ public class TestScannerTimeout {
Scan scan = new Scan();
scan.setCaching(SCANNER_CACHING);
LOG.info("************ TEST3686A");
- MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker());
+ MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getHBaseAdmin().getConnection());
// Set a very high timeout, we want to test what happens when a RS
// fails but the region is recovered before the lease times out.
// Since the RS is already created, this conf is client-side only for
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
index ada3e6f..ec73d7d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -152,7 +152,7 @@ public class TestRegionServerObserver {
mergedRegion = rmt.stepsBeforePONR(rs, rs, false);
rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(),
regionB.getRegionInfo(), rs.getServerName(), metaEntries);
- MetaEditor.mutateMetaTable(rs.getCatalogTracker(), metaEntries);
+ MetaTableAccessor.mutateMetaTable(rs.getShortCircuitConnection(), metaEntries);
}
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index f302f10..55a5763 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -45,10 +45,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.catalog.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -474,11 +473,12 @@ public class TestLoadIncrementalHFilesSplitRecovery {
dir = buildBulkFiles(tableName, 3);
// Mess it up by leaving a hole in the hbase:meta
- CatalogTracker ct = new CatalogTracker(util.getConfiguration());
- List | | | | | | | |