Index: src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java (revision 0)
+++ src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java (revision 0)
@@ -0,0 +1,89 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.zookeeper.KeeperException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestZKTable {
+ private static final Log LOG = LogFactory.getLog(TestZKTable.class);
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniZKCluster();
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniZKCluster();
+ }
+
+ @Test
+ public void testTableStates()
+ throws ZooKeeperConnectionException, IOException, KeeperException {
+ final String name = "testDisabled";
+ Abortable abortable = new Abortable() {
+ @Override
+ public void abort(String why, Throwable e) {
+ LOG.info(why, e);
+ }
+ };
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
+ name, abortable);
+ ZKTable zkt = new ZKTable(zkw);
+ assertTrue(zkt.isEnabledTable(name));
+ assertFalse(zkt.isDisablingTable(name));
+ assertFalse(zkt.isDisabledTable(name));
+ assertFalse(zkt.isEnablingTable(name));
+ assertFalse(zkt.isDisablingOrDisabledTable(name));
+ assertFalse(zkt.isDisabledOrEnablingTable(name));
+ zkt.setDisablingTable(name);
+ assertTrue(zkt.isDisablingTable(name));
+ assertTrue(zkt.isDisablingOrDisabledTable(name));
+ assertFalse(zkt.getDisabledTables().contains(name));
+ zkt.setDisabledTable(name);
+ assertTrue(zkt.isDisabledTable(name));
+ assertTrue(zkt.isDisablingOrDisabledTable(name));
+ assertFalse(zkt.isDisablingTable(name));
+ assertTrue(zkt.getDisabledTables().contains(name));
+ zkt.setEnablingTable(name);
+ assertTrue(zkt.isEnablingTable(name));
+ assertTrue(zkt.isDisabledOrEnablingTable(name));
+ assertFalse(zkt.isDisabledTable(name));
+ assertFalse(zkt.getDisabledTables().contains(name));
+ zkt.setEnabledTable(name);
+ assertTrue(zkt.isEnabledTable(name));
+ assertFalse(zkt.isEnablingTable(name));
+ }
+}
Index: src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (revision 1033197)
+++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (working copy)
@@ -48,7 +48,7 @@
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableDisable;
+import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
@@ -324,7 +324,8 @@
log("Beginning to mock scenarios");
// Disable the disabledTable in ZK
- ZKTableDisable.disableTable(zkw, Bytes.toString(disabledTable));
+ ZKTable zktable = new ZKTable(zkw);
+ zktable.setDisabledTable(Bytes.toString(disabledTable));
/*
* ZK = OFFLINE
@@ -652,7 +653,8 @@
log("Beginning to mock scenarios");
// Disable the disabledTable in ZK
- ZKTableDisable.disableTable(zkw, Bytes.toString(disabledTable));
+ ZKTable zktable = new ZKTable(zkw);
+ zktable.setDisabledTable(Bytes.toString(disabledTable));
/*
* ZK = CLOSING
Index: src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (revision 1033197)
+++ src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (working copy)
@@ -20,7 +20,6 @@
package org.apache.hadoop.hbase.client;
-import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -46,10 +45,8 @@
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -85,6 +82,45 @@
}
@Test
+ public void testDisableAndEnableTable() throws IOException {
+ final byte [] row = Bytes.toBytes("row");
+ final byte [] qualifier = Bytes.toBytes("qualifier");
+ final byte [] value = Bytes.toBytes("value");
+ final byte [] table = Bytes.toBytes("testDisableAndEnableTable");
+ HTable ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
+ Put put = new Put(row);
+ put.add(HConstants.CATALOG_FAMILY, qualifier, value);
+ ht.put(put);
+ Get get = new Get(row);
+ get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
+ ht.get(get);
+
+ this.admin.disableTable(table);
+
+ // Test that table is disabled
+ get = new Get(row);
+ get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
+ boolean ok = false;
+ try {
+ ht.get(get);
+ } catch (NotServingRegionException e) {
+ ok = true;
+ } catch (RetriesExhaustedException e) {
+ ok = true;
+ }
+ assertTrue(ok);
+ this.admin.enableTable(table);
+
+ // Test that table is enabled
+ try {
+ ht.get(get);
+ } catch (RetriesExhaustedException e) {
+ ok = false;
+ }
+ assertTrue(ok);
+ }
+
+ @Test
public void testHBaseFsck() throws IOException {
HBaseFsck fsck =
new HBaseFsck(TEST_UTIL.getMiniHBaseCluster().getConfiguration());
@@ -430,45 +466,6 @@
}
@Test
- public void testDisableAndEnableTable() throws IOException {
- final byte [] row = Bytes.toBytes("row");
- final byte [] qualifier = Bytes.toBytes("qualifier");
- final byte [] value = Bytes.toBytes("value");
- final byte [] table = Bytes.toBytes("testDisableAndEnableTable");
- HTable ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
- Put put = new Put(row);
- put.add(HConstants.CATALOG_FAMILY, qualifier, value);
- ht.put(put);
- Get get = new Get(row);
- get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
- ht.get(get);
-
- this.admin.disableTable(table);
-
- // Test that table is disabled
- get = new Get(row);
- get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
- boolean ok = false;
- try {
- ht.get(get);
- } catch (NotServingRegionException e) {
- ok = true;
- } catch (RetriesExhaustedException e) {
- ok = true;
- }
- assertTrue(ok);
- this.admin.enableTable(table);
-
- //Test that table is enabled
- try {
- ht.get(get);
- } catch (RetriesExhaustedException e) {
- ok = false;
- }
- assertTrue(ok);
- }
-
- @Test
public void testTableExist() throws IOException {
final byte [] table = Bytes.toBytes("testTableExist");
boolean exist = false;
@@ -769,4 +766,4 @@
this.admin.deleteTable(tableName);
}
}
-}
+}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java (working copy)
@@ -1,70 +0,0 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.util.List;
-
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Helper class for table disable tracking in zookeeper.
- *
- * The node /disabled will contain a child node for every table which should be
- * disabled, for example, /disabled/table.
- */
-public class ZKTableDisable {
-
- /**
- * Sets the specified table as disabled in zookeeper. Fails silently if the
- * table is already disabled in zookeeper. Sets no watches.
- * @param zkw
- * @param tableName
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void disableTable(ZooKeeperWatcher zkw, String tableName)
- throws KeeperException {
- ZKUtil.createAndFailSilent(zkw, ZKUtil.joinZNode(zkw.tableZNode,
- tableName));
- }
-
- /**
- * Unsets the specified table as disabled in zookeeper. Fails silently if the
- * table is not currently disabled in zookeeper. Sets no watches.
- * @param zkw
- * @param tableName
- * @throws KeeperException unexpected zookeeper exception
- */
- public static void undisableTable(ZooKeeperWatcher zkw, String tableName)
- throws KeeperException {
- ZKUtil.deleteNodeFailSilent(zkw, ZKUtil.joinZNode(zkw.tableZNode,
- tableName));
- }
-
- /**
- * Gets a list of all the tables set as disabled in zookeeper.
- * @param zkw
- * @return list of disabled tables, empty list if none
- * @throws KeeperException
- */
- public static List<String> getDisabledTables(ZooKeeperWatcher zkw)
- throws KeeperException {
- return ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
- }
-}
\ No newline at end of file
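Callers of the removed static ZKTableDisable helper move to the instance-based ZKTable API added later in this patch (see the TestMasterFailover hunks above). A minimal migration sketch, assuming an already-connected ZooKeeperWatcher zkw and a String table name; the intermediate DISABLING step is optional but avoids the "not first in disabling state" warning that ZKTable logs:

  // Sketch only: how a caller of the old helper migrates to ZKTable.
  void migrateDisableCall(ZooKeeperWatcher zkw, String tableNameStr)
  throws KeeperException {
    // Before this patch: ZKTableDisable.disableTable(zkw, tableNameStr);
    ZKTable zktable = new ZKTable(zkw);       // reads existing table states out of zk
    zktable.setDisablingTable(tableNameStr);  // DISABLING while regions are being closed
    zktable.setDisabledTable(tableNameStr);   // DISABLED once all regions are offline
  }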
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (working copy)
@@ -713,6 +713,23 @@
}
/**
+ * Set data into node creating node if it doesn't yet exist.
+ * Does not set watch.
+ * @param zkw zk reference
+ * @param znode path of node
+ * @param data data to set for node
+ * @throws KeeperException
+ */
+ public static void createSetData(final ZooKeeperWatcher zkw, final String znode,
+ final byte [] data)
+ throws KeeperException {
+ if (checkExists(zkw, znode) == -1) {
+ ZKUtil.createWithParents(zkw, znode);
+ }
+ ZKUtil.setData(zkw, znode, data);
+ }
+
+ /**
* Sets the data of the existing znode to be the specified data. The node
* must exist but no checks are done on the existing data or version.
*
@@ -902,8 +919,7 @@
* @param znode path of node
* @throws KeeperException if unexpected zookeeper exception
*/
- public static void createWithParents(ZooKeeperWatcher zkw,
- String znode)
+ public static void createWithParents(ZooKeeperWatcher zkw, String znode)
throws KeeperException {
try {
if(znode == null) {
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java (revision 0)
@@ -0,0 +1,245 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Helper class for table state tracking.
+ *
+ * A znode will exist under the table directory if it is in any of the following
+ * states: {@link TableState#ENABLING} , {@link TableState#DISABLING},
+ * or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will
+ * be no entry for a table in zk. That is how it currently works.
+ *
+ * To save on trips to the zookeeper ensemble, internally we cache table
+ * state.
+ */
+public class ZKTable {
+ private static final Log LOG = LogFactory.getLog(ZKTable.class);
+ private final ZooKeeperWatcher watcher;
+
+ /**
+ * Cache of what we found in zookeeper so we don't have to go to zk ensemble
+ * for every query. Synchronize access rather than use concurrent Map because
+ * synchronization needs to span query of zk.
+ */
+ private final Map<String, TableState> cache =
+ new HashMap<String, TableState>();
+
+ // TODO: Make it so always a table znode. Put table schema here as well as table state.
+ // Have watcher on table znode so all are notified of state or schema change.
+ /**
+ * States a Table can be in.
+ * {@link TableState#ENABLED} is not used currently; it is the absence of state
+ * in zookeeper that indicates an enabled table.
+ */
+ public static enum TableState {
+ ENABLED,
+ DISABLED,
+ DISABLING,
+ ENABLING
+ };
+
+ public ZKTable(final ZooKeeperWatcher zkw) throws KeeperException {
+ super();
+ this.watcher = zkw;
+ populateTableStates();
+ }
+
+ /**
+ * Populates the in-memory cache of table states by reading the table
+ * znodes currently up in zookeeper.
+ * Called on construction of this class.
+ * @throws KeeperException
+ */
+ private void populateTableStates()
+ throws KeeperException {
+ synchronized (this.cache) {
+ List<String> children =
+ ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode);
+ for (String child: children) {
+ String znode = ZKUtil.joinZNode(this.watcher.tableZNode, child);
+ byte [] data = ZKUtil.getData(this.watcher, znode);
+ if (data == null || data.length <= 0) {
+ LOG.warn("Unexpected null or empty data in " + znode);
+ continue;
+ }
+ TableState state = null;
+ String str = Bytes.toString(data);
+ try {
+ state = TableState.valueOf(str);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(str);
+ }
+ this.cache.put(child, state);
+ }
+ }
+ }
+
+ /**
+ * Sets the specified table as DISABLED in zookeeper. Fails silently if the
+ * table is already disabled in zookeeper. Sets no watches.
+ * @param tableName
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public void setDisabledTable(String tableName)
+ throws KeeperException {
+ synchronized (this.cache) {
+ if (!isDisablingOrDisabledTable(tableName)) {
+ LOG.warn("Moving table " + tableName + " state to disabled but was " +
+ "not first in disabling state: " + this.cache.get(tableName));
+ }
+ setTableState(tableName, TableState.DISABLED);
+ }
+ }
+
+ /**
+ * Sets the specified table as DISABLING in zookeeper. Fails silently if the
+ * table is already in the DISABLING state in zookeeper. Sets no watches.
+ * @param tableName
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public void setDisablingTable(final String tableName)
+ throws KeeperException {
+ synchronized (this.cache) {
+ if (!isEnabledOrDisablingTable(tableName)) {
+ LOG.warn("Moving table " + tableName + " state to disabling but was " +
+ "not first in enabled state: " + this.cache.get(tableName));
+ }
+ setTableState(tableName, TableState.DISABLING);
+ }
+ }
+
+ /**
+ * Sets the specified table as ENABLING in zookeeper. Fails silently if the
+ * table is already in the ENABLING state in zookeeper. Sets no watches.
+ * @param tableName
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public void setEnablingTable(final String tableName)
+ throws KeeperException {
+ synchronized (this.cache) {
+ if (!isDisabledOrEnablingTable(tableName)) {
+ LOG.warn("Moving table " + tableName + " state to disabling but was " +
+ "not first in enabled state: " + this.cache.get(tableName));
+ }
+ setTableState(tableName, TableState.ENABLING);
+ }
+ }
+
+ private void setTableState(final String tableName, final TableState state)
+ throws KeeperException {
+ String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName);
+ if (ZKUtil.checkExists(this.watcher, znode) == -1) {
+ ZKUtil.createAndFailSilent(this.watcher, znode);
+ }
+ synchronized (this.cache) {
+ ZKUtil.setData(this.watcher, znode, Bytes.toBytes(state.toString()));
+ this.cache.put(tableName, state);
+ }
+ }
+
+ public boolean isDisabledTable(final String tableName) {
+ return isTableState(tableName, TableState.DISABLED);
+ }
+
+ public boolean isDisablingTable(final String tableName) {
+ return isTableState(tableName, TableState.DISABLING);
+ }
+
+ public boolean isEnablingTable(final String tableName) {
+ return isTableState(tableName, TableState.ENABLING);
+ }
+
+ public boolean isEnabledTable(String tableName) {
+ synchronized (this.cache) {
+ // No entry in cache means enabled table.
+ return !this.cache.containsKey(tableName);
+ }
+ }
+
+ public boolean isDisablingOrDisabledTable(final String tableName) {
+ synchronized (this.cache) {
+ return isDisablingTable(tableName) || isDisabledTable(tableName);
+ }
+ }
+
+ public boolean isEnabledOrDisablingTable(final String tableName) {
+ synchronized (this.cache) {
+ return isEnabledTable(tableName) || isDisablingTable(tableName);
+ }
+ }
+
+ public boolean isDisabledOrEnablingTable(final String tableName) {
+ synchronized (this.cache) {
+ return isDisabledTable(tableName) || isEnablingTable(tableName);
+ }
+ }
+
+ private boolean isTableState(final String tableName, final TableState state) {
+ synchronized (this.cache) {
+ TableState currentState = this.cache.get(tableName);
+ return currentState != null && currentState.equals(state);
+ }
+ }
+
+ /**
+ * Enables the table in zookeeper by deleting its state znode. Fails silently
+ * if the table is not currently disabled in zookeeper. Sets no watches.
+ * @param tableName
+ * @throws KeeperException unexpected zookeeper exception
+ */
+ public void setEnabledTable(final String tableName)
+ throws KeeperException {
+ synchronized (this.cache) {
+ if (this.cache.remove(tableName) == null) {
+ LOG.warn("Moving table " + tableName + " state to enabled but was " +
+ "already enabled");
+ }
+ ZKUtil.deleteNodeFailSilent(this.watcher,
+ ZKUtil.joinZNode(this.watcher.tableZNode, tableName));
+ }
+ }
+
+ /**
+ * Gets a list of all the tables set as disabled in zookeeper.
+ * @return Set of disabled tables, empty Set if none
+ */
+ public Set<String> getDisabledTables() {
+ Set<String> disabledTables = new HashSet<String>();
+ synchronized (this.cache) {
+ Set<String> tables = this.cache.keySet();
+ for (String table: tables) {
+ if (isDisabledTable(table)) disabledTables.add(table);
+ }
+ }
+ return disabledTables;
+ }
+}
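A note on the representation ZKTable writes: each table in a non-ENABLED state gets one child znode under the watcher's tableZNode, and the znode data is the TableState name; an ENABLED table has no znode at all. A sketch of reading a single table's state directly, using only the ZKUtil calls that appear in this patch (joinZNode, checkExists, getData); "someTable" is an illustrative name, and the fragment assumes a connected ZooKeeperWatcher zkw inside a method that throws KeeperException:

  String znode = ZKUtil.joinZNode(zkw.tableZNode, "someTable");
  ZKTable.TableState state = ZKTable.TableState.ENABLED;  // no znode means ENABLED
  if (ZKUtil.checkExists(zkw, znode) != -1) {
    byte [] data = ZKUtil.getData(zkw, znode);             // znode data is the state name
    state = ZKTable.TableState.valueOf(Bytes.toString(data));
  }

The new ZKUtil.createSetData helper (create the znode and any missing parents if absent, then set its data) packages up the same create-then-set pattern that ZKTable.setTableState spells out with createAndFailSilent followed by setData.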
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -55,12 +55,12 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
@@ -73,7 +73,6 @@
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -2537,47 +2536,6 @@
}
/**
- * Utility method used by HMaster marking regions offlined.
- * @param srvr META server to be updated
- * @param metaRegionName Meta region name
- * @param info HRegion to update in meta
- *
- * @throws IOException
- */
- public static void offlineRegionInMETA(final HRegionInterface srvr,
- final byte [] metaRegionName, final HRegionInfo info)
- throws IOException {
- // Puts and Deletes used to be "atomic" here. We can use row locks if
- // we need to keep that property, or we can expand Puts and Deletes to
- // allow them to be committed at once.
- byte [] row = info.getRegionName();
- Put put = new Put(row);
- info.setOffline(true);
- put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- Writables.getBytes(info));
- srvr.put(metaRegionName, put);
- cleanRegionInMETA(srvr, metaRegionName, info);
- }
-
- /**
- * Clean COL_SERVER and COL_STARTCODE for passed info in
- * .META.
- * @param srvr
- * @param metaRegionName
- * @param info
- * @throws IOException
- */
- public static void cleanRegionInMETA(final HRegionInterface srvr,
- final byte [] metaRegionName, final HRegionInfo info)
- throws IOException {
- Delete del = new Delete(info.getRegionName());
- del.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
- del.deleteColumns(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER);
- srvr.delete(metaRegionName, del);
- }
-
- /**
* Deletes all the files for a HRegion
*
* @param fs the file system object
Index: src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (working copy)
@@ -137,6 +137,9 @@
new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC);
private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
+ // This flag is in the parent of a split while the parent is still referenced
+ // by daughter regions. We USED to set this flag when we disabled a table
+ // but now table state is kept up in zookeeper as of 0.90.0 HBase.
private boolean offLine = false;
private long regionId = -1;
private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY;
@@ -533,7 +536,9 @@
}
/**
- * @param offLine set online - offline status
+ * The parent of a region split is offline while split daughters hold
+ * references to the parent. Offlined regions are closed.
+ * @param offLine Set online/offline status.
*/
public void setOffline(boolean offLine) {
this.offLine = offLine;
Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -795,13 +795,13 @@
}
public void enableTable(final byte [] tableName) throws IOException {
- new EnableTableHandler(this, tableName, catalogTracker, assignmentManager)
- .process();
+ this.executorService.submit(new EnableTableHandler(this, tableName,
+ catalogTracker, assignmentManager));
}
public void disableTable(final byte [] tableName) throws IOException {
- new DisableTableHandler(this, tableName, catalogTracker, assignmentManager)
- .process();
+ this.executorService.submit(new DisableTableHandler(this, tableName,
+ catalogTracker, assignmentManager));
}
/**
@@ -857,7 +857,8 @@
if (!MetaReader.tableExists(getCatalogTracker(), tableNameStr)) {
throw new TableNotFoundException(tableNameStr);
}
- if (!getAssignmentManager().isTableDisabled(Bytes.toString(tableName))) {
+ if (!getAssignmentManager().getZKTable().
+ isDisabledTable(Bytes.toString(tableName))) {
throw new TableNotDisabledException(tableName);
}
}
Index: src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -22,12 +22,9 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
-import java.lang.Thread.UncaughtExceptionHandler;
import java.net.ConnectException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -38,7 +35,6 @@
import java.util.TreeSet;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
@@ -56,9 +52,9 @@
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.catalog.RootLocationEditor;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.RegionTransitionData;
-import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
@@ -67,11 +63,11 @@
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZKTableDisable;
+import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
@@ -79,8 +75,6 @@
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.data.Stat;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
/**
* Manages and performs region assignment.
*
@@ -115,9 +109,7 @@
final ConcurrentNavigableMap<String, RegionPlan> regionPlans =
new ConcurrentSkipListMap<String, RegionPlan>();
- /** Set of tables that have been disabled. */
- private final Set<String> disabledTables =
- Collections.synchronizedSet(new HashSet<String>());
+ private final ZKTable zkTable;
/**
* Server to regions assignment map.
@@ -150,9 +142,11 @@
* @param serverManager
* @param catalogTracker
* @param service
+ * @throws KeeperException
*/
public AssignmentManager(Server master, ServerManager serverManager,
- CatalogTracker catalogTracker, final ExecutorService service) {
+ CatalogTracker catalogTracker, final ExecutorService service)
+ throws KeeperException {
super(master.getZooKeeper());
this.master = master;
this.serverManager = serverManager;
@@ -160,14 +154,24 @@
this.executorService = service;
Configuration conf = master.getConfiguration();
this.timeoutMonitor = new TimeoutMonitor(
- conf.getInt("hbase.master.assignment.timeoutmonitor.period", 10000),
- master,
- conf.getInt("hbase.master.assignment.timeoutmonitor.timeout", 30000));
+ conf.getInt("hbase.master.assignment.timeoutmonitor.period", 10000),
+ master,
+ conf.getInt("hbase.master.assignment.timeoutmonitor.timeout", 30000));
Threads.setDaemonThreadRunning(timeoutMonitor,
- master.getServerName() + ".timeoutMonitor");
+ master.getServerName() + ".timeoutMonitor");
+ this.zkTable = new ZKTable(this.master.getZooKeeper());
}
/**
+ * @return Instance of ZKTable.
+ */
+ public ZKTable getZKTable() {
+ // These are 'expensive' to make involving trip to zk ensemble so allow
+ // sharing.
+ return this.zkTable;
+ }
+
+ /**
* Reset all unassigned znodes. Called on startup of master.
* Call {@link #assignAllUserRegions()} after root and meta have been assigned.
* @throws IOException
@@ -200,8 +204,6 @@
// Returns servers who have not checked in (assumed dead) and their regions
Map<HServerInfo, List<Pair<HRegionInfo, Result>>> deadServers =
rebuildUserRegions();
- // Pickup any disabled tables from ZK
- rebuildDisabledTables();
// Process list of dead servers
processDeadServers(deadServers);
// Check existing regions in transition
@@ -593,14 +595,10 @@
public void setOffline(HRegionInfo regionInfo) {
synchronized (this.regions) {
HServerInfo serverInfo = this.regions.remove(regionInfo);
- if (serverInfo != null) {
- List<HRegionInfo> serverRegions = this.servers.get(serverInfo);
- if (!serverRegions.remove(regionInfo)) {
- LOG.warn("Asked offline a region that was not on expected server: " +
- regionInfo + ", " + serverInfo.getServerName());
- }
- } else {
- LOG.warn("Asked offline a region that was not online: " + regionInfo);
+ if (serverInfo == null) return;
+ List<HRegionInfo> serverRegions = this.servers.get(serverInfo);
+ if (!serverRegions.remove(regionInfo)) {
+ LOG.warn("No " + regionInfo + " on " + serverInfo);
}
}
}
@@ -651,9 +649,10 @@
public void assign(HRegionInfo region, boolean setOfflineInZK,
boolean forceNewPlan) {
String tableName = region.getTableDesc().getNameAsString();
- if (isTableDisabled(tableName)) {
- LOG.info("Table " + tableName + " disabled; skipping assign of " +
- region.getRegionNameAsString());
+ boolean disabled = this.zkTable.isDisabledTable(tableName);
+ if (disabled || this.zkTable.isDisablingTable(tableName)) {
+ LOG.info("Table " + tableName + (disabled? " disabled;": " disabling;") +
+ " skipping assign of " + region.getRegionNameAsString());
offlineDisabledRegion(region);
return;
}
@@ -674,7 +673,7 @@
* @param destination
* @param regions Regions to assign.
*/
- public void assign(final HServerInfo destination,
+ void assign(final HServerInfo destination,
final List<HRegionInfo> regions) {
LOG.debug("Bulk assigning " + regions.size() + " region(s) to " +
destination.getServerName());
@@ -958,7 +957,7 @@
* Updates the RegionState and sends the CLOSE RPC.
*
* If a RegionPlan is already set, it will remain. If this is being used
- * to disable a table, be sure to use {@link #disableTable(String)} to ensure
+ * to disable a table, be sure to use {@link ZKTable#setDisabledTable(String)} to ensure
* regions are not onlined after being closed.
*
* @param regionName server to be unassigned
@@ -973,7 +972,7 @@
* Updates the RegionState and sends the CLOSE RPC.
*
* If a RegionPlan is already set, it will remain. If this is being used
- * to disable a table, be sure to use {@link #disableTable(String)} to ensure
+ * to disable a table, be sure to use {@link ZKTable#setDisabledTable(String)} to ensure
* regions are not onlined after being closed.
*
* @param regionName server to be unassigned
@@ -1104,23 +1103,23 @@
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown and the cluster
* should be shutdown.
+ * @throws InterruptedException
+ * @throws IOException
*/
- public void assignAllUserRegions() throws IOException {
-
- Map<HServerInfo, List<HRegionInfo>> bulkPlan = null;
-
+ public void assignAllUserRegions() throws IOException, InterruptedException {
// Get all available servers
List servers = serverManager.getOnlineServersList();
// Scan META for all user regions, skipping any disabled tables
Map<HRegionInfo, HServerAddress> allRegions =
- MetaReader.fullScan(catalogTracker, disabledTables);
+ MetaReader.fullScan(catalogTracker, this.zkTable.getDisabledTables());
if (allRegions == null || allRegions.isEmpty()) return;
// Determine what type of assignment to do on startup
- boolean retainAssignment = master.getConfiguration().getBoolean(
- "hbase.master.startup.retainassign", true);
+ boolean retainAssignment = master.getConfiguration().
+ getBoolean("hbase.master.startup.retainassign", true);
+ Map<HServerInfo, List<HRegionInfo>> bulkPlan = null;
if (retainAssignment) {
// Reuse existing assignment info
bulkPlan = LoadBalancer.retainAssignment(allRegions, servers);
@@ -1129,72 +1128,108 @@
bulkPlan = LoadBalancer.roundRobinAssignment(
new ArrayList<HRegionInfo>(allRegions.keySet()), servers);
}
-
LOG.info("Bulk assigning " + allRegions.size() + " region(s) across " +
- servers.size() + " server(s)");
+ servers.size() + " server(s), retainAssignment=" + retainAssignment);
- // Make a fixed thread count pool to run bulk assignments. Thought is that
- // if a 1k cluster, running 1k bulk concurrent assignment threads will kill
- // master, HDFS or ZK?
- ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
- builder.setDaemon(true);
- builder.setNameFormat(this.master.getServerName() + "-BulkAssigner-%1$d");
- builder.setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
- @Override
- public void uncaughtException(Thread t, Throwable e) {
- // Abort if exception of any kind.
- master.abort("Uncaught exception bulk assigning in " + t.getName(), e);
- }
- });
- int threadCount =
- this.master.getConfiguration().getInt("hbase.bulk.assignment.threadpool.size", 20);
- java.util.concurrent.ExecutorService pool =
- Executors.newFixedThreadPool(threadCount, builder.build());
- // Disable timing out regions in transition up in zk while bulk assigning.
- this.timeoutMonitor.bulkAssign(true);
- try {
- for (Map.Entry<HServerInfo, List<HRegionInfo>> e: bulkPlan.entrySet()) {
- pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue()));
- }
- // Wait for no regions to be in transition
+ // Use fixed count thread pool assigning.
+ BulkAssigner ba = new BulkStartupAssigner(this.master, bulkPlan, this);
+ ba.bulkAssign();
+ LOG.info("Bulk assigning done");
+ }
+
+ /**
+ * Run bulk assign on startup.
+ */
+ static class BulkStartupAssigner extends BulkAssigner {
+ private final Map<HServerInfo, List<HRegionInfo>> bulkPlan;
+ private final AssignmentManager assignmentManager;
+
+ BulkStartupAssigner(final Server server,
+ final Map<HServerInfo, List<HRegionInfo>> bulkPlan,
+ final AssignmentManager am) {
+ super(server);
+ this.bulkPlan = bulkPlan;
+ this.assignmentManager = am;
+ }
+
+ @Override
+ public boolean bulkAssign() throws InterruptedException {
+ // Disable timing out regions in transition up in zk while bulk assigning.
+ this.assignmentManager.timeoutMonitor.bulkAssign(true);
try {
- // How long to wait on empty regions-in-transition. When we timeout,
- // we'll put back in place the monitor of R-I-T. It should do fixup
- // if server crashed during bulk assign, etc.
- long timeout =
- this.master.getConfiguration().getInt("hbase.bulk.assignment.waiton.empty.rit", 10 * 60 * 1000);
- waitUntilNoRegionsInTransition(timeout);
- } catch (InterruptedException e) {
- LOG.error("Interrupted waiting for regions to be assigned", e);
- throw new IOException(e);
+ return super.bulkAssign();
+ } finally {
+ // Reenable timing out regions in transition up in zk.
+ this.assignmentManager.timeoutMonitor.bulkAssign(false);
}
- } finally {
- // We're done with the pool. It'll exit when its done all in queue.
- pool.shutdown();
- // Reenable timing out regions in transition up in zi.
- this.timeoutMonitor.bulkAssign(false);
}
- LOG.info("Bulk assigning done");
+
+ @Override
+ protected String getThreadNamePrefix() {
+ return super.getThreadNamePrefix() + "-startup";
+ }
+
+ @Override
+ protected void populatePool(java.util.concurrent.ExecutorService pool) {
+ for (Map.Entry<HServerInfo, List<HRegionInfo>> e: this.bulkPlan.entrySet()) {
+ pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(),
+ this.assignmentManager));
+ }
+ }
+
+ protected boolean waitUntilDone(final long timeout)
+ throws InterruptedException {
+ return this.assignmentManager.waitUntilNoRegionsInTransition(timeout);
+ }
}
/**
* Manage bulk assigning to a server.
*/
- class SingleServerBulkAssigner implements Runnable {
+ static class SingleServerBulkAssigner implements Runnable {
private final HServerInfo regionserver;
private final List<HRegionInfo> regions;
+ private final AssignmentManager assignmentManager;
+
SingleServerBulkAssigner(final HServerInfo regionserver,
- final List<HRegionInfo> regions) {
+ final List<HRegionInfo> regions, final AssignmentManager am) {
this.regionserver = regionserver;
this.regions = regions;
+ this.assignmentManager = am;
}
@Override
public void run() {
- assign(this.regionserver, this.regions);
+ this.assignmentManager.assign(this.regionserver, this.regions);
}
}
/**
+ * Wait until no regions in transition.
+ * @param timeout How long to wait.
+ * @return True if nothing in regions in transition.
+ * @throws InterruptedException
+ */
+ boolean waitUntilNoRegionsInTransition(final long timeout)
+ throws InterruptedException {
+ // Blocks until there are no regions in transition. It is possible
+ // that there are regions in transition immediately after this returns
+ // but guarantees
+ // that if it returns without an exception that there was a period of time
+ // with no regions in transition from the point-of-view of the in-memory
+ // state of the Master.
+ long startTime = System.currentTimeMillis();
+ long remaining = timeout;
+ synchronized (regionsInTransition) {
+ while (regionsInTransition.size() > 0 && !this.master.isStopped()
+ && remaining > 0) {
+ regionsInTransition.wait(remaining);
+ remaining = timeout - (System.currentTimeMillis() - startTime);
+ }
+ }
+ return regionsInTransition.isEmpty();
+ }
+
+ /**
* Rebuild the list of user regions and assignment information.
*
* Returns a map of servers that are not found to be online and the regions
@@ -1291,28 +1326,6 @@
}
/**
- * Blocks until there are no regions in transition. It is possible that there
- * are regions in transition immediately after this returns but guarantees
- * that if it returns without an exception that there was a period of time
- * with no regions in transition from the point-of-view of the in-memory
- * state of the Master.
- * @param timeout How long to wait on empty regions-in-transition.
- * @throws InterruptedException
- */
- public void waitUntilNoRegionsInTransition(final long timeout)
- throws InterruptedException {
- long startTime = System.currentTimeMillis();
- long remaining = timeout;
- synchronized (this.regionsInTransition) {
- while(this.regionsInTransition.size() > 0 &&
- !this.master.isStopped() && remaining > 0) {
- this.regionsInTransition.wait(remaining);
- remaining = timeout - (System.currentTimeMillis() - startTime);
- }
- }
- }
-
- /**
* @return A copy of the Map of regions currently in transition.
*/
public NavigableMap<String, RegionState> getRegionsInTransition() {
@@ -1370,18 +1383,7 @@
}
/**
- * Checks if the specified table has been disabled by the user.
- * @param tableName
- * @return
- */
- public boolean isTableDisabled(String tableName) {
- synchronized(disabledTables) {
- return disabledTables.contains(tableName);
- }
- }
-
- /**
- * Wait on regions to clean regions-in-transition.
+ * Wait on region to clear regions-in-transition.
* @param hri Region to wait on.
* @throws IOException
*/
@@ -1401,82 +1403,22 @@
}
}
- /**
- * Checks if the table of the specified region has been disabled by the user.
- * @param regionName
- * @return
- */
- public boolean isTableOfRegionDisabled(byte [] regionName) {
- return isTableDisabled(Bytes.toString(
- HRegionInfo.getTableName(regionName)));
- }
/**
- * Sets the specified table to be disabled.
- * @param tableName table to be disabled
- */
- public void disableTable(String tableName) {
- synchronized(disabledTables) {
- if(!isTableDisabled(tableName)) {
- disabledTables.add(tableName);
- try {
- ZKTableDisable.disableTable(master.getZooKeeper(), tableName);
- } catch (KeeperException e) {
- LOG.warn("ZK error setting table as disabled", e);
- }
- }
- }
- }
-
- /**
- * Unsets the specified table from being disabled.
- *
- * This operation only acts on the in-memory
- * @param tableName table to be undisabled
- */
- public void undisableTable(String tableName) {
- synchronized(disabledTables) {
- if(isTableDisabled(tableName)) {
- disabledTables.remove(tableName);
- try {
- ZKTableDisable.undisableTable(master.getZooKeeper(), tableName);
- } catch (KeeperException e) {
- LOG.warn("ZK error setting table as disabled", e);
- }
- }
- }
- }
-
- /**
- * Rebuild the set of disabled tables from zookeeper. Used during master
- * failover.
- */
- private void rebuildDisabledTables() {
- synchronized(disabledTables) {
- List<String> disabledTables;
- try {
- disabledTables = ZKTableDisable.getDisabledTables(master.getZooKeeper());
- } catch (KeeperException e) {
- LOG.warn("ZK error getting list of disabled tables", e);
- return;
- }
- if(!disabledTables.isEmpty()) {
- LOG.info("Rebuilt list of " + disabledTables.size() + " disabled " +
- "tables from zookeeper");
- this.disabledTables.addAll(disabledTables);
- }
- }
- }
-
- /**
* Gets the online regions of the specified table.
+ * This method looks at the in-memory state. It does not go to .META..
+ * Only returns online regions. If a region on this table has been
+ * closed during a disable, etc., it will not be included in the returned list.
+ * So, the returned list may not necessarily be ALL regions in this table, it's
+ * all the ONLINE regions in the table.
* @param tableName
- * @return
+ * @return Online regions from tableName
*/
public List<HRegionInfo> getRegionsOfTable(byte[] tableName) {
List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
- for(HRegionInfo regionInfo : regions.tailMap(new HRegionInfo(
- new HTableDescriptor(tableName), null, null)).keySet()) {
+ HRegionInfo boundary =
+ new HRegionInfo(new HTableDescriptor(tableName), null, null);
+ for (HRegionInfo regionInfo: this.regions.tailMap(boundary).keySet()) {
if(Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) {
tableRegions.add(regionInfo);
} else {
Index: src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (working copy)
@@ -404,7 +404,8 @@
assignments.put(server, new ArrayList<HRegionInfo>());
}
for (Map.Entry<HRegionInfo, HServerAddress> region : regions.entrySet()) {
- HServerInfo server = serverMap.get(region.getValue());
+ HServerAddress hsa = region.getValue();
+ HServerInfo server = hsa == null? null: serverMap.get(hsa);
if (server != null) {
assignments.get(server).add(region.getKey());
} else {
Index: src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (working copy)
@@ -167,7 +167,7 @@
AssignmentManager assignmentManager, CatalogTracker catalogTracker)
throws IOException {
// If table is not disabled but the region is offlined,
- boolean disabled = assignmentManager.isTableDisabled(
+ boolean disabled = assignmentManager.getZKTable().isDisabledTable(
hri.getTableDesc().getNameAsString());
if (disabled) return false;
if (hri.isOffline() && hri.isSplit()) {
Index: src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java (working copy)
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
/**
* Base class for performing operations against tables.
@@ -42,6 +43,7 @@
private static final Log LOG = LogFactory.getLog(TableEventHandler.class);
protected final MasterServices masterServices;
protected final byte [] tableName;
+ protected final String tableNameStr;
public TableEventHandler(EventType eventType, byte [] tableName, Server server,
MasterServices masterServices)
@@ -50,6 +52,7 @@
this.masterServices = masterServices;
this.tableName = tableName;
this.masterServices.checkTableModifiable(tableName);
+ this.tableNameStr = Bytes.toString(this.tableName);
}
@Override
@@ -62,11 +65,12 @@
tableName);
handleTableOperation(hris);
} catch (IOException e) {
- LOG.error("Error trying to delete the table " + Bytes.toString(tableName),
- e);
+ LOG.error("Error manipulating table " + Bytes.toString(tableName), e);
+ } catch (KeeperException e) {
+ LOG.error("Error manipulating table " + Bytes.toString(tableName), e);
}
}
protected abstract void handleTableOperation(List<HRegionInfo> regions)
- throws IOException;
+ throws IOException, KeeperException;
}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (working copy)
@@ -31,20 +31,20 @@
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZKTableDisable;
import org.apache.zookeeper.KeeperException;
public class DeleteTableHandler extends TableEventHandler {
private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class);
public DeleteTableHandler(byte [] tableName, Server server,
- final MasterServices masterServices) throws IOException {
+ final MasterServices masterServices)
+ throws IOException {
super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices);
}
@Override
protected void handleTableOperation(List<HRegionInfo> regions)
- throws IOException {
+ throws IOException, KeeperException {
AssignmentManager am = this.masterServices.getAssignmentManager();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
@@ -73,6 +73,6 @@
// If entry for this table in zk, and up in AssignmentManager, remove it.
// Call to undisableTable does this. TODO: Make a more formal purge table.
- am.undisableTable(Bytes.toString(tableName));
+ am.getZKTable().setEnabledTable(Bytes.toString(tableName));
}
}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (working copy)
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,12 +32,15 @@
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
-
+/**
+ * Handler to run disable of a table.
+ */
public class DisableTableHandler extends EventHandler {
private static final Log LOG = LogFactory.getLog(DisableTableHandler.class);
-
private final byte [] tableName;
private final String tableNameStr;
private final AssignmentManager assignmentManager;
@@ -52,7 +56,7 @@
// TODO: do we want to keep this in-memory as well? i guess this is
// part of old master rewrite, schema to zk to check for table
// existence and such
- if(!MetaReader.tableExists(catalogTracker, this.tableNameStr)) {
+ if (!MetaReader.tableExists(catalogTracker, this.tableNameStr)) {
throw new TableNotFoundException(Bytes.toString(tableName));
}
}
@@ -60,32 +64,90 @@
@Override
public void process() {
try {
- LOG.info("Attemping to disable the table " + this.tableNameStr);
+ LOG.info("Attemping to disable table " + this.tableNameStr);
handleDisableTable();
} catch (IOException e) {
- LOG.error("Error trying to disable the table " + this.tableNameStr, e);
+ LOG.error("Error trying to disable table " + this.tableNameStr, e);
+ } catch (KeeperException e) {
+ LOG.error("Error trying to disable table " + this.tableNameStr, e);
}
}
- private void handleDisableTable() throws IOException {
- if (this.assignmentManager.isTableDisabled(this.tableNameStr)) {
- LOG.info("Table " + tableNameStr + " is already disabled; skipping disable");
+ private void handleDisableTable() throws IOException, KeeperException {
+ if (this.assignmentManager.getZKTable().isDisabledTable(this.tableNameStr)) {
+ LOG.info("Table " + tableNameStr + " already disabled; skipping disable");
return;
}
- // Set the table as disabled so it doesn't get re-onlined
- assignmentManager.disableTable(this.tableNameStr);
- // Get the online regions of this table.
- // TODO: What if region splitting at the time we get this listing?
- // TODO: Remove offline flag from HRI
- // TODO: Confirm we have parallel closing going on.
- List<HRegionInfo> regions = assignmentManager.getRegionsOfTable(tableName);
- // Unassign the online regions
- for(HRegionInfo region: regions) {
- assignmentManager.unassign(region);
+ // Set table disabling flag up in zk.
+ this.assignmentManager.getZKTable().setDisablingTable(this.tableNameStr);
+ boolean done = false;
+ while (true) {
+ // Get list of online regions that are of this table. Regions that are
+ // already closed will not be included in this list; i.e. the returned
+ // list is not ALL regions in a table, its all online regions according to
+ // the in-memory state on this master.
+ final List<HRegionInfo> regions =
+ this.assignmentManager.getRegionsOfTable(tableName);
+ if (regions.size() == 0) {
+ done = true;
+ break;
+ }
+ LOG.info("Offlining " + regions.size() + " regions.");
+ BulkDisabler bd = new BulkDisabler(this.server, regions);
+ try {
+ if (bd.bulkAssign()) {
+ done = true;
+ break;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Disable was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ break;
+ }
}
- // Wait on table's regions to clear region in transition.
- for (HRegionInfo region: regions) {
- this.assignmentManager.waitOnRegionToClearRegionsInTransition(region);
+ // Flip the table to disabled if success.
+ if (done) this.assignmentManager.getZKTable().setDisabledTable(this.tableNameStr);
+ LOG.info("Disabled table is done=" + done);
+ }
+
+ /**
+ * Run bulk disable.
+ */
+ class BulkDisabler extends BulkAssigner {
+ private final List<HRegionInfo> regions;
+
+ BulkDisabler(final Server server, final List<HRegionInfo> regions) {
+ super(server);
+ this.regions = regions;
}
+
+ @Override
+ protected void populatePool(ExecutorService pool) {
+ for (HRegionInfo region: regions) {
+ if (assignmentManager.isRegionInTransition(region) != null) continue;
+ final HRegionInfo hri = region;
+ pool.execute(new Runnable() {
+ public void run() {
+ assignmentManager.unassign(hri);
+ }
+ });
+ }
+ }
+
+ @Override
+ protected boolean waitUntilDone(long timeout)
+ throws InterruptedException {
+ long startTime = System.currentTimeMillis();
+ long remaining = timeout;
+ List<HRegionInfo> regions = null;
+ while (!server.isStopped() && remaining > 0) {
+ Thread.sleep(500);
+ regions = assignmentManager.getRegionsOfTable(tableName);
+ if (regions.isEmpty()) break;
+ remaining = timeout - (System.currentTimeMillis() - startTime);
+ }
+ return regions != null && regions.isEmpty();
+ }
}
}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java (working copy)
@@ -36,10 +36,8 @@
*/
public class ClosedRegionHandler extends EventHandler implements TotesHRegionInfo {
private static final Log LOG = LogFactory.getLog(ClosedRegionHandler.class);
-
private final AssignmentManager assignmentManager;
private final HRegionInfo regionInfo;
-
private final ClosedPriority priority;
private enum ClosedPriority {
@@ -84,7 +82,8 @@
public void process() {
LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
// Check if this table is being disabled or not
- if (assignmentManager.isTableOfRegionDisabled(regionInfo.getRegionName())) {
+ if (this.assignmentManager.getZKTable().
+ isDisabledTable(this.regionInfo.getTableDesc().getNameAsString())) {
assignmentManager.offlineDisabledRegion(regionInfo);
return;
}
Index: src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (working copy)
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,12 +32,15 @@
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
-
+/**
+ * Handler to run enable of a table.
+ */
public class EnableTableHandler extends EventHandler {
private static final Log LOG = LogFactory.getLog(EnableTableHandler.class);
-
private final byte [] tableName;
private final String tableNameStr;
private final AssignmentManager assignmentManager;
@@ -51,7 +55,7 @@
this.ct = catalogTracker;
this.assignmentManager = assignmentManager;
// Check if table exists
- if(!MetaReader.tableExists(catalogTracker, this.tableNameStr)) {
+ if (!MetaReader.tableExists(catalogTracker, this.tableNameStr)) {
throw new TableNotFoundException(Bytes.toString(tableName));
}
}
@@ -63,25 +67,113 @@
handleEnableTable();
} catch (IOException e) {
LOG.error("Error trying to enable the table " + this.tableNameStr, e);
+ } catch (KeeperException e) {
+ LOG.error("Error trying to enable the table " + this.tableNameStr, e);
}
}
- private void handleEnableTable() throws IOException {
- if (!this.assignmentManager.isTableDisabled(this.tableNameStr)) {
- LOG.info("Table " + tableNameStr + " is not disabled; skipping enable");
+ private void handleEnableTable() throws IOException, KeeperException {
+ if (this.assignmentManager.getZKTable().isEnabledTable(this.tableNameStr)) {
+ LOG.info("Table " + tableNameStr + " is already enabled; skipping enable");
return;
}
- // Get the regions of this table
- List<HRegionInfo> regions = MetaReader.getTableRegions(this.ct, tableName);
- // Set the table as disabled so it doesn't get re-onlined
- assignmentManager.undisableTable(this.tableNameStr);
- // Verify all regions of table are disabled
- for (HRegionInfo region : regions) {
- assignmentManager.assign(region, true);
+ // We could check whether the table is currently disabling and, if so,
+ // require that the disable finish first, but that might be obnoxious.
+
+ // Set table enabling flag up in zk.
+ this.assignmentManager.getZKTable().setEnablingTable(this.tableNameStr);
+ boolean done = false;
+ while (true) {
+ // Get the regions of this table. We're done when all listed
+ // regions are onlined.
+ List<HRegionInfo> regionsInMeta =
+ MetaReader.getTableRegions(this.ct, tableName);
+ int countOfRegionsInTable = regionsInMeta.size();
+ List<HRegionInfo> regions = regionsToAssign(regionsInMeta);
+ if (regions.isEmpty()) {
+ done = true;
+ break;
+ }
+ LOG.info("Table has " + countOfRegionsInTable + " regions of which " +
+ regions.size() + " are online.");
+ BulkEnabler enabler = new BulkEnabler(this.server, regions,
+ countOfRegionsInTable);
+ try {
+ if (enabler.bulkAssign()) {
+ done = true;
+ break;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Enable was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ break;
+ }
}
- // Wait on table's regions to clear region in transition.
- for (HRegionInfo region: regions) {
- this.assignmentManager.waitOnRegionToClearRegionsInTransition(region);
+ // Flip the table to enabled.
+ if (done) this.assignmentManager.getZKTable().setEnabledTable(this.tableNameStr);
+ LOG.info("Enable of table " + this.tableNameStr + " done=" + done);
+ }
+
+ /**
+ * @param regionsInMeta This data structure is edited by this method.
+ * @return The regionsInMeta list minus the regions that are already
+ * online; i.e. the list of regions that still need onlining.
+ * @throws IOException
+ */
+ private List<HRegionInfo> regionsToAssign(final List<HRegionInfo> regionsInMeta)
+ throws IOException {
+ final List<HRegionInfo> onlineRegions =
+ this.assignmentManager.getRegionsOfTable(tableName);
+ regionsInMeta.removeAll(onlineRegions);
+ return regionsInMeta;
+ }
+
+ /**
+ * Run bulk enable.
+ */
+ class BulkEnabler extends BulkAssigner {
+ private final List<HRegionInfo> regions;
+ // Count of regions in table at time this assign was launched.
+ private final int countOfRegionsInTable;
+
+ BulkEnabler(final Server server, final List<HRegionInfo> regions,
+ final int countOfRegionsInTable) {
+ super(server);
+ this.regions = regions;
+ this.countOfRegionsInTable = countOfRegionsInTable;
}
+
+ @Override
+ protected void populatePool(ExecutorService pool) {
+ for (HRegionInfo region: regions) {
+ if (assignmentManager.isRegionInTransition(region) != null) continue;
+ final HRegionInfo hri = region;
+ pool.execute(new Runnable() {
+ public void run() {
+ assignmentManager.assign(hri, true);
+ }
+ });
+ }
+ }
+
+ @Override
+ protected boolean waitUntilDone(long timeout)
+ throws InterruptedException {
+ long startTime = System.currentTimeMillis();
+ long remaining = timeout;
+ List<HRegionInfo> regions = null;
+ while (!server.isStopped() && remaining > 0) {
+ Thread.sleep(500);
+ regions = assignmentManager.getRegionsOfTable(tableName);
+ if (isDone(regions)) break;
+ remaining = timeout - (System.currentTimeMillis() - startTime);
+ }
+ return isDone(regions);
+ }
+
+ private boolean isDone(final List<HRegionInfo> regions) {
+ return regions != null && regions.size() >= this.countOfRegionsInTable;
+ }
}
-}
\ No newline at end of file
+}
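
The new handleEnableTable() above drives assignment through a BulkAssigner subclass: populatePool() queues one assign task per region that is not already in transition, and waitUntilDone() polls the AssignmentManager until the table reports the region count recorded when the bulk assign was launched. A compact sketch of that contract follows; the populatePool()/waitUntilDone() signatures, the super(server) call and the AssignmentManager calls are taken from the hunk above, while the class name, fields and 500ms poll are illustrative.

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.master.AssignmentManager;
    import org.apache.hadoop.hbase.master.BulkAssigner;

    // Sketch only; a BulkAssigner that onlines a given set of regions.
    class BulkAssignSketch extends BulkAssigner {
      private final AssignmentManager am;
      private final Server stopper;
      private final List<HRegionInfo> toAssign;
      private final byte [] tableName;
      private final int expectedCount;

      BulkAssignSketch(final Server server, final AssignmentManager am,
          final List<HRegionInfo> toAssign, final byte [] tableName,
          final int expectedCount) {
        super(server);
        this.stopper = server;
        this.am = am;
        this.toAssign = toAssign;
        this.tableName = tableName;
        this.expectedCount = expectedCount;
      }

      @Override
      protected void populatePool(final ExecutorService pool) {
        // One task per region; skip regions already in transition.
        for (final HRegionInfo hri: this.toAssign) {
          if (this.am.isRegionInTransition(hri) != null) continue;
          pool.execute(new Runnable() {
            public void run() {
              am.assign(hri, true);
            }
          });
        }
      }

      @Override
      protected boolean waitUntilDone(final long timeout) throws InterruptedException {
        // Poll the table's online-region count until it reaches the expected
        // total or the timeout expires.
        final long start = System.currentTimeMillis();
        List<HRegionInfo> online = null;
        while (!this.stopper.isStopped() &&
            System.currentTimeMillis() - start < timeout) {
          Thread.sleep(500);
          online = this.am.getRegionsOfTable(this.tableName);
          if (online.size() >= this.expectedCount) break;
        }
        return online != null && online.size() >= this.expectedCount;
      }
    }

Callers drive such a subclass through bulkAssign(), as the retry loop in handleEnableTable() does with BulkEnabler.
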
Index: src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java (working copy)
@@ -92,7 +92,7 @@
regionInfo.getEncodedName() + ")", e);
}
this.assignmentManager.regionOnline(regionInfo, serverInfo);
- if (assignmentManager.isTableDisabled(
+ if (this.assignmentManager.getZKTable().isDisabledTable(
regionInfo.getTableDesc().getNameAsString())) {
LOG.debug("Opened region " + regionInfo.getRegionNameAsString() + " but "
+ "this table is disabled, triggering close of region");
Index: src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (working copy)
@@ -62,7 +62,7 @@
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZKTableDisable;
+import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.KeeperException;
@@ -474,23 +474,30 @@
return available.get();
}
+ private synchronized ZKTable getZKTable() throws IOException {
+ try {
+ // Create ZKTable each time because it goes to ensemble on construction.
+ // Don't cache it. Will get stale view on zk if you do.
+ return new ZKTable(this.zooKeeper);
+ } catch (KeeperException e) {
+ throw new IOException("Failed ZKTable construction", e);
+ }
+ }
+
/*
* @param True if table is online
*/
- private boolean testTableOnlineState(byte[] tableName, boolean online)
+ private boolean testTableOnlineState(byte [] tableName, boolean online)
throws IOException {
if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
// The root region is always enabled
- return true;
+ return online;
}
- try {
- List<String> tables = ZKTableDisable.getDisabledTables(this.zooKeeper);
- String searchStr = Bytes.toString(tableName);
- boolean disabled = tables.contains(searchStr);
- return online? !disabled: disabled;
- } catch (KeeperException e) {
- throw new IOException("Failed listing disabled tables", e);
+ String tableNameStr = Bytes.toString(tableName);
+ if (online) {
+ return getZKTable().isEnabledTable(tableNameStr);
}
+ return getZKTable().isDisabledTable(tableNameStr);
}
private static class HTableDescriptorFinder
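
The client-side change above intentionally builds a fresh ZKTable for every state query so the answer reflects the current znodes rather than a stale cached view. A minimal sketch of that pattern, reusing only the ZKTable constructor and the isEnabledTable()/isDisabledTable() reads shown in this patch; the helper class itself is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.zookeeper.ZKTable;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
    import org.apache.zookeeper.KeeperException;

    // Sketch only; the helper class name is illustrative.
    final class TableStateProbe {
      private TableStateProbe() {}

      static boolean isEnabled(final ZooKeeperWatcher zkw, final byte [] tableName)
      throws IOException {
        return zkTable(zkw).isEnabledTable(Bytes.toString(tableName));
      }

      static boolean isDisabled(final ZooKeeperWatcher zkw, final byte [] tableName)
      throws IOException {
        return zkTable(zkw).isDisabledTable(Bytes.toString(tableName));
      }

      private static ZKTable zkTable(final ZooKeeperWatcher zkw) throws IOException {
        try {
          // New ZKTable each time; construction reads current table state from zk,
          // so callers never see a stale cached view.
          return new ZKTable(zkw);
        } catch (KeeperException e) {
          throw new IOException("Failed ZKTable construction", e);
        }
      }
    }
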
Index: src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1033197)
+++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -333,7 +333,7 @@
try {
getMaster().createTable(desc, splitKeys);
} catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
+ throw e.unwrapRemoteException();
}
}
@@ -427,37 +427,27 @@
LOG.info("Deleted " + Bytes.toString(tableName));
}
-
-
- /**
- * Brings a table on-line (enables it).
- * Synchronous operation.
- *
- * @param tableName name of the table
- * @throws IOException if a remote or network exception occurs
- */
- public void enableTable(final String tableName) throws IOException {
+ public void enableTable(final String tableName)
+ throws IOException {
enableTable(Bytes.toBytes(tableName));
}
/**
- * Brings a table on-line (enables it).
- * Synchronous operation.
- *
+ * Enable a table. May time out. Use {@link #enableTableAsync(byte[])}
+ * and {@link #isTableEnabled(byte[])} instead.
* @param tableName name of the table
* @throws IOException if a remote or network exception occurs
+ * @see {@link #isTableEnabled(byte[])}
+ * @see {@link #disableTable(byte[])}
+ * @see {@link #enableTableAsync(byte[])}
*/
- public void enableTable(final byte [] tableName) throws IOException {
- isMasterRunning();
-
+ public void enableTable(final byte [] tableName)
+ throws IOException {
+ enableTableAsync(tableName);
+
// Wait until all regions are enabled
boolean enabled = false;
for (int tries = 0; tries < this.numRetries; tries++) {
- try {
- getMaster().enableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
enabled = isTableEnabled(tableName);
if (enabled) {
break;
@@ -470,12 +460,11 @@
try {
Thread.sleep(sleep);
} catch (InterruptedException e) {
- // continue
+ Thread.currentThread().interrupt();
+ // Convert to IOException rather than let the InterruptedException out
+ // because we do not want to change the method signature.
+ throw new IOException("Interrupted", e);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Wake. Waiting for all regions to be enabled from " +
- Bytes.toString(tableName));
- }
}
if (!enabled) {
throw new IOException("Unable to enable table " +
@@ -485,53 +474,84 @@
}
/**
- * Disables a table (takes it off-line) If it is being served, the master
- * will tell the servers to stop serving it.
- * Synchronous operation.
- *
- * @param tableName name of table
- * @throws IOException if a remote or network exception occurs
+ * Brings a table on-line (enables it). Method returns immediately though
+ * enable of table may take some time to complete, especially if the table
+ * is large (All regions are opened as part of enabling process). Check
+ * {@link #isTableEnabled(byte[])} to learn when the table is fully online. If
+ * the table is taking too long to come online, check server logs.
+ * @param tableName
+ * @throws IOException
+ * @since 0.90.0
*/
- public void disableTable(final String tableName) throws IOException {
- disableTable(Bytes.toBytes(tableName));
+ public void enableTableAsync(final byte [] tableName)
+ throws IOException {
+ isMasterRunning();
+ try {
+ getMaster().enableTable(tableName);
+ } catch (RemoteException e) {
+ throw e.unwrapRemoteException();
+ }
+ LOG.info("Started enable of " + Bytes.toString(tableName));
}
/**
- * Disables a table (takes it off-line) If it is being served, the master
- * will tell the servers to stop serving it.
- * Synchronous operation.
- *
+ * Starts the disable of a table. If it is being served, the master
+ * will tell the servers to stop serving it. This method returns immediately.
+ * The disable of a table can take some time if the table is large (all
+ * regions are closed as part of table disable operation).
+ * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
+ * If the table is taking too long to go offline, check server logs.
* @param tableName name of table
* @throws IOException if a remote or network exception occurs
+ * @see {@link #isTableDisabled(byte[])}
+ * @see {@link #isTableEnabled(byte[])}
+ * @since 0.90.0
*/
- public void disableTable(final byte [] tableName) throws IOException {
+ public void disableTableAsync(final byte [] tableName) throws IOException {
isMasterRunning();
+ try {
+ getMaster().disableTable(tableName);
+ } catch (RemoteException e) {
+ throw e.unwrapRemoteException();
+ }
+ LOG.info("Started disable of " + Bytes.toString(tableName));
+ }
- // Wait until all regions are disabled
+ public void disableTable(final String tableName)
+ throws IOException {
+ disableTable(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * Disable table and wait on completion. May time out. Use
+ * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
+ * instead.
+ * @param tableName
+ * @throws IOException
+ */
+ public void disableTable(final byte [] tableName)
+ throws IOException {
+ disableTableAsync(tableName);
+ // Wait until table is disabled
boolean disabled = false;
for (int tries = 0; tries < this.numRetries; tries++) {
- try {
- getMaster().disableTable(tableName);
- } catch (RemoteException e) {
- throw RemoteExceptionHandler.decodeRemoteException(e);
- }
disabled = isTableDisabled(tableName);
if (disabled) {
break;
}
+ long sleep = getPauseTime(tries);
if (LOG.isDebugEnabled()) {
- LOG.debug("Sleep. Waiting for all regions to be disabled from " +
- Bytes.toString(tableName));
+ LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
+ "disabled in " + Bytes.toString(tableName));
}
try {
- Thread.sleep(getPauseTime(tries));
+ Thread.sleep(sleep);
} catch (InterruptedException e) {
- // continue
+ // Convert to IOException rather than let the InterruptedException out
+ // because we do not want to change the method signature.
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted", e);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Wake. Waiting for all regions to be disabled from " +
- Bytes.toString(tableName));
- }
}
if (!disabled) {
throw new RegionException("Retries exhausted, it took too long to wait"+
@@ -562,6 +582,15 @@
* @return true if table is off-line
* @throws IOException if a remote or network exception occurs
*/
+ public boolean isTableDisabled(final String tableName) throws IOException {
+ return isTableDisabled(Bytes.toBytes(tableName));
+ }
+
+ /**
+ * @param tableName name of table to check
+ * @return true if table is off-line
+ * @throws IOException if a remote or network exception occurs
+ */
public boolean isTableDisabled(byte[] tableName) throws IOException {
return connection.isTableDisabled(tableName);
}
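
With the HBaseAdmin changes above, enable and disable come in two flavors: the blocking enableTable()/disableTable(), which now poll internally, and enableTableAsync()/disableTableAsync() paired with the isTableEnabled()/isTableDisabled() probes. An illustrative client use of the async form follows; the table name 't1' and the fixed one-second poll are made up for the example, while the admin calls are those added or retained by this patch.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncEnableExample {
      public static void main(String [] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        byte [] tableName = Bytes.toBytes("t1");
        // Kick off the enable; returns as soon as the master has started it.
        admin.enableTableAsync(tableName);
        // Poll until the table reports enabled, with a simple fixed backoff.
        while (!admin.isTableEnabled(tableName)) {
          Thread.sleep(1000);
        }
        System.out.println("Table enabled: " + Bytes.toString(tableName));
      }
    }
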
Index: src/main/ruby/hbase/admin.rb
===================================================================
--- src/main/ruby/hbase/admin.rb (revision 1033197)
+++ src/main/ruby/hbase/admin.rb (working copy)
@@ -87,6 +87,12 @@
end
#----------------------------------------------------------------------------------------------
+ # Is table disabled?
+ def disabled?(table_name)
+ @admin.isTableDisabled(table_name)
+ end
+
+ #----------------------------------------------------------------------------------------------
# Drops a table
def drop(table_name)
raise ArgumentError, "Table #{table_name} does not exist.'" unless exists?(table_name)
Index: src/main/ruby/shell/commands/enable.rb
===================================================================
--- src/main/ruby/shell/commands/enable.rb (revision 1033197)
+++ src/main/ruby/shell/commands/enable.rb (working copy)
@@ -23,7 +23,7 @@
class Enable < Command
def help
return <<-EOF
- Enable the named table: e.g. "hbase> enable 't1'"
+ Start enable of named table: e.g. "hbase> enable 't1'"
EOF
end
Index: src/main/ruby/shell/commands/disable.rb
===================================================================
--- src/main/ruby/shell/commands/disable.rb (revision 1033197)
+++ src/main/ruby/shell/commands/disable.rb (working copy)
@@ -23,7 +23,7 @@
class Disable < Command
def help
return <<-EOF
- Disable the named table: e.g. "hbase> disable 't1'"
+ Start disable of named table: e.g. "hbase> disable 't1'"
EOF
end
Index: src/main/ruby/shell/commands/is_enabled.rb
===================================================================
--- src/main/ruby/shell/commands/is_enabled.rb (revision 0)
+++ src/main/ruby/shell/commands/is_enabled.rb (revision 0)
@@ -0,0 +1,39 @@
+#
+# Copyright 2010 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class IsEnabled < Command
+ def help
+ return <<-EOF
+ Is named table enabled?: e.g. "hbase> is_enabled 't1'"
+ EOF
+ end
+
+ def command(table)
+ format_simple_command do
+ formatter.row([
+ admin.enabled?(table) ? "true" : "false"
+ ])
+ end
+ end
+ end
+ end
+end
Index: src/main/ruby/shell/commands/is_disabled.rb
===================================================================
--- src/main/ruby/shell/commands/is_disabled.rb (revision 0)
+++ src/main/ruby/shell/commands/is_disabled.rb (revision 0)
@@ -0,0 +1,39 @@
+#
+# Copyright 2010 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class IsDisabled < Command
+ def help
+ return <<-EOF
+ Is named table disabled?: e.g. "hbase> is_disabled 't1'"
+ EOF
+ end
+
+ def command(table)
+ format_simple_command do
+ formatter.row([
+ admin.disabled?(table) ? "true" : "false"
+ ])
+ end
+ end
+ end
+ end
+end
Index: src/main/ruby/shell.rb
===================================================================
--- src/main/ruby/shell.rb (revision 1033197)
+++ src/main/ruby/shell.rb (working copy)
@@ -217,8 +217,10 @@
create
describe
disable
+ is_disabled
drop
enable
+ is_enabled
exists
list
]