diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 03bd313..ef762f9 100644 --- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -341,7 +341,8 @@ public class AssignmentManager extends ZooKeeperListener { MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName); Integer pending = 0; for(HRegionInfo hri : hris) { - if(regionsToReopen.get(hri.getEncodedName()) != null) { + String name = hri.getEncodedName(); + if (regionsToReopen.containsKey(name) || regionsInTransition.containsKey(name)) { pending++; } } diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 657e371..c414adf 100644 --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -94,6 +94,7 @@ import org.apache.hadoop.hbase.master.handler.ModifyTableHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler; import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler; +import org.apache.hadoop.hbase.master.handler.TableEventHandler; import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler; import org.apache.hadoop.hbase.master.metrics.MasterMetrics; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; @@ -1387,7 +1388,10 @@ Server { if (cpHost != null) { cpHost.preModifyTable(tableName, htd); } - this.executorService.submit(new ModifyTableHandler(tableName, htd, this, this)); + TableEventHandler tblHandler = new ModifyTableHandler(tableName, htd, this, this); + this.executorService.submit(tblHandler); + // prevent client from querying status even before the event is being handled. 
+ tblHandler.waitForEventBeingHandled(); if (cpHost != null) { cpHost.postModifyTable(tableName, htd); } diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java index 3fb50b2..cfad403 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java @@ -50,11 +50,8 @@ public class TableAddFamilyHandler extends TableEventHandler { @Override protected void handleTableOperation(List hris) throws IOException { - // Update table descriptor in HDFS - HTableDescriptor htd = this.masterServices.getMasterFileSystem() - .addColumn(tableName, familyDesc); - // Update in-memory descriptor cache - this.masterServices.getTableDescriptors().add(htd); + // Update table descriptor + this.masterServices.getMasterFileSystem().addColumn(tableName, familyDesc); } @Override public String toString() { diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index 9322ba6..b520762 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -45,13 +45,10 @@ public class TableDeleteFamilyHandler extends TableEventHandler { @Override protected void handleTableOperation(List hris) throws IOException { - // Update table descriptor in HDFS - HTableDescriptor htd = - this.masterServices.getMasterFileSystem().deleteColumn(tableName, familyName); - // Update in-memory descriptor cache - this.masterServices.getTableDescriptors().add(htd); - // Remove the column family from the file system MasterFileSystem mfs = this.masterServices.getMasterFileSystem(); + // Update table descriptor + mfs.deleteColumn(tableName, familyName); + // Remove the 
column family from the file system for (HRegionInfo hri : hris) { // Delete the family directory in FS for all the regions one by one mfs.deleteFamilyFromFS(hri, familyName); diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index aeaa4ad..354c593 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master.handler; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -59,6 +60,7 @@ public abstract class TableEventHandler extends EventHandler { protected final MasterServices masterServices; protected final byte [] tableName; protected final String tableNameStr; + protected boolean isEventBeingHandled = false; public TableEventHandler(EventType eventType, byte [] tableName, Server server, MasterServices masterServices) @@ -108,16 +110,27 @@ public abstract class TableEventHandler extends EventHandler { LOG.error("Error manipulating table " + Bytes.toString(tableName), e); } catch (KeeperException e) { LOG.error("Error manipulating table " + Bytes.toString(tableName), e); + } finally { + notifyEventBeingHandled(); } } public boolean reOpenAllRegions(List regions) throws IOException { boolean done = false; + HTable table = null; + TreeMap> serverToRegions = Maps.newTreeMap(); + NavigableMap hriHserverMapping; + LOG.info("Bucketing regions by region server..."); - HTable table = new HTable(masterServices.getConfiguration(), tableName); - TreeMap> serverToRegions = Maps - .newTreeMap(); - NavigableMap hriHserverMapping = table.getRegionLocations(); + + try { + table = new HTable(masterServices.getConfiguration(), tableName); + hriHserverMapping = 
table.getRegionLocations(); + } finally { + if (table != null) { + table.close(); + } + } List reRegions = new ArrayList(); for (HRegionInfo hri : regions) { ServerName rsLocation = hriHserverMapping.get(hri); @@ -139,6 +152,7 @@ LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size() + " region servers."); this.masterServices.getAssignmentManager().setRegionsToReopen(reRegions); + notifyEventBeingHandled(); BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToRegions, this.masterServices.getAssignmentManager()); while (true) { @@ -189,4 +203,26 @@ protected abstract void handleTableOperation(List regions) throws IOException, KeeperException; + + /** + * Table modifications are processed asynchronously, but provide an API for you to query their + * status. + * @throws IOException + */ + public synchronized void waitForEventBeingHandled() throws IOException { + while (!this.isEventBeingHandled) { + try { + wait(); + } catch (InterruptedException ie) { + throw (IOException) new InterruptedIOException().initCause(ie); + } + } + } + + private synchronized void notifyEventBeingHandled() { + if (!this.isEventBeingHandled) { + isEventBeingHandled = true; + notifyAll(); + } + } } diff --git src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java new file mode 100644 index 0000000..8b6e8b9 --- /dev/null +++ src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java @@ -0,0 +1,160 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.handler; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Set; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Verify that the HTableDescriptor is updated after + * addColumn(), deleteColumn() and modifyTable() operations. 
+ */ +@Category(LargeTests.class) +public class TestTableDescriptorModification { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final byte[] TABLE_NAME = Bytes.toBytes("table"); + private static final byte[] FAMILY_0 = Bytes.toBytes("cf0"); + private static final byte[] FAMILY_1 = Bytes.toBytes("cf1"); + + /** + * Start up a mini cluster and put a small table of empty regions into it. + * + * @throws Exception + */ + @BeforeClass + public static void beforeAllTests() throws Exception { + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testModifyTable() throws IOException { + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with one family + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table adding another family and verify the descriptor + HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME); + modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.modifyTable(TABLE_NAME, modifiedHtd); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testAddColumn() throws IOException { + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table removing one family and verify the 
descriptor + admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testDeleteColumn() throws IOException { + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + // Modify the table removing one family and verify the descriptor + admin.deleteColumn(TABLE_NAME, FAMILY_1); + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + private void verifyTableDescriptor(final byte[] tableName, final byte[]... families) + throws IOException { + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + + // Verify descriptor from master + HTableDescriptor htd = admin.getTableDescriptor(tableName); + verifyTableDescriptor(htd, tableName, families); + + // Verify descriptor from HDFS + MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); + Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName); + htd = FSTableDescriptors.getTableDescriptor(mfs.getFileSystem(), tableDir); + verifyTableDescriptor(htd, tableName, families); + } + + private void verifyTableDescriptor(final HTableDescriptor htd, + final byte[] tableName, final byte[]... families) { + Set htdFamilies = htd.getFamiliesKeys(); + assertTrue(Bytes.equals(tableName, htd.getName())); + assertEquals(families.length, htdFamilies.size()); + for (byte[] familyName: families) { + assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName)); + } + } +} \ No newline at end of file