Index: src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (revision 1158129) +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (working copy) @@ -271,7 +271,8 @@ HRegionInfo090x hrfm = getHRegionInfoForMigration(r); if (hrfm == null) return true; htds.add(hrfm.getTableDesc()); - masterServices.getMasterFileSystem().createTableDescriptor(hrfm.getTableDesc()); + masterServices.getMasterFileSystem().createTableDescriptor( + hrfm.getTableDesc(), false); HRegionInfo regionInfo = new HRegionInfo(hrfm); LOG.debug(" MetaEditor.updatemeta RegionInfo = " + regionInfo.toString() + " old HRI = " + hrfm.toString()); Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1158129) +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -976,7 +976,7 @@ // tableDir is created. Should we change below method to be createTable // where we create table in tmp dir with its table descriptor file and then // do rename to move it into place? - FSUtils.createTableDescriptor(hTableDescriptor, conf); + FSUtils.createTableDescriptor(hTableDescriptor, conf, false); // 1. Set table enabling flag up in zk. try { Index: src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1158129) +++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy) @@ -412,10 +412,14 @@ /** * Create new HTableDescriptor in HDFS. + * Passing true to forceCreation will overwrite the tabledescriptor + * if already present. 
* @param htableDescriptor + * @param forceCreation */ - public void createTableDescriptor(HTableDescriptor htableDescriptor) { - FSUtils.createTableDescriptor(htableDescriptor, conf); + public void createTableDescriptor(HTableDescriptor htableDescriptor, + boolean forceCreation) throws IOException { + FSUtils.createTableDescriptor(htableDescriptor, conf, forceCreation); } /** Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1158129) +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy) @@ -939,41 +939,47 @@ /** * Create new HTableDescriptor in HDFS. Happens when we are creating table. + * If forceCreation is true then even if previous table descriptor is present + * it will be overwritten /** * @param htableDescriptor * @param conf + * @param forceCreation */ - public static void createTableDescriptor(HTableDescriptor htableDescriptor, - Configuration conf) { - try { - FileSystem fs = getCurrentFileSystem(conf); - createTableDescriptor(fs, getRootDir(conf), htableDescriptor); - } catch(IOException ioe) { - LOG.info("IOException while trying to create tableInfo in HDFS", ioe); - } + public static boolean createTableDescriptor( + HTableDescriptor htableDescriptor, Configuration conf, + boolean forceCreation) throws IOException { + FileSystem fs = getCurrentFileSystem(conf); + return createTableDescriptor(fs, getRootDir(conf), htableDescriptor, + forceCreation); } /** * @param fs * @param htableDescriptor * @param rootdir + * @param forceCreation */ - public static void createTableDescriptor(FileSystem fs, - Path rootdir, HTableDescriptor htableDescriptor) { + public static boolean createTableDescriptor(FileSystem fs, Path rootdir, + HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException { try { - Path tableInfoPath = - getTableInfoPath(rootdir, 
htableDescriptor.getNameAsString()); - LOG.info("Current tableInfoPath = " + tableInfoPath) ; - if (fs.exists(tableInfoPath) && - fs.getFileStatus(tableInfoPath).getLen() > 0) { - LOG.info("TableInfo already exists.. Skipping creation"); - return; + Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor + .getNameAsString()); + LOG.info("Current tableInfoPath = " + tableInfoPath); + if (!forceCreation) { + if (fs.exists(tableInfoPath) + && fs.getFileStatus(tableInfoPath).getLen() > 0) { + LOG.info("TableInfo already exists.. Skipping creation"); + return false; + } } - writeTableDescriptor(fs, htableDescriptor, - getTablePath(rootdir, htableDescriptor.getNameAsString())); - } catch(IOException ioe) { + writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir, + htableDescriptor.getNameAsString()), forceCreation); + } catch (IOException ioe) { LOG.info("IOException while trying to create tableInfo in HDFS", ioe); + throw ioe; } + return true; } /** @@ -996,7 +1002,7 @@ * @throws IOException */ private static void writeTableDescriptor(FileSystem fs, - HTableDescriptor hTableDescriptor, Path tableDir) + HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation) throws IOException { // Create in tmpdir and then move into place in case we crash after // create but before close. 
If we don't successfully close the file, @@ -1006,6 +1012,12 @@ Path tmpPath = new Path(new Path(tableDir,".tmp"), HConstants.TABLEINFO_NAME); LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath); writeHTD(fs, tmpPath, hTableDescriptor); + if (forceCreation) { + if (!fs.delete(tableInfoPath, false)) { + throw new IOException("Unable to delete " + tableInfoPath + + " while forcefully writing table descriptor."); + } + } if (!fs.rename(tmpPath, tableInfoPath)) { throw new IOException("Unable to rename " + tmpPath + " to " + tableInfoPath); Index: src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (working copy) @@ -264,7 +264,7 @@ Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); // Write the .tableinfo - FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled); + FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled, false); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled); @@ -276,7 +276,7 @@ HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo - FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled); + FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled, false); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled); List disabledRegions = TEST_UTIL.createMultiRegionsInMeta( @@ -581,7 +581,7 @@ Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); // Write the .tableinfo - FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled); + 
FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled, false); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled); @@ -593,7 +593,7 @@ HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo - FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled); + FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled, false); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled); Index: src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/MultiRegionTable.java (working copy) @@ -89,7 +89,7 @@ Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); filesystem.mkdirs(rootdir); - FSUtils.createTableDescriptor(fs, rootdir, desc); + FSUtils.createTableDescriptor(fs, rootdir, desc, false); HRegion[] regions = new HRegion[KEYS.length]; for (int i = 0; i < regions.length; i++) { Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (working copy) @@ -72,7 +72,7 @@ HTU.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(tablename); - FSUtils.createTableDescriptor(htd, HTU.getConfiguration()); + FSUtils.createTableDescriptor(htd, HTU.getConfiguration(), false); HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); HTableDescriptor htd2 = 
hri.getTableDesc(); Index: src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java (revision 0) +++ src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java (revision 0) @@ -0,0 +1,84 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestFSTableDescriptorForceCreation { + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + @BeforeClass + public static void setUpCluster() throws Exception { + UTIL.startMiniDFSCluster(1); + } + + @AfterClass + public static void shutDownCluster() throws Exception { + UTIL.shutdownMiniDFSCluster(); + } + + @Test + public void testShouldCreateNewTableDescriptorIfForceFulCreationIsFalse() + throws IOException { + final String name = "newTable2"; + FileSystem fs = FileSystem.get(UTIL.getConfiguration()); + Path rootdir = new Path(fs.getWorkingDirectory(), name); + HTableDescriptor htd = new HTableDescriptor(name); + assertTrue("Should create new table descriptor", FSUtils + .createTableDescriptor(fs, rootdir, htd, false)); + } + + @Test + public void testShouldNotCreateTheSameTableDescriptorIfForceFulCreationIsFalse() + throws IOException { + final String name = "testAlreadyExists"; + FileSystem fs = FileSystem.get(UTIL.getConfiguration()); + // Cleanup old test detritus if any is lying around. 
+ Path rootdir = new Path(fs.getWorkingDirectory(), name); + TableDescriptors htds = new FSTableDescriptors(fs, rootdir); + HTableDescriptor htd = new HTableDescriptor(name); + htds.add(htd); + assertFalse("Should not create new table descriptor", FSUtils + .createTableDescriptor(fs, rootdir, htd, false)); + } + + @Test + public void testShouldAllowForceFulCreationOfAlreadyExistingTableDescriptor() + throws Exception { + final String name = "createNewTableNew2"; + FileSystem fs = FileSystem.get(UTIL.getConfiguration()); + Path rootdir = new Path(fs.getWorkingDirectory(), name); + HTableDescriptor htd = new HTableDescriptor(name); + FSUtils.createTableDescriptor(fs, rootdir, htd, false); + assertTrue("Should create new table descriptor", FSUtils + .createTableDescriptor(fs, rootdir, htd, true)); + } +} Index: src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java (working copy) @@ -94,7 +94,7 @@ closeRootAndMeta(); // Add new table descriptor file - FSUtils.createTableDescriptor(this.desc, this.conf); + FSUtils.createTableDescriptor(this.desc, this.conf, false); } /** Index: src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java (working copy) @@ -58,7 +58,7 @@ this.desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); // Write the table schema to the fs FSUtils.createTableDescriptor(FileSystem.get(this.conf), this.testDir, - this.desc); + this.desc, false); // Region 0 will contain the key range [,row_0500) INFOS[0] = new HRegionInfo(desc.getName(), HConstants.EMPTY_START_ROW, Index: 
src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java (working copy) @@ -71,7 +71,7 @@ private void createHTDInFS(final FileSystem fs, Path rootdir, final HTableDescriptor htd) throws IOException { - FSUtils.createTableDescriptor(fs, rootdir, htd); + FSUtils.createTableDescriptor(fs, rootdir, htd, false); } @Test public void testHTableDescriptors() Index: src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java (working copy) @@ -96,7 +96,7 @@ // Create regions and populate them at same time. Create the tabledir // for them first. - FSUtils.createTableDescriptor(fs, rootdir, desc); + FSUtils.createTableDescriptor(fs, rootdir, desc, false); HRegion [] regions = { createRegion(desc, null, row_70001, 1, 70000, rootdir), createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir), Index: src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (revision 1158129) +++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (working copy) @@ -134,7 +134,7 @@ try { // Create root and meta regions createRootAndMetaRegions(); - FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc); + FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc, false); /* * Create the regions we will merge */