diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 5afaedf..65db28f 100644
--- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -895,7 +895,7 @@ public class HConnectionManager {
           deleteCachedLocation(tableName, row);
         }
 
-        // Query the root or meta region for the location of the meta region
+        // Query the root or meta region for the location of the meta region
         regionInfoRow = server.getClosestRowBefore(
           metaLocation.getRegionInfo().getRegionName(), metaKey,
           HConstants.CATALOG_FAMILY);
diff --git a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index b9c850d..adf2879 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -333,9 +334,9 @@ public class MasterFileSystem {
 
   private void createRootTableInfo(Path rd) throws IOException {
     // Create ROOT tableInfo if required.
-    if (!FSUtils.tableInfoExists(fs, rd,
+    if (!FSTableDescriptors.tableInfoExists(fs, rd,
         Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
-      FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
+      FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
     }
   }
 
@@ -412,24 +413,13 @@ public class MasterFileSystem {
   }
 
   /**
-   * Get table info path for a table.
-   * @param tableName
-   * @return Table info path
-   */
-  private Path getTableInfoPath(byte[] tableName) {
-    Path tablePath = new Path(this.rootdir, Bytes.toString(tableName));
-    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
-    return tableInfoPath;
-  }
-
-  /**
    * Create new HTableDescriptor in HDFS.
   *
   * @param htableDescriptor
   */
  public void createTableDescriptor(HTableDescriptor htableDescriptor)
  throws IOException {
-    FSUtils.createTableDescriptor(htableDescriptor, conf);
+    FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
  }
 
  /**
diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index b9d4b90..4600991 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -29,21 +29,21 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.zookeeper.KeeperException;
 
 /**
  * Handler to create a table.
@@ -137,7 +137,7 @@ public class CreateTableHandler extends EventHandler {
     // tableDir is created.  Should we change below method to be createTable
     // where we create table in tmp dir with its table descriptor file and then
     // do rename to move it into place?
-    FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
+    FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
 
     List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
     final int batchSize =
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 24570c9..bfd0629 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -19,14 +19,21 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -42,7 +49,6 @@ import org.apache.commons.logging.LogFactory;
  * the filesystem or can be read and write.
  */
 public class FSTableDescriptors implements TableDescriptors {
-
   private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
   private final FileSystem fs;
   private final Path rootdir;
@@ -130,7 +136,7 @@ public class FSTableDescriptors implements TableDescriptors {
     // Check mod time has not changed (this is trip to NN).
     long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
+      FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, tablename);
     if (tdm != null) {
       if (modtime <= tdm.getModtime()) {
         cachehits++;
@@ -138,7 +144,7 @@ public class FSTableDescriptors implements TableDescriptors {
       }
     }
     HTableDescriptor htd =
-      FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
+      FSTableDescriptors.getTableDescriptor(this.fs, this.rootdir, tablename);
     if (htd == null) {
       // More likely is above will throw a FileNotFoundException
       throw new TableExistsException("No descriptor for " + tablename);
@@ -181,9 +187,9 @@ public class FSTableDescriptors implements TableDescriptors {
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
       throw new NotImplementedException();
     }
-    if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
+    if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
     long modtime =
-      FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
+      FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
     this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
   }
 
@@ -201,4 +207,292 @@ public class FSTableDescriptors implements TableDescriptors {
     TableDescriptorModtime tdm = this.cache.remove(tablename);
     return tdm == null? null: tdm.getTableDescriptor();
   }
+
+  /**
+   * Get HTableDescriptor
+   * @param config
+   * @param tableName
+   * @return HTableDescriptor for table
+   * @throws IOException
+   */
+  public static HTableDescriptor getHTableDescriptor(Configuration config,
+      String tableName)
+  throws IOException {
+    Path path = FSUtils.getRootDir(config);
+    FileSystem fs = path.getFileSystem(config);
+    return getTableDescriptor(fs, path, tableName);
+  }
+
+  /**
+   * Get table info path for a table.
+   * @param rootdir
+   * @param tablename
+   * @return Table info path
+   */
+  static Path getTableInfoPath(Path rootdir, String tablename) {
+    Path tablePath = FSUtils.getTablePath(rootdir, tablename);
+    return new Path(tablePath, HConstants.TABLEINFO_NAME);
+  }
+
+  /**
+   * Checks if .tableinfo exists for given table
+   *
+   * @param fs file system
+   * @param rootdir root directory of HBase installation
+   * @param tableName name of table
+   * @return true if exists
+   * @throws IOException
+   */
+  public static boolean tableInfoExists(FileSystem fs, Path rootdir,
+      String tableName) throws IOException {
+    Path tablePath = FSTableDescriptors.getTableInfoPath(rootdir, tableName);
+    return fs.exists(tablePath);
+  }
+
+  /**
+   * @param fs
+   * @param rootdir
+   * @param tablename
+   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
+   * @throws IOException
+   */
+  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
+      final String tablename)
+  throws IOException {
+    Path p = FSUtils.getTablePath(rootdir, tablename);
+    FileStatus [] status = fs.listStatus(p, new PathFilter() {
+      @Override
+      public boolean accept(Path p) {
+        // Accept any file that starts with TABLEINFO_NAME
+        return p.getName().startsWith(HConstants.TABLEINFO_NAME);
+      }
+    });
+    if (status == null || status.length < 1) {
+      throw new FileNotFoundException("No status for " + p.toString());
+    }
+    Arrays.sort(status, new TableInfoFileStatusComparator());
+    return status[0].getModificationTime();
+  }
+
+  /**
+   * Compare {@link FileStatus} instances by {@link Path#getName()};
+   * sorts newest first (reverse order).
+   */
+  static class TableInfoFileStatusComparator
+  implements Comparator<FileStatus> {
+    @Override
+    public int compare(FileStatus left, FileStatus right) {
+      return -left.compareTo(right);
+    }
+  }
+
+  /**
+   * Get HTD from HDFS.
+   * @param fs
+   * @param hbaseRootDir
+   * @param tableName
+   * @return Descriptor or null if none found.
+   * @throws IOException
+   */
+  public static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, byte[] tableName)
+  throws IOException {
+    return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
+  }
+
+  public static HTableDescriptor getTableDescriptor(FileSystem fs,
+      Path hbaseRootDir, String tableName) {
+    HTableDescriptor htd = null;
+    try {
+      htd = getTableDescriptor(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
+    } catch (NullPointerException e) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, e);
+    } catch (IOException ioe) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, ioe);
+    }
+    return htd;
+  }
+
+  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
+  throws IOException, NullPointerException {
+    if (tableDir == null) throw new NullPointerException();
+    Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
+    FSDataInputStream fsDataInputStream = fs.open(tableinfo);
+    HTableDescriptor hTableDescriptor = null;
+    try {
+      hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+    } finally {
+      fsDataInputStream.close();
+    }
+    return hTableDescriptor;
+  }
+
+  /**
+   * Update table descriptor
+   * @param fs
+   * @param rootdir
+   * @param hTableDescriptor
+   * @throws IOException
+   */
+  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor hTableDescriptor)
+  throws IOException {
+    Path tableInfoPath =
+      FSTableDescriptors.getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
+    writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
+    LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
+      hTableDescriptor.toString());
+  }
+
+  private static void writeHTD(final FileSystem fs, final Path p,
+      final HTableDescriptor htd)
+  throws IOException {
+    FSDataOutputStream out = fs.create(p, true);
+    try {
+      htd.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(htd.toString()));
+    } finally {
+      out.close();
+    }
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   *
+   * @param fs
+   * @param rootdir
+   * @param htableDescriptor
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor) throws IOException {
+    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true, the descriptor is written even if a previous table
+   * descriptor is already present (it is overwritten).
+   *
+   * @param fs
+   * @param rootdir
+   * @param htableDescriptor
+   * @param forceCreation
+   */
+  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+      HTableDescriptor htableDescriptor, boolean forceCreation)
+  throws IOException {
+    Path tableInfoPath =
+      FSTableDescriptors.getTableInfoPath(rootdir, htableDescriptor.getNameAsString());
+    LOG.info("Current tableInfoPath = " + tableInfoPath);
+    if (!forceCreation) {
+      if (fs.exists(tableInfoPath)
+          && fs.getFileStatus(tableInfoPath).getLen() > 0) {
+        LOG.info("TableInfo already exists; skipping creation");
+        return false;
+      }
+    }
+    writeTableDescriptor(fs, htableDescriptor,
+      FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()),
+      forceCreation);
+    return true;
+  }
+
+  /**
+   * Deletes a table's directory from the file system if it exists. Used in unit
+   * tests.
+   */
+  public static void deleteTableDescriptorIfExists(String tableName,
+      Configuration conf) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    Path tableInfoPath =
+      FSTableDescriptors.getTableInfoPath(FSUtils.getRootDir(conf), tableName);
+    if (fs.exists(tableInfoPath)) FSUtils.deleteDirectory(fs, tableInfoPath);
+  }
+
+  /**
+   * Called when we are creating a table to write out the table's descriptor.
+   * @param fs
+   * @param hTableDescriptor
+   * @param tableDir
+   * @param forceCreation True if we are to force creation
+   * @throws IOException
+   */
+  private static void writeTableDescriptor(FileSystem fs,
+      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
+  throws IOException {
+    // Create in tmpdir and then move into place in case we crash after
+    // create but before close.  If we don't successfully close the file,
+    // subsequent region reopens will fail the below because create is
+    // registered in NN.
+    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
+    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
+      HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
+    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
+    try {
+      writeHTD(fs, tmpPath, hTableDescriptor);
+    } catch (IOException e) {
+      LOG.error("Unable to write the table descriptor in the path " + tmpPath
+        + ".", e);
+      fs.delete(tmpPath, true);
+      throw e;
+    }
+    // TODO: The below is less than ideal and likely error prone.  There is a
+    // better rename in hadoops after 0.20 that takes rename options (this has
+    // its own issues according to mighty Todd in that old readers may fail
+    // as we cross the rename transition) but until then, we have this
+    // forceCreation flag which does a delete and then we rename so there is a
+    // hole.  Need to fix.
+    try {
+      if (forceCreation) {
+        if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
+          String errMsg = "Unable to delete " + tableInfoPath
+            + " while forcefully writing the table descriptor.";
+          LOG.error(errMsg);
+          throw new IOException(errMsg);
+        }
+      }
+      if (!fs.rename(tmpPath, tableInfoPath)) {
+        String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
+        LOG.error(errMsg);
+        throw new IOException(errMsg);
+      } else {
+        LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
+      }
+    } finally {
+      fs.delete(tmpPath, true);
+    }
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   *
+   * @param htableDescriptor
+   * @param conf
+   */
+  public static boolean createTableDescriptor(
+      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
+    return createTableDescriptor(htableDescriptor, conf, false);
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+   * forceCreation is true, the descriptor is written even if a previous table
+   * descriptor is already present (it is overwritten).
+   *
+   * @param htableDescriptor
+   * @param conf
+   * @param forceCreation
+   */
+  public static boolean createTableDescriptor(
+      HTableDescriptor htableDescriptor, Configuration conf,
+      boolean forceCreation) throws IOException {
+    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
+      forceCreation);
+  }
 }
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 789dd3b..a1ebb8f 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -44,11 +44,10 @@ import org.apache.hadoop.util.StringUtils;
 
 import java.io.DataInputStream;
 import java.io.EOFException;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -62,7 +61,7 @@ public abstract class FSUtils {
   protected FSUtils() {
     super();
   }
-  
+
   public static FSUtils getInstance(FileSystem fs, Configuration conf) {
     String scheme = fs.getUri().getScheme();
     if (scheme == null) {
@@ -491,21 +490,6 @@ public abstract class FSUtils {
   }
 
   /**
-   * Checks if .tableinfo exists for given table
-   *
-   * @param fs file system
-   * @param rootdir root directory of HBase installation
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public static boolean tableInfoExists(FileSystem fs, Path rootdir,
-      String tableName) throws IOException {
-    Path tablePath = getTableInfoPath(rootdir, tableName);
-    return fs.exists(tablePath);
-  }
-
-  /**
    * Compute HDFS blocks distribution of a given file, or a portion of the file
    * @param fs file system
    * @param FileStatus file status of the file
@@ -850,35 +834,6 @@ public abstract class FSUtils {
     return tabledirs;
   }
 
-  /**
-   * Get table info path for a table.
-   * @param rootdir
-   * @param tableName
-   * @return Table info path
-   */
-  private static Path getTableInfoPath(Path rootdir, String tablename) {
-    Path tablePath = getTablePath(rootdir, tablename);
-    return new Path(tablePath, HConstants.TABLEINFO_NAME);
-  }
-
-  /**
-   * @param fs
-   * @param rootdir
-   * @param tablename
-   * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
-   * @throws IOException
-   */
-  public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
-      final String tablename)
-  throws IOException {
-    Path p = getTableInfoPath(rootdir, tablename);
-    FileStatus [] status = fs.listStatus(p);
-    if (status == null || status.length < 1) {
-      throw new FileNotFoundException("No status for " + p.toString());
-    }
-    return status[0].getModificationTime();
-  }
-
   public static Path getTablePath(Path rootdir, byte [] tableName) {
     return getTablePath(rootdir, Bytes.toString(tableName));
   }
@@ -887,236 +842,16 @@ public abstract class FSUtils {
     return new Path(rootdir, tableName);
   }
 
-  private static FileSystem getCurrentFileSystem(Configuration conf)
-  throws IOException {
-    return getRootDir(conf).getFileSystem(conf);
-  }
-
-  /**
-   * Get HTableDescriptor
-   * @param config
-   * @param tableName
-   * @return HTableDescriptor for table
-   * @throws IOException
-   */
-  public static HTableDescriptor getHTableDescriptor(Configuration config,
-      String tableName)
-  throws IOException {
-    Path path = getRootDir(config);
-    FileSystem fs = path.getFileSystem(config);
-    return getTableDescriptor(fs, path, tableName);
-  }
-
-  /**
-   * Get HTD from HDFS.
-   * @param fs
-   * @param hbaseRootDir
-   * @param tableName
-   * @return Descriptor or null if none found.
-   * @throws IOException
-   */
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, byte[] tableName)
-  throws IOException {
-    return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs,
-      Path hbaseRootDir, String tableName) {
-    HTableDescriptor htd = null;
-    try {
-      htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
-    } catch (NullPointerException e) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , e);
-    } catch (IOException ioe) {
-      LOG.debug("Exception during readTableDecriptor. Current table name = " +
-        tableName , ioe);
-    }
-    return htd;
-  }
-
-  public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
-  throws IOException, NullPointerException {
-    if (tableDir == null) throw new NullPointerException();
-    Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    FSDataInputStream fsDataInputStream = fs.open(tableinfo);
-    HTableDescriptor hTableDescriptor = null;
-    try {
-      hTableDescriptor = new HTableDescriptor();
-      hTableDescriptor.readFields(fsDataInputStream);
-    } finally {
-      fsDataInputStream.close();
-    }
-    return hTableDescriptor;
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param htableDescriptor
-   * @param conf
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
-    return createTableDescriptor(htableDescriptor, conf, false);
-  }
-
   /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param htableDescriptor
    * @param conf
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(
-      HTableDescriptor htableDescriptor, Configuration conf,
-      boolean forceCreation) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
-      forceCreation);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor) throws IOException {
-    return createTableDescriptor(fs, rootdir, htableDescriptor, false);
-  }
-
-  /**
-   * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
-   * forceCreation is true then even if previous table descriptor is present it
-   * will be overwritten
-   *
-   * @param fs
-   * @param htableDescriptor
-   * @param rootdir
-   * @param forceCreation
-   */
-  public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor htableDescriptor, boolean forceCreation)
-  throws IOException {
-    Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
-        .getNameAsString());
-    LOG.info("Current tableInfoPath = " + tableInfoPath);
-    if (!forceCreation) {
-      if (fs.exists(tableInfoPath)
-          && fs.getFileStatus(tableInfoPath).getLen() > 0) {
-        LOG.info("TableInfo already exists.. Skipping creation");
-        return false;
-      }
-    }
-    writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
-        htableDescriptor.getNameAsString()), forceCreation);
-
-    return true;
-  }
-
-  /**
-   * Deletes a table's directory from the file system if exists. Used in unit
-   * tests.
-   */
-  public static void deleteTableDescriptorIfExists(String tableName,
-      Configuration conf) throws IOException {
-    FileSystem fs = getCurrentFileSystem(conf);
-    Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
-    if (fs.exists(tableInfoPath))
-      deleteDirectory(fs, tableInfoPath);
-  }
-
-  /**
-   * Called when we are creating a table to write out the tables' descriptor.
-   * @param fs
-   * @param hTableDescriptor
-   * @param tableDir
-   * @param forceCreation True if we are to force creation
+   * @return Returns the filesystem of the hbase rootdir.
    * @throws IOException
    */
-  private static void writeTableDescriptor(FileSystem fs,
-      HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
+  public static FileSystem getCurrentFileSystem(Configuration conf)
   throws IOException {
-    // Create in tmpdir and then move into place in case we crash after
-    // create but before close.  If we don't successfully close the file,
-    // subsequent region reopens will fail the below because create is
-    // registered in NN.
-    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
-    Path tmpPath = new Path(new Path(tableDir, ".tmp"),
-      HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
-    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
-    try {
-      writeHTD(fs, tmpPath, hTableDescriptor);
-    } catch (IOException e) {
-      LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
-        + ".", e);
-      fs.delete(tmpPath, true);
-      throw e;
-    }
-    // TODO: The below is less than ideal and likely error prone.  There is a
-    // better rename in hadoops after 0.20 that takes rename options (this has
-    // its own issues according to mighty Todd in that old readers may fail
-    // as we cross the renme transition) but until then, we have this
-    // forceCreation flag which does a delete and then we rename so there is a
-    // hole.  Need to fix.
-    try {
-      if (forceCreation) {
-        if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
-          String errMsg = "Unable to delete " + tableInfoPath
-              + " while forcefully writing the table descriptor.";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-      if (!fs.rename(tmpPath, tableInfoPath)) {
-        String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
-        LOG.error(errMsg);
-        throw new IOException(errMsg);
-      } else {
-        LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
-      }
-    } finally {
-      fs.delete(tmpPath, true);
-    }
-  }
-
-  /**
-   * Update table descriptor
-   * @param fs
-   * @param conf
-   * @param hTableDescriptor
-   * @throws IOException
-   */
-  public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
-      HTableDescriptor hTableDescriptor)
-  throws IOException {
-    Path tableInfoPath =
-      getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
-    writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
-    LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
-      hTableDescriptor.toString());
+    return getRootDir(conf).getFileSystem(conf);
   }
 
-  private static void writeHTD(final FileSystem fs, final Path p,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSDataOutputStream out = fs.create(p, true);
-    try {
-      htd.write(out);
-      out.write('\n');
-      out.write('\n');
-      out.write(Bytes.toBytes(htd.toString()));
-    } finally {
-      out.close();
-    }
-  }
-
   /**
    * Runs through the HBase rootdir and creates a reverse lookup map for
    * table StoreFile names to the full Path.
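The crash-safety idea that writeTableDescriptor() carries over unchanged from FSUtils to FSTableDescriptors is worth seeing in isolation: write the file into a .tmp sibling first, then rename it into place, so a reader never observes a half-written .tableinfo. The following is a minimal, self-contained sketch of that pattern, assuming only hadoop-core on the classpath; the class name, method name, and byte[] payload are illustrative and not part of this patch.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the tmp-write-then-rename flow used by
// writeTableDescriptor() above; not part of the patch.
public class TmpThenRenameSketch {
  public static void writeAtomically(FileSystem fs, Path finalPath, byte[] content)
  throws IOException {
    // Write into a .tmp sibling first so a crash between create and close
    // never leaves a half-written file at the final location.
    Path tmpPath = new Path(new Path(finalPath.getParent(), ".tmp"),
      finalPath.getName() + "." + System.currentTimeMillis());
    FSDataOutputStream out = fs.create(tmpPath, true);
    try {
      out.write(content);
    } finally {
      out.close();
    }
    try {
      // Pre-0.21 FileSystem.rename() will not replace an existing file, so a
      // forced overwrite needs delete-then-rename; the TODO in the patch notes
      // the small window this opens for concurrent readers.
      if (fs.exists(finalPath) && !fs.delete(finalPath, false)) {
        throw new IOException("Unable to delete existing " + finalPath);
      }
      if (!fs.rename(tmpPath, finalPath)) {
        throw new IOException("Unable to rename " + tmpPath + " to " + finalPath);
      }
    } finally {
      fs.delete(tmpPath, true); // removes the temp copy if the rename failed
    }
  }
}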
diff --git a/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 9f0499e..dbce091 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -152,7 +152,7 @@ class HMerge {
       fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
       Bytes.toString(tableName)
     );
-    this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
+    this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
     Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
         HConstants.HREGION_LOGDIR_NAME);
     Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
diff --git a/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 3aa980f..67d0fda 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -237,7 +237,7 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta2);
     }
-    HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
+    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
     HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);
 
diff --git a/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 8a69a39..395467f 100644
--- a/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -27,7 +27,6 @@ import java.io.IOException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -52,8 +51,8 @@ public class TestFSTableDescriptorForceCreation {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(fs.getWorkingDirectory(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
-    assertTrue("Should create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
+    assertTrue("Should create new table descriptor",
+      FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
   }
 
   @Test
@@ -66,8 +65,8 @@ public class TestFSTableDescriptorForceCreation {
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
-    assertFalse("Should not create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
+    assertFalse("Should not create new table descriptor",
+      FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
   }
 
   @Test
@@ -77,8 +76,8 @@ public class TestFSTableDescriptorForceCreation {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(fs.getWorkingDirectory(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
-    FSUtils.createTableDescriptor(fs, rootdir, htd, false);
-    assertTrue("Should create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, true));
+    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false);
+    assertTrue("Should create new table descriptor",
+      FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true));
   }
 }
diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 0c1042a..fc366fe 100644
--- a/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import java.util.Arrays;
+import org.apache.hadoop.fs.FileStatus;
 
 import org.junit.Test;
 
@@ -64,7 +66,7 @@ public class TestFSTableDescriptors {
     Path rootdir = HBaseTestingUtility.getTestDir(name);
     createHTDInFS(fs, rootdir, htd);
     HTableDescriptor htd2 =
-      FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
+      FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
     assertTrue(htd.equals(htd2));
   }
 
@@ -145,4 +147,30 @@ public class TestFSTableDescriptors {
     htds.add(htd);
     htds.add(htd);
   }
+
+  @Test
+  public void testTableInfoFileStatusComparator() {
+    FileStatus bare =
+      new FileStatus(0, false, 0, 0, -1, new Path("/tmp/tablinfo"));
+    FileStatus future =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/tablinfo." + System.currentTimeMillis()));
+    FileStatus farFuture =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp/tablinfo." + (System.currentTimeMillis() + 1000)));
+    FileStatus [] alist = {bare, future, farFuture};
+    FileStatus [] blist = {bare, farFuture, future};
+    FileStatus [] clist = {farFuture, bare, future};
+    FSTableDescriptors.TableInfoFileStatusComparator c =
+      new FSTableDescriptors.TableInfoFileStatusComparator();
+    Arrays.sort(alist, c);
+    Arrays.sort(blist, c);
+    Arrays.sort(clist, c);
+    // Now assert all sorted same in way we want.
+    for (int i = 0; i < alist.length; i++) {
+      assertTrue(alist[i].equals(blist[i]));
+      assertTrue(blist[i].equals(clist[i]));
+      assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
+    }
+  }
 }
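With the statics relocated, call sites resolve table descriptors through FSTableDescriptors rather than FSUtils. A minimal usage sketch follows, assuming a standard HBase client classpath; the table name is a placeholder, and, per the code above, getHTableDescriptor() returns null when the descriptor cannot be read.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

// Hypothetical caller; "myTable" is a placeholder table name.
public class DescriptorLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolves hbase.rootdir from the configuration, opens its filesystem,
    // and reads the table's .tableinfo file.
    HTableDescriptor htd =
      FSTableDescriptors.getHTableDescriptor(conf, "myTable");
    System.out.println(htd == null
      ? "No descriptor found"
      : "Loaded descriptor for " + htd.getNameAsString());
  }
}

Note that getTableInfoModtime() pairs a startsWith(TABLEINFO_NAME) PathFilter with the reverse-sorting TableInfoFileStatusComparator, so the modification time it reports (and that the modtime-based cache check in FSTableDescriptors compares against) is always that of the newest matching .tableinfo file.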