diff --git conf/hbase-env.sh conf/hbase-env.sh
index 4ae577c..2aa723d 100644
--- conf/hbase-env.sh
+++ conf/hbase-env.sh
@@ -34,7 +34,7 @@
# Below are what we set by default. May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC"
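+# Note: -XX:+CMSIncrementalMode was dropped above because incremental CMS
+# targets machines with only one or two CPUs and is deprecated in recent
+# JVMs; plain CMS is the usual choice on server-class hardware.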
# Uncomment below to enable java garbage collection logging in the .out file.
# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
diff --git src/docbkx/performance.xml src/docbkx/performance.xml
index a54fe42..c2fffbb 100644
--- src/docbkx/performance.xml
+++ src/docbkx/performance.xml
@@ -119,7 +119,10 @@
must be explicitly enabled in HBase 0.90.x (it's defaulted to be on in
0.92.x HBase). Set hbase.hregion.memstore.mslab.enabled
to true in your Configuration. See the cited
- slides for background and detail.
+      slides for background and detail. The latest JVMs do better
+      with regard to fragmentation, so make sure you are running a recent
+      release. Read down in the message,
+      Identifying concurrent mode failures caused by fragmentation.
For more information about GC logs, see .
diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java
index 76278a2..0ea80eb 100644
--- src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -182,9 +182,6 @@ public final class HConstants {
/** Used to construct the name of the compaction directory during compaction */
public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
- /** The file name used to store HTD in HDFS */
- public static final String TABLEINFO_NAME = ".tableinfo";
-
/** Default maximum file size */
public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
diff --git src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 46ca765..77db18e 100644
--- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
/**
@@ -335,9 +336,9 @@ public class MasterFileSystem {
private void createRootTableInfo(Path rd) throws IOException {
// Create ROOT tableInfo if required.
- if (!FSUtils.tableInfoExists(fs, rd,
+ if (!FSTableDescriptors.isTableInfoExists(fs, rd,
Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
- FSUtils.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
+ FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
}
}
@@ -420,7 +421,7 @@ public class MasterFileSystem {
*/
public void createTableDescriptor(HTableDescriptor htableDescriptor)
throws IOException {
- FSUtils.createTableDescriptor(htableDescriptor, conf);
+ FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
}
/**
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index b9d4b90..4600991 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -29,21 +29,21 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.zookeeper.KeeperException;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.zookeeper.KeeperException;
/**
* Handler to create a table.
@@ -137,7 +137,7 @@ public class CreateTableHandler extends EventHandler {
// tableDir is created. Should we change below method to be createTable
// where we create table in tmp dir with its table descriptor file and then
// do rename to move it into place?
- FSUtils.createTableDescriptor(this.hTableDescriptor, this.conf);
+ FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
final int batchSize =
diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 24570c9..cf435a8 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -19,20 +19,29 @@ package org.apache.hadoop.hbase.util;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
/**
@@ -42,7 +51,6 @@ import org.apache.commons.logging.LogFactory;
* the filesystem or can be read and write.
*/
public class FSTableDescriptors implements TableDescriptors {
-
private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class);
private final FileSystem fs;
private final Path rootdir;
@@ -50,6 +58,9 @@ public class FSTableDescriptors implements TableDescriptors {
long cachehits = 0;
long invocations = 0;
+ /** The file name used to store HTD in HDFS */
+ public static final String TABLEINFO_NAME = ".tableinfo";
+
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
@@ -130,7 +141,7 @@ public class FSTableDescriptors implements TableDescriptors {
// Check mod time has not changed (this is trip to NN).
long modtime =
- FSUtils.getTableInfoModtime(this.fs, this.rootdir, tablename);
+ FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, tablename);
if (tdm != null) {
if (modtime <= tdm.getModtime()) {
cachehits++;
@@ -138,7 +149,7 @@ public class FSTableDescriptors implements TableDescriptors {
}
}
HTableDescriptor htd =
- FSUtils.getTableDescriptor(this.fs, this.rootdir, tablename);
+ FSTableDescriptors.getTableDescriptor(this.fs, this.rootdir, tablename);
if (htd == null) {
// More likely is above will throw a FileNotFoundException
throw new TableExistsException("No descriptor for " + tablename);
@@ -181,9 +192,9 @@ public class FSTableDescriptors implements TableDescriptors {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) {
throw new NotImplementedException();
}
- if (!this.fsreadonly) FSUtils.updateHTableDescriptor(this.fs, this.rootdir, htd);
+ if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd);
long modtime =
- FSUtils.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
+ FSTableDescriptors.getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString());
this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd));
}
@@ -201,4 +212,317 @@ public class FSTableDescriptors implements TableDescriptors {
TableDescriptorModtime tdm = this.cache.remove(tablename);
return tdm == null? null: tdm.getTableDescriptor();
}
-}
+
+ /**
+ * Checks if .tableinfo exists for given table
+ *
+ * @param fs file system
+ * @param rootdir root directory of HBase installation
+ * @param tableName name of table
+ * @return true if exists
+ * @throws IOException
+ */
+ public static boolean isTableInfoExists(FileSystem fs, Path rootdir,
+ String tableName) throws IOException {
+ FileStatus status =
+ FSTableDescriptors.getTableInfoPath(fs, rootdir, tableName);
+ return status == null? false: fs.exists(status.getPath());
+ }
+
+ private static FileStatus getTableInfoPath(final FileSystem fs,
+ final Path rootdir, final String tableName)
+ throws IOException {
+ Path tabledir = FSUtils.getTablePath(rootdir, tableName);
+ return getTableInfoPath(fs, tabledir);
+ }
+
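+  /**
+   * Returns the newest .tableinfo file under the given table directory,
+   * i.e. the one that sorts first in reverse name order (highest
+   * sequenceid), and as a side effect deletes any older versions found.
+   */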
+ private static FileStatus getTableInfoPath(final FileSystem fs,
+ final Path tabledir)
+ throws IOException {
+ FileStatus [] status = fs.listStatus(tabledir, new PathFilter() {
+ @Override
+ public boolean accept(Path p) {
+ // Accept any file that starts with TABLEINFO_NAME
+ return p.getName().startsWith(TABLEINFO_NAME);
+ }
+ });
+ if (status == null || status.length < 1) return null;
+ Arrays.sort(status, new TableInfoFileStatusComparator());
+ if (status.length > 1) {
+ // Clean away old versions of .tableinfo
+ for (int i = 1; i < status.length; i++) {
+        if (!fs.delete(status[i].getPath(), false)) {
+          LOG.warn("Failed cleanup of " + status[i].getPath());
+ }
+ }
+ }
+ return status[0];
+ }
+
+ /**
+ * Compare {@link FileStatus} instances by {@link Path#getName()}.
+ * Returns in reverse order.
+ */
+ static class TableInfoFileStatusComparator
+  implements Comparator<FileStatus> {
+ @Override
+ public int compare(FileStatus left, FileStatus right) {
+ return -left.compareTo(right);
+ }
+ }
+
+ /**
+ * Width of the sequenceid that is suffix on tableinfo.
+ */
+ static final int WIDTH_OF_SEQUENCE_ID = 10;
+
+ /**
+ * Regex to eat up sequenceid suffix on a .tableinfo file.
+ */
+ private static final Pattern SUFFIX =
+ Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$");
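+  // For example, a bare ".tableinfo" matches with a null sequenceid group,
+  // while ".tableinfo.0000000004" matches with group(2) == "0000000004".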
+
+ /*
+ * @param number
+   * @return Returns zero-prefixed, WIDTH_OF_SEQUENCE_ID-digit-wide decimal
+   * version of the passed number (uses the absolute value in case the
+   * number is negative).
+ */
+ static String formatTableInfoSequenceId(final int number) {
+ byte [] b = new byte[WIDTH_OF_SEQUENCE_ID];
+ int d = Math.abs(number);
+ for (int i = b.length - 1; i >= 0; i--) {
+ b[i] = (byte)((d % 10) + '0');
+ d /= 10;
+ }
+ return Bytes.toString(b);
+ }
+
+ /**
+ * @param p Path to a .tableinfo file.
+   * @return The current sequenceid or 0 if none found.
+ */
+ static int getTableInfoSequenceid(final Path p) {
+ if (p == null) return 0;
+ Matcher m = SUFFIX.matcher(p.getName());
+    if (!m.matches()) return 0;
+    String suffix = m.group(2);
+    return suffix == null || suffix.length() <= 0? 0: Integer.parseInt(suffix);
+ }
+
+ /**
+ * @param tabledir
+ * @param sequenceid
+ * @return Name of tableinfo file.
+ */
+ static Path getTableInfoFileName(final Path tabledir, final int sequenceid) {
+ return new Path(tabledir,
+ TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid));
+ }
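+  // Illustrative round trip: getTableInfoFileName(tabledir, 4) names the
+  // file ".tableinfo.0000000004", and getTableInfoSequenceid() on that path
+  // returns 4.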
+
+ /**
+ * @param fs
+ * @param rootdir
+ * @param tableName
+ * @return Modification time for the table {@link #TABLEINFO_NAME} file
+ * or 0 if no tableinfo file found.
+ * @throws IOException
+ */
+ static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
+ final String tableName)
+ throws IOException {
+ FileStatus status = getTableInfoPath(fs, rootdir, tableName);
+ return status == null? 0: status.getModificationTime();
+ }
+
+ /**
+ * Get HTD from HDFS.
+ * @param fs
+ * @param hbaseRootDir
+ * @param tableName
+ * @return Descriptor or null if none found.
+ * @throws IOException
+ */
+ public static HTableDescriptor getTableDescriptor(FileSystem fs,
+ Path hbaseRootDir, byte[] tableName)
+ throws IOException {
+ return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
+ }
+
+ static HTableDescriptor getTableDescriptor(FileSystem fs,
+ Path hbaseRootDir, String tableName) {
+ HTableDescriptor htd = null;
+ try {
+ htd = getTableDescriptor(fs, FSUtils.getTablePath(hbaseRootDir, tableName));
+ } catch (NullPointerException e) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, e);
+    } catch (IOException ioe) {
+      LOG.debug("Exception during readTableDescriptor. Current table name = " +
+        tableName, ioe);
+ }
+ return htd;
+ }
+
+ public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
+ throws IOException, NullPointerException {
+ if (tableDir == null) throw new NullPointerException();
+ FileStatus status = getTableInfoPath(fs, tableDir);
+ if (status == null) return null;
+ FSDataInputStream fsDataInputStream = fs.open(status.getPath());
+ HTableDescriptor hTableDescriptor = null;
+ try {
+ hTableDescriptor = new HTableDescriptor();
+ hTableDescriptor.readFields(fsDataInputStream);
+ } finally {
+ fsDataInputStream.close();
+ }
+ return hTableDescriptor;
+ }
+
+ /**
+ * Update table descriptor
+ * @param fs
+   * @param rootdir
+ * @param hTableDescriptor
+ * @return New tableinfo
+ * @throws IOException
+ */
+ static Path updateHTableDescriptor(FileSystem fs, Path rootdir,
+ HTableDescriptor hTableDescriptor)
+ throws IOException {
+ Path tabledir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName());
+ Path p = writeTableDescriptor(fs, hTableDescriptor, tabledir, true);
+ LOG.info("Updated tableinfo=" + p);
+ return p;
+ }
+
+ private static void writeHTD(final FileSystem fs, final Path p,
+ final HTableDescriptor htd)
+ throws IOException {
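+    // Write the serialized descriptor first, then a blank line and its
+    // toString() form so the raw file is human-readable; readers only
+    // consume the leading serialized portion via readFields().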
+ FSDataOutputStream out = fs.create(p, true);
+ try {
+ htd.write(out);
+ out.write('\n');
+ out.write('\n');
+ out.write(Bytes.toBytes(htd.toString()));
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+ *
+ * @param fs
+ * @param htableDescriptor
+ * @param rootdir
+ */
+ public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+ HTableDescriptor htableDescriptor) throws IOException {
+ return createTableDescriptor(fs, rootdir, htableDescriptor, false);
+ }
+
+ /**
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+ * forceCreation is true then even if previous table descriptor is present it
+ * will be overwritten
+ *
+ * @param fs
+ * @param htableDescriptor
+ * @param rootdir
+ * @param forceCreation
+ */
+ public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
+ HTableDescriptor htableDescriptor, boolean forceCreation)
+ throws IOException {
+ FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString());
+ if (status != null) {
+ LOG.info("Current tableInfoPath = " + status.getPath());
+ if (!forceCreation) {
+ if (fs.exists(status.getPath()) && status.getLen() > 0) {
+          LOG.info("TableInfo already exists; skipping creation.");
+ return false;
+ }
+ }
+ }
+ writeTableDescriptor(fs, htableDescriptor,
+ FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()),
+ forceCreation);
+ return true;
+ }
+
+ /**
+ * Deletes a table's directory from the file system if exists. Used in unit
+ * tests.
+ */
+ public static void deleteTableDescriptorIfExists(String tableName,
+ Configuration conf) throws IOException {
+ FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+ FileStatus status =
+ FSTableDescriptors.getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
+    // The below deleteDirectory works for either file or directory.
+    if (status != null && fs.exists(status.getPath())) {
+      FSUtils.deleteDirectory(fs, status.getPath());
+    }
+ }
+
+ /**
+ * Called when we are creating a table to write out the tables' descriptor.
+ * @param fs
+ * @param hTableDescriptor
+ * @param tableDir
+ * @param forceCreation True if we are to force creation
+ * @throws IOException
+ */
+ private static Path writeTableDescriptor(FileSystem fs,
+ HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
+ throws IOException {
+ FileStatus status = getTableInfoPath(fs, tableDir);
+ int sequenceid = getTableInfoSequenceid(status == null? null: status.getPath());
+ Path tableInfoPath = null;
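+    // Bump the sequenceid until we find an unused file name; the new
+    // descriptor is written beside the old one, which is deleted only after
+    // the write succeeds.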
+ do {
+ sequenceid += 1;
+ tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
+ } while (fs.exists(tableInfoPath));
+ try {
+ writeHTD(fs, tableInfoPath, hTableDescriptor);
+ if (status != null) {
+ if (!fs.delete(status.getPath(), false)) {
+ LOG.warn("Failed delete of " + status.getPath());
+ }
+ }
+ } catch (IOException e) {
+      LOG.error("Unable to write the table descriptor in the path " +
+        tableInfoPath + ".", e);
+ fs.delete(tableInfoPath, true);
+ throw e;
+ }
+ return tableInfoPath;
+ }
+
+ /**
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+ *
+ * @param htableDescriptor
+ * @param conf
+ */
+ public static boolean createTableDescriptor(
+ HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
+ return createTableDescriptor(htableDescriptor, conf, false);
+ }
+
+ /**
+ * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
+ * forceCreation is true then even if previous table descriptor is present it
+ * will be overwritten
+ *
+ * @param htableDescriptor
+ * @param conf
+ * @param forceCreation
+ */
+ public static boolean createTableDescriptor(
+ HTableDescriptor htableDescriptor, Configuration conf,
+ boolean forceCreation) throws IOException {
+ FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+ return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
+ forceCreation);
+ }
+}
\ No newline at end of file
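
A minimal usage sketch of the relocated descriptor API (illustrative only;
not part of the patch, and the table name and setup are assumptions):

    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    HTableDescriptor htd = new HTableDescriptor("example");
    // First write lands in <rootdir>/example/.tableinfo.0000000001
    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
    // Reads back whichever .tableinfo.<seqid> is newest
    HTableDescriptor read =
        FSTableDescriptors.getTableDescriptor(fs, rootdir, Bytes.toBytes("example"));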
diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 36d90c3..79ac2b1 100644
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.util;
import java.io.DataInputStream;
import java.io.EOFException;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -44,7 +43,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -63,7 +61,7 @@ public abstract class FSUtils {
protected FSUtils() {
super();
}
-
+
public static FSUtils getInstance(FileSystem fs, Configuration conf) {
String scheme = fs.getUri().getScheme();
if (scheme == null) {
@@ -156,7 +154,8 @@ public abstract class FSUtils {
* @return true if dfs is in safemode, false otherwise.
*
*/
- private static boolean isInSafeMode(FileSystem fs) throws IOException {
+ private static boolean isInSafeMode(FileSystem fs)
+ throws IOException {
// Refactored safe-mode check for HBASE-4510
if (fs instanceof DistributedFileSystem) {
Path rootPath = new Path("/");
@@ -451,6 +450,7 @@ public abstract class FSUtils {
public static void waitOnSafeMode(final Configuration conf,
final long wait)
throws IOException {
+ Path rootDir = getRootDir(conf);
FileSystem fs = FileSystem.get(conf);
// Make sure dfs is not in safe mode
while (isInSafeMode(fs)) {
@@ -505,21 +505,6 @@ public abstract class FSUtils {
}
/**
- * Checks if .tableinfo exists for given table
- *
- * @param fs file system
- * @param rootdir root directory of HBase installation
- * @param tableName name of table
- * @return true if exists
- * @throws IOException
- */
- public static boolean tableInfoExists(FileSystem fs, Path rootdir,
- String tableName) throws IOException {
- Path tablePath = getTableInfoPath(rootdir, tableName);
- return fs.exists(tablePath);
- }
-
- /**
* Compute HDFS blocks distribution of a given file, or a portion of the file
* @param fs file system
* @param status file status of the file
@@ -864,35 +849,6 @@ public abstract class FSUtils {
return tabledirs;
}
- /**
- * Get table info path for a table.
- * @param rootdir
- * @param tableName
- * @return Table info path
- */
- private static Path getTableInfoPath(Path rootdir, String tablename) {
- Path tablePath = getTablePath(rootdir, tablename);
- return new Path(tablePath, HConstants.TABLEINFO_NAME);
- }
-
- /**
- * @param fs
- * @param rootdir
- * @param tablename
- * @return Modification time for the table {@link HConstants#TABLEINFO_NAME} file.
- * @throws IOException
- */
- public static long getTableInfoModtime(final FileSystem fs, final Path rootdir,
- final String tablename)
- throws IOException {
- Path p = getTableInfoPath(rootdir, tablename);
- FileStatus [] status = fs.listStatus(p);
- if (status == null || status.length < 1) {
- throw new FileNotFoundException("No status for " + p.toString());
- }
- return status[0].getModificationTime();
- }
-
public static Path getTablePath(Path rootdir, byte [] tableName) {
return getTablePath(rootdir, Bytes.toString(tableName));
}
@@ -901,234 +857,14 @@ public abstract class FSUtils {
return new Path(rootdir, tableName);
}
- private static FileSystem getCurrentFileSystem(Configuration conf)
- throws IOException {
- return getRootDir(conf).getFileSystem(conf);
- }
-
- /**
- * Get HTableDescriptor
- * @param config
- * @param tableName
- * @return HTableDescriptor for table
- * @throws IOException
- */
- public static HTableDescriptor getHTableDescriptor(Configuration config,
- String tableName)
- throws IOException {
- Path path = getRootDir(config);
- FileSystem fs = path.getFileSystem(config);
- return getTableDescriptor(fs, path, tableName);
- }
-
- /**
- * Get HTD from HDFS.
- * @param fs
- * @param hbaseRootDir
- * @param tableName
- * @return Descriptor or null if none found.
- * @throws IOException
- */
- public static HTableDescriptor getTableDescriptor(FileSystem fs,
- Path hbaseRootDir, byte[] tableName)
- throws IOException {
- return getTableDescriptor(fs, hbaseRootDir, Bytes.toString(tableName));
- }
-
- public static HTableDescriptor getTableDescriptor(FileSystem fs,
- Path hbaseRootDir, String tableName) {
- HTableDescriptor htd = null;
- try {
- htd = getTableDescriptor(fs, getTablePath(hbaseRootDir, tableName));
- } catch (NullPointerException e) {
- LOG.debug("Exception during readTableDecriptor. Current table name = " +
- tableName , e);
- } catch (IOException ioe) {
- LOG.debug("Exception during readTableDecriptor. Current table name = " +
- tableName , ioe);
- }
- return htd;
- }
-
- public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir)
- throws IOException, NullPointerException {
- if (tableDir == null) throw new NullPointerException();
- Path tableinfo = new Path(tableDir, HConstants.TABLEINFO_NAME);
- FSDataInputStream fsDataInputStream = fs.open(tableinfo);
- HTableDescriptor hTableDescriptor = null;
- try {
- hTableDescriptor = new HTableDescriptor();
- hTableDescriptor.readFields(fsDataInputStream);
- } finally {
- fsDataInputStream.close();
- }
- return hTableDescriptor;
- }
-
/**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table.
- *
- * @param htableDescriptor
* @param conf
- */
- public static boolean createTableDescriptor(
- HTableDescriptor htableDescriptor, Configuration conf) throws IOException {
- return createTableDescriptor(htableDescriptor, conf, false);
- }
-
- /**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
- * forceCreation is true then even if previous table descriptor is present it
- * will be overwritten
- *
- * @param htableDescriptor
- * @param conf
- * @param forceCreation
- */
- public static boolean createTableDescriptor(
- HTableDescriptor htableDescriptor, Configuration conf,
- boolean forceCreation) throws IOException {
- FileSystem fs = getCurrentFileSystem(conf);
- return createTableDescriptor(fs, getRootDir(conf), htableDescriptor,
- forceCreation);
- }
-
- /**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table.
- *
- * @param fs
- * @param htableDescriptor
- * @param rootdir
- */
- public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
- HTableDescriptor htableDescriptor) throws IOException {
- return createTableDescriptor(fs, rootdir, htableDescriptor, false);
- }
-
- /**
- * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
- * forceCreation is true then even if previous table descriptor is present it
- * will be overwritten
- *
- * @param fs
- * @param htableDescriptor
- * @param rootdir
- * @param forceCreation
- */
- public static boolean createTableDescriptor(FileSystem fs, Path rootdir,
- HTableDescriptor htableDescriptor, boolean forceCreation)
- throws IOException {
- Path tableInfoPath = getTableInfoPath(rootdir, htableDescriptor
- .getNameAsString());
- LOG.info("Current tableInfoPath = " + tableInfoPath);
- if (!forceCreation) {
- if (fs.exists(tableInfoPath)
- && fs.getFileStatus(tableInfoPath).getLen() > 0) {
- LOG.info("TableInfo already exists.. Skipping creation");
- return false;
- }
- }
- writeTableDescriptor(fs, htableDescriptor, getTablePath(rootdir,
- htableDescriptor.getNameAsString()), forceCreation);
-
- return true;
- }
-
- /**
- * Deletes a table's directory from the file system if exists. Used in unit
- * tests.
- */
- public static void deleteTableDescriptorIfExists(String tableName,
- Configuration conf) throws IOException {
- FileSystem fs = getCurrentFileSystem(conf);
- Path tableInfoPath = getTableInfoPath(getRootDir(conf), tableName);
- if (fs.exists(tableInfoPath))
- deleteDirectory(fs, tableInfoPath);
- }
-
- /**
- * Called when we are creating a table to write out the tables' descriptor.
- * @param fs
- * @param hTableDescriptor
- * @param tableDir
- * @param forceCreation True if we are to force creation
+ * @return Returns the filesystem of the hbase rootdir.
* @throws IOException
*/
- private static void writeTableDescriptor(FileSystem fs,
- HTableDescriptor hTableDescriptor, Path tableDir, boolean forceCreation)
+ public static FileSystem getCurrentFileSystem(Configuration conf)
throws IOException {
- // Create in tmpdir and then move into place in case we crash after
- // create but before close. If we don't successfully close the file,
- // subsequent region reopens will fail the below because create is
- // registered in NN.
- Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
- Path tmpPath = new Path(new Path(tableDir, ".tmp"),
- HConstants.TABLEINFO_NAME + "." + System.currentTimeMillis());
- LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
- try {
- writeHTD(fs, tmpPath, hTableDescriptor);
- } catch (IOException e) {
- LOG.error("Unable to write the tabledescriptor in the path" + tmpPath
- + ".", e);
- fs.delete(tmpPath, true);
- throw e;
- }
- // TODO: The below is less than ideal and likely error prone. There is a
- // better rename in hadoops after 0.20 that takes rename options (this has
- // its own issues according to mighty Todd in that old readers may fail
- // as we cross the renme transition) but until then, we have this
- // forceCreation flag which does a delete and then we rename so there is a
- // hole. Need to fix.
- try {
- if (forceCreation) {
- if (fs.exists(tableInfoPath) && !fs.delete(tableInfoPath, false)) {
- String errMsg = "Unable to delete " + tableInfoPath
- + " while forcefully writing the table descriptor.";
- LOG.error(errMsg);
- throw new IOException(errMsg);
- }
- }
- if (!fs.rename(tmpPath, tableInfoPath)) {
- String errMsg = "Unable to rename " + tmpPath + " to " + tableInfoPath;
- LOG.error(errMsg);
- throw new IOException(errMsg);
- } else {
- LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
- }
- } finally {
- fs.delete(tmpPath, true);
- }
- }
-
- /**
- * Update table descriptor
- * @param fs
- * @param rootdir
- * @param hTableDescriptor
- * @throws IOException
- */
- public static void updateHTableDescriptor(FileSystem fs, Path rootdir,
- HTableDescriptor hTableDescriptor)
- throws IOException {
- Path tableInfoPath =
- getTableInfoPath(rootdir, hTableDescriptor.getNameAsString());
- writeTableDescriptor(fs, hTableDescriptor, tableInfoPath.getParent(), true);
- LOG.info("Updated tableinfo=" + tableInfoPath + " to " +
- hTableDescriptor.toString());
- }
-
- private static void writeHTD(final FileSystem fs, final Path p,
- final HTableDescriptor htd)
- throws IOException {
- FSDataOutputStream out = fs.create(p, true);
- try {
- htd.write(out);
- out.write('\n');
- out.write('\n');
- out.write(Bytes.toBytes(htd.toString()));
- } finally {
- out.close();
- }
+ return getRootDir(conf).getFileSystem(conf);
}
/**
diff --git src/main/java/org/apache/hadoop/hbase/util/HMerge.java src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index a6f6b69..440174c 100644
--- src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -152,7 +152,7 @@ class HMerge {
fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
Bytes.toString(tableName)
);
- this.htd = FSUtils.getTableDescriptor(this.fs, this.tabledir);
+ this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
diff --git src/main/java/org/apache/hadoop/hbase/util/Merge.java src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 3aa980f..67d0fda 100644
--- src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -237,7 +237,7 @@ public class Merge extends Configured implements Tool {
if (info2 == null) {
throw new NullPointerException("info2 is null using key " + meta2);
}
- HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()),
+ HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()),
this.rootdir, this.tableName);
HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2);
diff --git src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 8404d1b..6a3cd34 100644
--- src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -1,73 +1,71 @@
-/**
- * Copyright 2011 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.junit.*;
-
-public class TestFSTableDescriptorForceCreation {
- private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
- @Test
- public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
- throws IOException {
- final String name = "newTable2";
- FileSystem fs = FileSystem.get(UTIL.getConfiguration());
- Path rootdir = new Path(UTIL.getDataTestDir(), name);
- HTableDescriptor htd = new HTableDescriptor(name);
-
- assertTrue("Should create new table descriptor",
- FSUtils.createTableDescriptor(fs, rootdir, htd, false));
- }
-
- @Test
- public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
- throws IOException {
- final String name = "testAlreadyExists";
- FileSystem fs = FileSystem.get(UTIL.getConfiguration());
- // Cleanup old tests if any detrius laying around.
- Path rootdir = new Path(UTIL.getDataTestDir(), name);
- TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
- HTableDescriptor htd = new HTableDescriptor(name);
- htds.add(htd);
- assertFalse("Should not create new table descriptor", FSUtils
- .createTableDescriptor(fs, rootdir, htd, false));
- }
-
- @Test
- public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
- throws Exception {
- final String name = "createNewTableNew2";
- FileSystem fs = FileSystem.get(UTIL.getConfiguration());
- Path rootdir = new Path(UTIL.getDataTestDir(), name);
- HTableDescriptor htd = new HTableDescriptor(name);
- FSUtils.createTableDescriptor(fs, rootdir, htd, false);
- assertTrue("Should create new table descriptor", FSUtils
- .createTableDescriptor(fs, rootdir, htd, true));
- }
-}
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.junit.*;
+
+public class TestFSTableDescriptorForceCreation {
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ @Test
+ public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
+ throws IOException {
+ final String name = "newTable2";
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+ Path rootdir = new Path(UTIL.getDataTestDir(), name);
+ HTableDescriptor htd = new HTableDescriptor(name);
+ assertTrue("Should create new table descriptor",
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
+ }
+
+ @Test
+ public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse()
+ throws IOException {
+ final String name = "testAlreadyExists";
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+    // Clean up any detritus lying around from old test runs.
+ Path rootdir = new Path(UTIL.getDataTestDir(), name);
+ TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+ HTableDescriptor htd = new HTableDescriptor(name);
+ htds.add(htd);
+ assertFalse("Should not create new table descriptor",
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false));
+ }
+
+ @Test
+ public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
+ throws Exception {
+ final String name = "createNewTableNew2";
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+ Path rootdir = new Path(UTIL.getDataTestDir(), name);
+ HTableDescriptor htd = new HTableDescriptor(name);
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false);
+ assertTrue("Should create new table descriptor",
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true));
+ }
+}
diff --git src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 49bcf02..722c086 100644
--- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -357,7 +357,7 @@ public class TestMasterFailover {
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
// Write the .tableinfo
- FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+ FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -369,7 +369,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
- FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+ FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
List disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
@@ -681,7 +681,7 @@ public class TestMasterFailover {
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
// Write the .tableinfo
- FSUtils.createTableDescriptor(filesystem, rootdir, htdEnabled);
+ FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null);
HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
@@ -693,7 +693,7 @@ public class TestMasterFailover {
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
- FSUtils.createTableDescriptor(filesystem, rootdir, htdDisabled);
+ FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index b2cb233..9c9b080 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.Test;
@@ -68,11 +69,11 @@ public class TestHRegionInfo {
// Delete the temporary table directory that might still be there from the
// previous test run.
- FSUtils.deleteTableDescriptorIfExists(tablename,
+ FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
HTU.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(tablename);
- FSUtils.createTableDescriptor(htd, HTU.getConfiguration());
+ FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
HTableDescriptor htd2 = hri.getTableDesc();
diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index 8fce6ec..e2b2815 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -43,6 +45,58 @@ public class TestFSTableDescriptors {
private static final Log LOG = LogFactory.getLog(TestFSTableDescriptors.class);
@Test
+ public void testSequenceidAdvancesOnTableInfo() throws IOException {
+ Path testdir = UTIL.getDataTestDir();
+ HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+ Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+ int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
+ Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+ // Assert we cleaned up the old file.
+ assertTrue(!fs.exists(p0));
+ int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
+ assertTrue(i1 == i0 + 1);
+ Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
+ // Assert we cleaned up the old file.
+ assertTrue(!fs.exists(p1));
+ int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
+ assertTrue(i2 == i1 + 1);
+ }
+
+ @Test
+ public void testFormatTableInfoSequenceId() {
+ Path p0 = assertWriteAndReadSequenceid(0);
+ // Assert p0 has format we expect.
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
+ sb.append("0");
+ }
+ assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
+ p0.getName());
+ // Check a few more.
+ Path p2 = assertWriteAndReadSequenceid(2);
+ Path p10000 = assertWriteAndReadSequenceid(10000);
+    // Get a .tableinfo that has no sequenceid suffix.
+ Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
+ FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
+ FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
+ FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
+ FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
+ FSTableDescriptors.TableInfoFileStatusComparator comparator =
+ new FSTableDescriptors.TableInfoFileStatusComparator();
+ assertTrue(comparator.compare(fs, fs0) > 0);
+ assertTrue(comparator.compare(fs0, fs2) > 0);
+ assertTrue(comparator.compare(fs2, fs10000) > 0);
+ }
+
+ private Path assertWriteAndReadSequenceid(final int i) {
+ Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
+ int ii = FSTableDescriptors.getTableInfoSequenceid(p);
+ assertEquals(i, ii);
+ return p;
+ }
+
+ @Test
public void testRemoves() throws IOException {
final String name = "testRemoves";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -62,14 +116,14 @@ public class TestFSTableDescriptors {
Path rootdir = UTIL.getDataTestDir(name);
createHTDInFS(fs, rootdir, htd);
HTableDescriptor htd2 =
- FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
+ FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
assertTrue(htd.equals(htd2));
}
private void createHTDInFS(final FileSystem fs, Path rootdir,
final HTableDescriptor htd)
throws IOException {
- FSUtils.createTableDescriptor(fs, rootdir, htd);
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
}
@Test public void testHTableDescriptors()
@@ -102,7 +156,7 @@ public class TestFSTableDescriptors {
for (int i = 0; i < count; i++) {
HTableDescriptor htd = new HTableDescriptor(name + i);
htd.addFamily(new HColumnDescriptor("" + i));
- FSUtils.updateHTableDescriptor(fs, rootdir, htd);
+ FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@@ -121,7 +175,7 @@ public class TestFSTableDescriptors {
htds.cachehits >= ((count * 2) + 1));
}
- @Test (expected=java.io.FileNotFoundException.class)
+ @Test (expected=org.apache.hadoop.hbase.TableExistsException.class)
public void testNoSuchTable() throws IOException {
final String name = "testNoSuchTable";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@@ -143,4 +197,30 @@ public class TestFSTableDescriptors {
htds.add(htd);
htds.add(htd);
}
-}
+
+ @Test
+ public void testTableInfoFileStatusComparator() {
+ FileStatus bare =
+ new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
+    FileStatus future =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME + "." + System.currentTimeMillis()));
+    FileStatus farFuture =
+      new FileStatus(0, false, 0, 0, -1,
+        new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME + "." + (System.currentTimeMillis() + 1000)));
+ FileStatus [] alist = {bare, future, farFuture};
+ FileStatus [] blist = {bare, farFuture, future};
+ FileStatus [] clist = {farFuture, bare, future};
+ FSTableDescriptors.TableInfoFileStatusComparator c =
+ new FSTableDescriptors.TableInfoFileStatusComparator();
+ Arrays.sort(alist, c);
+ Arrays.sort(blist, c);
+ Arrays.sort(clist, c);
+ // Now assert all sorted same in way we want.
+ for (int i = 0; i < alist.length; i++) {
+ assertTrue(alist[i].equals(blist[i]));
+ assertTrue(blist[i].equals(clist[i]));
+ assertTrue(clist[i].equals(i == 0? farFuture: i == 1? future: bare));
+ }
+ }
+}
\ No newline at end of file
diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 1ad30e6..aaddeca 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -35,7 +35,6 @@ import org.junit.Test;
* Test {@link FSUtils}.
*/
public class TestFSUtils {
-
@Test public void testIsHDFS() throws Exception {
HBaseTestingUtility htu = new HBaseTestingUtility();
htu.getConfiguration().setBoolean("dfs.support.append", false);
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index ffc8d9d..b09644b 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -96,7 +96,7 @@ public class TestMergeTable {
// Create regions and populate them at same time. Create the tabledir
// for them first.
- FSUtils.createTableDescriptor(fs, rootdir, desc);
+ FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
HRegion [] regions = {
createRegion(desc, null, row_70001, 1, 70000, rootdir),
createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index 88dc9de..b92cacd 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -139,7 +139,7 @@ public class TestMergeTool extends HBaseTestCase {
try {
// Create root and meta regions
createRootAndMetaRegions();
- FSUtils.createTableDescriptor(this.fs, this.testDir, this.desc);
+ FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
/*
* Create the regions we will merge
*/