From 6ade3620d35b5a9423107faa813223fef0b4ad5b Mon Sep 17 00:00:00 2001
From: chenheng
Date: Wed, 24 Aug 2016 16:19:16 +0800
Subject: [PATCH] HBASE-16490 Fix race condition between SnapshotManager and
 SnapshotCleaner

---
 .../org/apache/hadoop/hbase/master/HMaster.java    |  5 ++-
 .../master/cleaner/BaseFileCleanerDelegate.java    |  7 +++
 .../hadoop/hbase/master/cleaner/CleanerChore.java  | 16 ++++++-
 .../hbase/master/cleaner/FileCleanerDelegate.java  |  8 ++++
 .../hadoop/hbase/master/cleaner/HFileCleaner.java  | 12 +++++-
 .../snapshot/DisabledTableSnapshotHandler.java     |  4 +-
 .../snapshot/EnabledTableSnapshotHandler.java      |  2 +-
 .../hbase/master/snapshot/SnapshotFileCache.java   | 29 +++++++++----
 .../master/snapshot/SnapshotHFileCleaner.java      | 14 +++++-
 .../hbase/master/snapshot/SnapshotManager.java     | 50 +++++++++++++++++++++-
 .../hbase/master/snapshot/TakeSnapshotHandler.java |  9 +++-
 .../master/snapshot/TestSnapshotFileCache.java     |  6 +--
 .../master/snapshot/TestSnapshotHFileCleaner.java  | 13 +++---
 13 files changed, 144 insertions(+), 31 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5b3984b..f4c2c1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -973,8 +974,10 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     //start the hfile archive cleaner thread
     Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put(MASTER, this);
     this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
-        .getFileSystem(), archiveDir);
+        .getFileSystem(), archiveDir, params);
     getChoreService().scheduleChore(hfileCleaner);
     serviceStarted = true;
     if (LOG.isTraceEnabled()) {
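The HMaster hunk above is where the shared state enters the cleaner machinery: the master drops a reference to itself into a params map under the MASTER key, and that map rides along into every cleaner delegate. A minimal, standalone sketch of the intended flow — class names and the "master" key string here are illustrative stand-ins, not the HBase API:

import java.util.HashMap;
import java.util.Map;

public class ParamsFlowDemo {
  // Stand-in for FileCleanerDelegate#init(Map): delegates receive shared
  // master-side objects through an untyped params map.
  interface Delegate {
    void init(Map<String, Object> params);
  }

  static class MasterAwareDelegate implements Delegate {
    private Object master; // would be the HMaster instance in the real code

    @Override
    public void init(Map<String, Object> params) {
      if (params != null && params.containsKey("master")) {
        this.master = params.get("master");
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Object> params = new HashMap<String, Object>();
    params.put("master", new Object()); // stands in for params.put(MASTER, this)
    Delegate delegate = new MasterAwareDelegate();
    delegate.init(params); // the cleaner chore performs this call after instantiation
  }
}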
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
index c6955d0..891db22 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.BaseConfigurable;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 
+import java.util.Map;
+
 /**
  * Base class for file cleaners which allows subclasses to implement a simple
  * isFileDeletable method (which used to be the FileCleanerDelegate contract).
@@ -39,6 +41,11 @@ implements FileCleanerDelegate {
     }});
   }
 
+  @Override
+  public void init(Map<String, Object> params) {
+    // Subclasses can override this if needed.
+  }
+
   /**
    * Should the master delete the file or keep it?
    * @param fStat file status of the file to check
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 5a93a6d..d35f403 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.cleaner;
 import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -29,6 +30,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.ipc.RemoteException;
 
@@ -49,6 +52,12 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore {
   private final Path oldFileDir;
   private final Configuration conf;
   protected List<T> cleanersChain;
+  protected Map<String, Object> params;
+
+  public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
+      FileSystem fs, Path oldFileDir, String confKey) {
+    this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, null);
+  }
 
   /**
    * @param name name of the chore being run
@@ -58,17 +67,19 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore {
    * @param fs handle to the FS
    * @param oldFileDir the path to the archived files
    * @param confKey configuration key for the classes to instantiate
+   * @param params parameters that can be passed to the cleaner delegates
    */
   public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
-      FileSystem fs, Path oldFileDir, String confKey) {
+      FileSystem fs, Path oldFileDir, String confKey, Map<String, Object> params) {
     super(name, s, sleepPeriod);
     this.fs = fs;
     this.oldFileDir = oldFileDir;
     this.conf = conf;
-
+    this.params = params;
     initCleanerChain(confKey);
   }
 
+
   /**
    * Validate the file to see if it even belongs in the directory. If it is valid, then the file
    * will go through the cleaner delegates, but otherwise the file is just deleted.
@@ -109,6 +120,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore {
       @SuppressWarnings("unchecked")
       T cleaner = (T) c.newInstance();
       cleaner.setConf(conf);
+      cleaner.init(this.params);
       return cleaner;
     } catch (Exception e) {
       LOG.warn("Can NOT create CleanerDelegate: " + className, e);
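CleanerChore#newFileCleaner (the last hunk above) is the single point where delegates are created, so threading the params map through that one call covers every configured plugin. A hedged, self-contained sketch of that reflective wiring — names are illustrative, and setConf is elided:

import java.util.Map;

public class ReflectiveInitDemo {
  public interface Delegate {
    void init(Map<String, Object> params);
  }

  public static class NoopDelegate implements Delegate {
    @Override
    public void init(Map<String, Object> params) {
      // a real delegate would pull shared instances out of params here
    }
  }

  // Mirrors the instantiate-then-init order of CleanerChore#newFileCleaner:
  // create the delegate by class name, then hand over the shared params map,
  // which may legitimately be null.
  static Delegate newDelegate(String className, Map<String, Object> params) throws Exception {
    Class<?> c = Class.forName(className);
    Delegate cleaner = (Delegate) c.newInstance();
    cleaner.init(params);
    return cleaner;
  }

  public static void main(String[] args) throws Exception {
    newDelegate(ReflectiveInitDemo.NoopDelegate.class.getName(), null);
  }
}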
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
index b11fd80..7a15b96 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.Stoppable;
 
+import java.util.Map;
+
 /**
  * General interface for cleaning files from a folder (generally an archive or
  * backup folder). These are chained via the {@link CleanerChore} to determine
@@ -36,4 +38,10 @@ public interface FileCleanerDelegate extends Configurable, Stoppable {
    * @return files that are ok to delete according to this cleaner
    */
   Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files);
+
+
+  /**
+   * Used to pass master-side instances into the cleaner implementation.
+   */
+  void init(Map<String, Object> params);
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 2785155..89c316b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.cleaner;
 
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -35,16 +36,23 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
 
   public static final String MASTER_HFILE_CLEANER_PLUGINS = "hbase.master.hfilecleaner.plugins";
 
+  public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
+      Path directory) {
+    this(period, stopper, conf, fs, directory, null);
+  }
+
   /**
    * @param period the period of time to sleep between each run
    * @param stopper the stopper
    * @param conf configuration to use
    * @param fs handle to the FS
    * @param directory directory to be cleaned
+   * @param params parameters that could be used in subclasses of BaseHFileCleanerDelegate
    */
   public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-      Path directory) {
-    super("HFileCleaner", period, stopper, conf, fs, directory, MASTER_HFILE_CLEANER_PLUGINS);
+      Path directory, Map<String, Object> params) {
+    super("HFileCleaner", period, stopper, conf, fs,
+      directory, MASTER_HFILE_CLEANER_PLUGINS, params);
   }
 
   @Override
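Both CleanerChore and HFileCleaner keep their old constructors and delegate to the new params-taking overloads with null, so existing call sites compile unchanged. A minimal sketch of that telescoping-constructor pattern — an illustrative class, not HBase code:

import java.util.Map;

public class TelescopingCtorDemo {
  private final Map<String, Object> params;

  // Legacy signature: delegates to the new one with params = null,
  // so pre-existing callers need no change.
  public TelescopingCtorDemo(String name) {
    this(name, null);
  }

  public TelescopingCtorDemo(String name, Map<String, Object> params) {
    this.params = params; // may legitimately be null; consumers must tolerate that
  }

  public static void main(String[] args) {
    new TelescopingCtorDemo("cleaner");       // old call site
    new TelescopingCtorDemo("cleaner", null); // new call site, explicit params
  }
}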
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
index 5d59229..a7c2652 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
@@ -58,8 +58,8 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
    * @param masterServices master services provider
    */
   public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
-      final MasterServices masterServices) {
-    super(snapshot, masterServices);
+      final MasterServices masterServices, final SnapshotManager snapshotManager) {
+    super(snapshot, masterServices, snapshotManager);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index 7e047ac..f545a82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -50,7 +50,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
 
   public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
       final SnapshotManager manager) {
-    super(snapshot, master);
+    super(snapshot, master, manager);
     this.coordinator = manager.getCoordinator();
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index 5b367c5..b51bba7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -177,7 +177,8 @@ public class SnapshotFileCache implements Stoppable {
   // XXX this is inefficient to synchronize on the method, when what we really need to guard against
   // is an illegal access to the cache. Really we could do a mutex-guarded pointer swap on the
   // cache, but that seems overkill at the moment and isn't necessarily a bottleneck.
-  public synchronized Iterable<FileStatus> getUnreferencedFiles(Iterable<FileStatus> files)
+  public synchronized Iterable<FileStatus> getUnreferencedFiles(Iterable<FileStatus> files,
+      final SnapshotManager snapshotManager)
       throws IOException {
     List<FileStatus> unReferencedFiles = Lists.newArrayList();
     List<String> snapshotsInProgress = null;
@@ -192,7 +193,7 @@ public class SnapshotFileCache implements Stoppable {
         continue;
       }
       if (snapshotsInProgress == null) {
-        snapshotsInProgress = getSnapshotsInProgress();
+        snapshotsInProgress = getSnapshotsInProgress(snapshotManager);
       }
       if (snapshotsInProgress.contains(fileName)) {
         continue;
@@ -292,8 +293,13 @@ public class SnapshotFileCache implements Stoppable {
     this.snapshots.clear();
     this.snapshots.putAll(known);
   }
-
+  @VisibleForTesting
   List<String> getSnapshotsInProgress() throws IOException {
+    return getSnapshotsInProgress(null);
+  }
+
+  List<String> getSnapshotsInProgress(
+      final SnapshotManager snapshotManager) throws IOException {
     List<String> snapshotInProgress = Lists.newArrayList();
     // only add those files to the cache, but not to the known snapshots
     Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
@@ -301,20 +307,25 @@ public class SnapshotFileCache implements Stoppable {
     FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
     if (running != null) {
       for (FileStatus run : running) {
+        SnapshotManager.Entry entry = null;
+        if (snapshotManager != null) {
+          entry = snapshotManager.acquireLockEntry(run.getPath().getName());
+        }
         try {
           snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
         } catch (CorruptedSnapshotException e) {
           // See HBASE-16464
           if (e.getCause() instanceof FileNotFoundException) {
-            // If the snapshot is not in progress, we will delete it
-            if (!fs.exists(new Path(run.getPath(),
-              SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
-              fs.delete(run.getPath(), true);
-              LOG.warn("delete the " + run.getPath() + " due to exception:", e.getCause());
-            }
+            // If the snapshot is corrupt, we will delete it
+            fs.delete(run.getPath(), true);
+            LOG.warn("delete the " + run.getPath() + " due to exception:", e.getCause());
           } else {
             throw e;
           }
+        } finally {
+          if (snapshotManager != null && entry != null) {
+            snapshotManager.releaseLockEntry(entry);
+          }
         }
       }
     }
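The SnapshotFileCache change above establishes the cleaner's side of the handshake: before inspecting an in-progress snapshot directory it acquires that snapshot's lock entry, and it always releases it in a finally block. A standalone sketch of that acquire/inspect/release discipline, with a stand-in LockManager in place of SnapshotManager (which tests legitimately pass as null):

public class GuardedScanDemo {
  // Stand-in for SnapshotManager's acquireLockEntry/releaseLockEntry pair.
  interface LockManager {
    Object acquire(String snapshotName);
    void release(Object entry);
  }

  static void inspectSnapshotDir(LockManager manager, String snapshotName) {
    Object entry = null;
    if (manager != null) { // tests pass null and skip locking entirely
      entry = manager.acquire(snapshotName);
    }
    try {
      // list the snapshot's files here; holding the lock keeps the snapshot
      // taker from completing or deleting the directory mid-scan
    } finally {
      if (manager != null && entry != null) {
        manager.release(entry); // release even if the inspection throws
      }
    }
  }

  public static void main(String[] args) {
    inspectSnapshotDir(null, "snap1"); // the null-manager (test-only) path
  }
}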
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
index df03d63..0e14bed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.snapshot;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
@@ -57,10 +59,12 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
   /** File cache for HFiles in the completed and currently running snapshots */
   private SnapshotFileCache cache;
 
+  private HMaster master;
+
   @Override
   public synchronized Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
     try {
-      return cache.getUnreferencedFiles(files);
+      return cache.getUnreferencedFiles(files, master.getSnapshotManager());
     } catch (CorruptedSnapshotException cse) {
       LOG.debug("Corrupted in-progress snapshot file exception, ignored ", cse);
     } catch (IOException e) {
@@ -70,6 +74,13 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
   }
 
   @Override
+  public void init(Map<String, Object> params) {
+    if (params.containsKey(HMaster.MASTER)) {
+      this.master = (HMaster) params.get(HMaster.MASTER);
+    }
+  }
+
+  @Override
   protected boolean isFileDeletable(FileStatus fStat) {
     return false;
   }
@@ -93,6 +104,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
     }
   }
 
+
   @Override
   public void stop(String why) {
     this.cache.stop(why);
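One subtlety in the init() override above: it dereferences params without a null check, so it relies on this delegate always being constructed through the params-taking HFileCleaner path. A defensive variant — hypothetical, not part of the patch — would guard both lookups:

import java.util.Map;

public class DefensiveInitDemo {
  private Object master; // would be HMaster in the real code

  // Hypothetical defensive variant of SnapshotHFileCleaner#init: tolerates a
  // null map so a legacy (no-params) construction path cannot NPE here.
  public void init(Map<String, Object> params) {
    if (params != null && params.containsKey("master")) {
      this.master = params.get("master");
    }
  }

  public static void main(String[] args) {
    new DefensiveInitDemo().init(null); // safe on the legacy path
  }
}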
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 0304e38..29ea789 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -27,7 +27,10 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -84,6 +87,7 @@ import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -160,6 +164,25 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable {
   private Path rootDir;
   private ExecutorService executorService;
 
+  /**
+   * Locks for snapshot operations.
+   * The key is the in-progress snapshot's file name; the value is the related lock, shared by:
+   * - create snapshot
+   * - SnapshotCleaner
+   */
+  private ConcurrentMap<String, Entry> locks =
+      new ConcurrentHashMap<String, Entry>();
+
+  class Entry {
+    String filename;
+    Object lock;
+    public Entry(String filename) {
+      this.filename = filename;
+      lock = new Object();
+    }
+  }
+
+
   public SnapshotManager() {}
 
 /**
@@ -471,7 +494,7 @@
 
     // Take the snapshot of the disabled table
     DisabledTableSnapshotHandler handler =
-        new DisabledTableSnapshotHandler(snapshot, master);
+        new DisabledTableSnapshotHandler(snapshot, master, this);
     snapshotTable(snapshot, handler);
   }
 
@@ -1172,4 +1195,29 @@
     builder.setType(SnapshotDescription.Type.FLUSH);
     return builder.build();
   }
+
+  public Entry acquireLockEntry(String filename) {
+    Entry entry = new Entry(filename);
+    Entry existing = locks.putIfAbsent(filename, entry);
+    if (existing != null) {
+      synchronized (existing) {
+        if (locks.get(filename) != null) { // double check that the entry has not been removed from locks
+          try {
+            existing.wait();
+          } catch (InterruptedException e) {
+            LOG.warn("Interrupted while waiting on " + filename);
+          }
+          return existing;
+        }
+      }
+    }
+    return entry;
+  }
+
+  public void releaseLockEntry(Entry entry) {
+    synchronized (entry) {
+      entry.notifyAll();
+      locks.remove(entry.filename);
+    }
+  }
 }
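The acquireLockEntry/releaseLockEntry pair above is the heart of the fix: whichever of the snapshot taker or the cleaner names a snapshot second blocks until the first releases. A self-contained sketch of that putIfAbsent + wait/notifyAll handshake — class and method names here are illustrative, and unlike the patch this version restores the interrupt flag:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LockEntryDemo {
  static final class Entry {
    final String name;
    Entry(String name) { this.name = name; }
  }

  private final ConcurrentMap<String, Entry> locks = new ConcurrentHashMap<String, Entry>();

  Entry acquire(String name) {
    Entry entry = new Entry(name);
    Entry existing = locks.putIfAbsent(name, entry);
    if (existing != null) {
      synchronized (existing) {
        // double check: the holder may have released between putIfAbsent and here
        if (locks.get(name) != null) {
          try {
            existing.wait(); // park until the holder calls release()
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
          return existing;
        }
      }
    }
    return entry;
  }

  void release(Entry entry) {
    synchronized (entry) {
      entry.notifyAll();         // wake any waiter parked on this entry
      locks.remove(entry.name);
    }
  }

  public static void main(String[] args) throws Exception {
    final LockEntryDemo demo = new LockEntryDemo();
    final Entry held = demo.acquire("snap");
    Thread waiter = new Thread(new Runnable() {
      public void run() {
        demo.acquire("snap"); // blocks until the main thread releases
      }
    });
    waiter.start();
    Thread.sleep(100);
    demo.release(held);
    waiter.join();
  }
}

The second locks.get() check inside the synchronized block is what prevents a missed notifyAll: without it, a releaser finishing between putIfAbsent and the synchronized block would leave a late arriver waiting forever.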
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 8967a70..166e3da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -87,6 +87,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements SnapshotSentinel, ForeignExceptionSnare {
   protected final MonitoredTask status;
   protected final TableName snapshotTable;
   protected final SnapshotManifest snapshotManifest;
+  protected final SnapshotManager snapshotManager;
 
   protected HTableDescriptor htd;
 
@@ -94,13 +95,15 @@ public abstract class TakeSnapshotHandler extends EventHandler implements SnapshotSentinel, ForeignExceptionSnare {
    * @param snapshot descriptor of the snapshot to take
    * @param masterServices master services provider
    */
-  public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices) {
+  public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices,
+      final SnapshotManager snapshotManager) {
     super(masterServices, EventType.C_M_SNAPSHOT_TABLE);
     assert snapshot != null : "SnapshotDescription must not be nul1";
     assert masterServices != null : "MasterServices must not be nul1";
 
     this.master = masterServices;
     this.snapshot = snapshot;
+    this.snapshotManager = snapshotManager;
     this.snapshotTable = TableName.valueOf(snapshot.getTable());
     this.conf = this.master.getConfiguration();
     this.fs = this.master.getMasterFileSystem().getFileSystem();
@@ -160,11 +163,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements SnapshotSentinel, ForeignExceptionSnare {
     String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " "
         + eventType + " on table " + snapshotTable;
     LOG.info(msg);
+    SnapshotManager.Entry entry = snapshotManager.acquireLockEntry(snapshot.getName());
     status.setStatus(msg);
     try {
       // If regions move after this meta scan, the region specific snapshot should fail, triggering
       // an external exception that gets captured here.
-      SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
+      // write down the snapshot info in the working directory
       SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
       snapshotManifest.addTableDescriptor(this.htd);
 
@@ -228,6 +232,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements SnapshotSentinel, ForeignExceptionSnare {
       } catch (IOException e) {
        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
       }
+      snapshotManager.releaseLockEntry(entry);
       releaseTableLock();
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
index 2e0c14c..2b69943 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
@@ -159,7 +159,7 @@ public class TestSnapshotFileCache {
     FSUtils.logFileSystemState(fs, rootDir, LOG);
 
     List<FileStatus> allStoreFiles = getStoreFilesForSnapshot(complete);
-    Iterable<FileStatus> deletableFiles = cache.getUnreferencedFiles(allStoreFiles);
+    Iterable<FileStatus> deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
     assertTrue(Iterables.isEmpty(deletableFiles));
     // no need for tmp dir check as all files are accounted for.
     assertEquals(0, count.get() - countBeforeCheck);
@@ -168,7 +168,7 @@ public class TestSnapshotFileCache {
     // add a random file to make sure we refresh
     FileStatus randomFile = mockStoreFile(UUID.randomUUID().toString());
     allStoreFiles.add(randomFile);
-    deletableFiles = cache.getUnreferencedFiles(allStoreFiles);
+    deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
     assertEquals(randomFile, Iterables.getOnlyElement(deletableFiles));
     assertEquals(1, count.get() - countBeforeCheck); // we check the tmp directory
   }
@@ -275,7 +275,7 @@ public class TestSnapshotFileCache {
   private Iterable<FileStatus> getNonSnapshotFiles(SnapshotFileCache cache, Path storeFile)
       throws IOException {
     return cache.getUnreferencedFiles(
-        Arrays.asList(FSUtils.listStatus(fs, storeFile.getParent()))
+        Arrays.asList(FSUtils.listStatus(fs, storeFile.getParent())), null
     );
   }
 }
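The test updates above encode the other half of the contract: getUnreferencedFiles() must accept a null SnapshotManager and simply skip the locking. A toy illustration of that null-tolerant signature — stand-in types, not the HBase classes:

import java.util.Arrays;
import java.util.List;

public class NullManagerContractDemo {
  // Stand-in signature mirroring getUnreferencedFiles(files, snapshotManager).
  static List<String> getUnreferencedFiles(List<String> files, Object snapshotManager) {
    // snapshotManager may be null: test callers do not wire a SnapshotManager,
    // and the implementation must treat that as "no locking required"
    return files;
  }

  public static void main(String[] args) {
    List<String> deletable = getUnreferencedFiles(Arrays.asList("hfile-1"), null);
    System.out.println(deletable); // [hfile-1]
  }
}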
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
index 88ca5a0..7556c94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
@@ -169,22 +169,21 @@ public class TestSnapshotHFileCleaner {
     }
   }
 
-    /**
-     * HBASE-16464
-     */
+  /**
+   * HBASE-16464
+   */
   @Test
   public void testMissedTmpSnapshot() throws IOException {
     SnapshotTestingUtils.SnapshotMock
-      snapshotMock = new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+        snapshotMock = new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
     SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(
-      SNAPSHOT_NAME_STR, TABLE_NAME_STR);
+        SNAPSHOT_NAME_STR, TABLE_NAME_STR);
     builder.addRegionV2();
     builder.missOneRegionSnapshotFile();
-   long period = Long.MAX_VALUE;
+    long period = Long.MAX_VALUE;
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-      "test-snapshot-file-cache-refresh", new SnapshotFiles());
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
     cache.getSnapshotsInProgress();
     assertFalse(fs.exists(builder.getSnapshotsDir()));
   }
-- 
1.9.3 (Apple Git-50)