diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
index f4fc438..152e695 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
@@ -28,7 +28,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupCopyService;
 import org.apache.hadoop.hbase.backup.BackupInfo;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 import org.apache.hadoop.mapreduce.Cluster;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.tools.DistCp;
@@ -239,7 +242,8 @@
             new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP);
         String newProgressStr = progressData + "%";
 
-        LOG.info("Progress: " + newProgressStr);
+        LOG.info("Progress: " + newProgressStr + " subTask: " + subTaskPercntgInWholeTask
+            + " mapProgress: " + job.mapProgress());
 
         // accumulate the overall backup progress
         progressDone = newProgress;
@@ -249,7 +253,9 @@
             bytesCopied);
         LOG.debug("Backup progress data updated to hbase:backup: \"Progress: "
             + newProgressStr + " - " + bytesCopied + " bytes copied.\"");
-
+      } catch (Throwable t) {
+        LOG.error("distcp " + job.getJobID() + " encountered error", t);
+        throw t;
       } finally {
         if (!fieldSubmitted.getBoolean(this)) {
           methodCleanup.invoke(this);
@@ -259,7 +265,10 @@
       String jobID = job.getJobID().toString();
       job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID);
 
-      LOG.debug("DistCp job-id: " + jobID);
+      LOG.debug("DistCp job-id: " + jobID + " completed: " + job.isComplete() + " "
+          + job.isSuccessful());
+      Counters ctrs = job.getCounters();
+      LOG.debug(ctrs);
       if (job.isComplete() && !job.isSuccessful()) {
         throw new Exception("DistCp job-id: " + jobID + " failed");
       }
@@ -303,12 +312,10 @@ public class MapReduceBackupCopyService implements BackupCopyService {
       // target as a file name and copy source file to the target (as a file name).
      // We need to create the target dir before run distcp.
LOG.debug("DistCp options: " + Arrays.toString(options)); - if (options.length == 2) { - Path dest = new Path(options[1]); - FileSystem destfs = dest.getFileSystem(conf); - if (!destfs.exists(dest)) { - destfs.mkdirs(dest); - } + Path dest = new Path(options[options.length-1]); + FileSystem destfs = dest.getFileSystem(conf); + if (!destfs.exists(dest)) { + destfs.mkdirs(dest); } res = distcp.run(options); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java index fa08adf..130e53b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java @@ -24,7 +24,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.IncrementalRestoreService; @@ -62,13 +64,13 @@ public class MapReduceRestoreService implements IncrementalRestoreService { Path bulkOutputPath = getBulkOutputDir(getFileNameCompatibleString(newTableNames[i])); String[] playerArgs = - { logDirs, tableNames[i].getNameAsString(), newTableNames[i].getNameAsString()}; + { logDirs, tableNames[i].getNameAsString() }; int result = 0; int loaderResult = 0; try { Configuration conf = getConf(); - conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); + conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); player.setConf(getConf()); result = player.run(playerArgs); if (succeeded(result)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java index 6e90309..05a63ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java @@ -101,7 +101,7 @@ public class IncrementalTableBackupProcedure LOG.warn("Can't find file: " + file); } } - return list; + return list; } private List getMissingFiles(List incrBackupFileList) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java index 58cd4b2..6e95bfa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -30,8 +30,10 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.procedure.ProcedureMember; import org.apache.hadoop.hbase.procedure.Subprocedure; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; 
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * This backup subprocedure implementation forces a log roll on the RS.
@@ -73,10 +75,18 @@ public class LogRollBackupSubprocedure extends Subprocedure {
       }
       hlog = (FSHLog) rss.getWAL(null);
       long filenum = hlog.getFilenum();
-
-      LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum);
-      hlog.rollWriter(true);
-      LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum());
+      long highest = rss.getHighestFilenum();
+
+      LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum
+          + " highest: " + highest + " on " + rss.getServerName());
+      ((HRegionServer)rss).walRoller.requestRollAll();
+      long start = EnvironmentEdgeManager.currentTime();
+      while (!((HRegionServer)rss).walRoller.walRollFinished()) {
+        Thread.sleep(20);
+      }
+      LOG.debug("log roll took " + (EnvironmentEdgeManager.currentTime()-start));
+      LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum()
+          + " highest: " + rss.getHighestFilenum() + " on " + rss.getServerName());
 
       Connection connection = rss.getConnection();
       try(final BackupSystemTable table = new BackupSystemTable(connection)) {
@@ -86,17 +96,17 @@ public class LogRollBackupSubprocedure extends Subprocedure {
         int port = rss.getServerName().getPort();
         String server = host + ":" + port;
         Long sts = serverTimestampMap.get(host);
-        if (sts != null && sts > filenum) {
+        if (sts != null && sts > highest) {
           LOG.warn("Won't update server's last roll log result: current="
-              + sts + " new=" + filenum);
+              + sts + " new=" + highest);
           return null;
         }
         // write the log number to hbase:backup.
-        table.writeRegionServerLastLogRollResult(server, filenum, backupRoot);
+        table.writeRegionServerLastLogRollResult(server, highest, backupRoot);
         return null;
       } catch (Exception e) {
         LOG.error(e);
-        throw e; // TODO: is this correct?
+        throw e;
       }
     }
   }
@@ -125,7 +135,6 @@ public class LogRollBackupSubprocedure extends Subprocedure {
   @Override
   public byte[] insideBarrier() throws ForeignException {
     rolllog();
-    // FIXME
     return null;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
index 2fa7a70..42e485c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
@@ -30,7 +30,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -171,7 +173,7 @@ public class WALInputFormat extends InputFormat<WALKey, WALEdit> {
           temp = reader.next(currentEntry);
           i++;
         } catch (EOFException x) {
-          LOG.info("Corrupted entry detected. Ignoring the rest of the file."
+          LOG.warn("Corrupted entry detected. Ignoring the rest of the file."
+ " (This is normal when a RegionServer crashed.)"); return false; } @@ -262,9 +264,10 @@ public class WALInputFormat extends InputFormat { List result = new ArrayList(); LOG.debug("Scanning " + dir.toString() + " for WAL files"); - FileStatus[] files = fs.listStatus(dir); - if (files == null) return Collections.emptyList(); - for (FileStatus file : files) { + RemoteIterator iter = fs.listLocatedStatus(dir); + if (!iter.hasNext()) return Collections.emptyList(); + while (iter.hasNext()) { + LocatedFileStatus file = iter.next(); if (file.isDirectory()) { // recurse into sub directories result.addAll(getFiles(fs, file.getPath(), startTime, endTime)); @@ -275,7 +278,7 @@ public class WALInputFormat extends InputFormat { try { long fileStartTime = Long.parseLong(name.substring(idx+1)); if (fileStartTime <= endTime) { - LOG.info("Found: " + name); + LOG.info("Found: " + file); result.add(file); } } catch (NumberFormatException x) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 4cdbad3..f326cf7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -296,7 +296,7 @@ public class WALPlayer extends Configured implements Tool { String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { - LOG.debug("add incremental job :"+hfileOutPath); + LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs); // the bulk HFile case if (tables.length != 1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 81e56b2..b82e205 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -368,7 +368,7 @@ public class HRegionServer extends HasThread implements // WAL roller. log is protected rather than private to avoid // eclipse warning when accessed by inner classes - final LogRoller walRoller; + public final LogRoller walRoller; // Lazily initialized if this RegionServer hosts a meta table. final AtomicReference metawalRoller = new AtomicReference(); @@ -1874,6 +1874,11 @@ public class HRegionServer extends HasThread implements private static final byte[] UNSPECIFIED_REGION = new byte[]{}; @Override + public long getHighestFilenum() { + return walFactory.getHighestFilenum(); + } + + @Override public WAL getWAL(HRegionInfo regionInfo) throws IOException { WAL wal; LogRoller roller = walRoller; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 2a71629..93ce3d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -55,6 +55,11 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi * default (common) WAL */ WAL getWAL(HRegionInfo regionInfo) throws IOException; + /* + * @return the highest filenum among all the WAL files + */ + long getHighestFilenum(); + /** * @return Implementation of {@link CompactionRequestor} or null. 
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
index 8188e02..2939655 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
@@ -39,10 +39,12 @@ public abstract class WriterBase implements DefaultWALProvider.Writer {
   protected CompressionContext compressionContext;
   protected Configuration conf;
+  protected Path path;
 
   @Override
   public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable)
      throws IOException {
     this.conf = conf;
+    this.path = path;
   }
 
   public boolean initializeCompressionContext(Configuration conf, Path path) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index dd4d337..feeff95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -118,6 +118,14 @@ public class DefaultWALProvider implements WALProvider {
   }
 
   @Override
+  public long getHighestFilenum() {
+    if (log == null) {
+      return 0;
+    }
+    return log.getFilenum();
+  }
+
+  @Override
   public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
     if (log == null) {
       // only lock when need to create wal, and need to lock since
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index c3d4b2c..0b21c5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -65,6 +65,11 @@ class DisabledWALProvider implements WALProvider {
   }
 
   @Override
+  public long getHighestFilenum() {
+    return 0;
+  }
+
+  @Override
   public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
     return disabled;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index 0aeaccf..0d021ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -184,6 +184,19 @@ class RegionGroupingProvider implements WALProvider {
     return log;
   }
 
+  @Override
+  public long getHighestFilenum() {
+    long filenum = 0;
+    synchronized (this.walCacheLock) {
+      for (WAL log : cached.values()) {
+        if (((FSHLog)log).getFilenum() > filenum) {
+          filenum = ((FSHLog)log).getFilenum();
+        }
+      }
+    }
+    return filenum;
+  }
+
   private WAL getWAL(final String group) throws IOException {
     WAL log = cached.get(group);
     if (null == log) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 08f42aa..9306a70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -223,6 +223,10 @@ public class WALFactory {
     }
   }
 
+  public long getHighestFilenum() {
+    return provider.getHighestFilenum();
+  }
+
   /**
   * @param identifier may not be null, contents will not be altered
  * @param namespace could be null, and will use default namespace if null
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java
index 2c500dc..746a2f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALProvider.java
@@ -58,6 +58,11 @@ public interface WALProvider {
    */
   WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException;
 
+  /*
+   * @return the highest filenum among all the WAL files
+   */
+  long getHighestFilenum();
+
   /**
    * persist outstanding WALs to storage and stop accepting new appends.
    * This method serves as shorthand for sending a sync to every WAL provided by a given
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index e634327..c80cacc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -244,6 +244,11 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
+  public long getHighestFilenum() {
+    return 0;
+  }
+
+  @Override
   public WAL getWAL(HRegionInfo regionInfo) throws IOException {
     return null;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 492a0f2..7be9f58 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -77,7 +78,7 @@ public class TestBackupBase {
   protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
   protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
 
-  protected static final int NB_ROWS_IN_BATCH = 999;
+  protected static final int NB_ROWS_IN_BATCH = 99;
 
   protected static final byte[] qualName = Bytes.toBytes("q1");
   protected static final byte[] famName = Bytes.toBytes("f");
@@ -93,7 +94,7 @@ public class TestBackupBase {
     conf1 = TEST_UTIL.getConfiguration();
     conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
     // Set MultiWAL (with 2 default WAL files per RS)
-    //conf1.set(WAL_PROVIDER, "multiwal");
+    conf1.set(WALFactory.WAL_PROVIDER, "multiwal");
     TEST_UTIL.startMiniZKCluster();
     MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 0f35026..bfd9d01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -61,6 +61,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table
+    LOG.debug("writing " + NB_ROWS_IN_BATCH + " rows to " + table1);
     HTable t1 = (HTable) conn.getTable(table1);
     Put p1;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
@@ -71,6 +72,7 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
     t1.close();
+    LOG.debug("written " + NB_ROWS_IN_BATCH + " rows to " + table1);
 
     HTable t2 = (HTable) conn.getTable(table2);
     Put p2;
@@ -82,6 +84,7 @@ public class TestIncrementalBackup extends TestBackupBase {
 
     Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
     t2.close();
+    LOG.debug("written " + 5 + " rows to " + table2);
 
     // #3 - incremental backup for multiple tables
     tables = Lists.newArrayList(table1, table2, table3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 4de4a5f..bc24dd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -562,8 +562,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public long getHighestFilenum() {
+    return 0;
+  }
+
+  @Override
   public WAL getWAL(HRegionInfo regionInfo) throws IOException {
-    // TODO Auto-generated method stub
     return null;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
index 6b1ca03..3a04f75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
@@ -108,6 +108,11 @@ public class IOTestProvider implements WALProvider {
   }
 
   @Override
+  public long getHighestFilenum() {
+    return log.getFilenum();
+  }
+
+  @Override
   public WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException {
     return log;
   }