diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
index 4d6b2a7..cbb9428 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -310,6 +310,10 @@ public class BackupInfo implements Comparable {
   }
 
   public void setIncrBackupFileList(List<String> incrBackupFileList) {
+    LOG.debug("setting incr backup file list");
+    for (String file : incrBackupFileList) {
+      LOG.debug(file);
+    }
     this.incrBackupFileList = incrBackupFileList;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index bd496ce..34d41e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -187,6 +187,8 @@ public class IncrementalBackupManager {
       }
       if (tss > oldTss && tss < newTss) {
         logFiles.add(item);
+      } else {
+        LOG.debug("skipping wal " + item);
       }
     }
     return logFiles;
@@ -270,6 +272,9 @@ public class IncrementalBackupManager {
         // so newestTimestamps.get(host) will not be null.
         if (Long.valueOf(currentLogTS) > Long.valueOf(newestTimestamps.get(host))) {
           newestLogs.add(currentLogFile);
+        } else {
+          LOG.debug("excluding " + currentLogFile + " " + currentLogTS + " <= "
+              + newestTimestamps.get(host));
         }
       }
     }
@@ -300,12 +305,15 @@ public class IncrementalBackupManager {
       if (oldTimeStamp == null) {
         if (Long.valueOf(currentLogTS) < Long.valueOf(savedStartCode)) {
           // This log file is really old, its region server was before our last backup.
+          LOG.debug("excluding old " + currentLogFile + " " + currentLogTS + " < " + savedStartCode);
           continue;
         } else {
           resultLogFiles.add(currentLogFile);
         }
       } else if (Long.valueOf(currentLogTS) > Long.valueOf(oldTimeStamp)) {
         resultLogFiles.add(currentLogFile);
+      } else {
+        LOG.debug("excluding old " + currentLogFile + " " + currentLogTS + " <= " + oldTimeStamp);
       }
 
       // It is possible that a host in .oldlogs is an obsolete region server
@@ -314,6 +322,7 @@ public class IncrementalBackupManager {
       // to include they to avoid loss of edits for backup.
       Long newTimestamp = newestTimestamps.get(host);
       if (newTimestamp != null && Long.valueOf(currentLogTS) > Long.valueOf(newTimestamp)) {
+        LOG.debug("newest log " + currentLogFile);
        newestLogs.add(currentLogFile);
       }
     }
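
Note on the new trace statements above: each LOG.debug builds its message string even when DEBUG logging is off. HBase code using commons-logging typically wraps such calls in an isDebugEnabled() guard. A minimal sketch of that idiom, assuming only the Log/LogFactory API these classes already use (the holder class itself is hypothetical):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    // Hypothetical illustration class, not part of the patch.
    class WalDebugIdiom {
      private static final Log LOG = LogFactory.getLog(WalDebugIdiom.class);

      void traceSkip(String walItem) {
        // The guard skips the string concatenation when DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("skipping wal " + walItem);
        }
      }
    }
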
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 31f05c2..a649cc9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -662,6 +662,7 @@ public class FSHLog implements WAL {
   private void preemptiveSync(final ProtobufLogWriter nextWriter) {
     long startTimeNanos = System.nanoTime();
     try {
+      LOG.debug("syncing writer " + ((WriterBase)nextWriter).path);
       nextWriter.sync();
       postSync(System.nanoTime() - startTimeNanos, 0);
     } catch (IOException e) {
@@ -1271,6 +1272,7 @@ public class FSHLog implements WAL {
           Throwable lastException = null;
           try {
             Trace.addTimelineAnnotation("syncing writer");
+            LOG.debug("syncing writer " + ((WriterBase)writer).path);
             writer.sync();
             Trace.addTimelineAnnotation("writer synced");
             currentSequence = updateHighestSyncedSequence(currentSequence);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
index 8188e02..2939655 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WriterBase.java
@@ -39,10 +39,12 @@ public abstract class WriterBase implements DefaultWALProvider.Writer {
 
   protected CompressionContext compressionContext;
   protected Configuration conf;
+  protected Path path;
 
   @Override
   public void init(FileSystem fs, Path path, Configuration conf, boolean overwritable)
       throws IOException {
     this.conf = conf;
+    this.path = path;
   }
 
   public boolean initializeCompressionContext(Configuration conf, Path path) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
index 65c774e..9f8377e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedGroupingStrategy.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.wal.RegionGroupingProvider.RegionGroupingStrategy
 public class BoundedGroupingStrategy implements RegionGroupingStrategy{
 
   static final String NUM_REGION_GROUPS = "hbase.wal.regiongrouping.numgroups";
-  static final int DEFAULT_NUM_REGION_GROUPS = 2;
+  static final int DEFAULT_NUM_REGION_GROUPS = 4;
 
   private ConcurrentHashMap groupNameCache = new ConcurrentHashMap();
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 08f42aa..8296294 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -74,7 +74,7 @@ public class WALFactory {
   /**
    * Maps between configuration names for providers and implementation classes.
    */
   static enum Providers {
-    defaultProvider(DefaultWALProvider.class),
+    defaultProvider(RegionGroupingProvider.class),
     filesystem(DefaultWALProvider.class),
     multiwal(RegionGroupingProvider.class);
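
Taken together, the two hunks above change out-of-the-box behaviour: defaultProvider now maps to RegionGroupingProvider, and BoundedGroupingStrategy hands out four WAL groups per region server instead of two. The same setup can be requested explicitly rather than relying on the new default; a sketch using the keys visible in this patch ("hbase.wal.provider" is the standard WALFactory selector, "hbase.wal.regiongrouping.numgroups" is NUM_REGION_GROUPS above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MultiWalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Select the region-grouping provider explicitly ("multiwal" in the
        // Providers enum above) instead of relying on the changed default.
        conf.set("hbase.wal.provider", "multiwal");
        // Same key as NUM_REGION_GROUPS; 4 matches the raised default.
        conf.setInt("hbase.wal.regiongrouping.numgroups", 4);
        System.out.println("WAL groups per region server: "
            + conf.getInt("hbase.wal.regiongrouping.numgroups", 4));
      }
    }
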
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 492a0f2..78d8290 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -77,7 +77,7 @@ public class TestBackupBase {
   protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
   protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
 
-  protected static final int NB_ROWS_IN_BATCH = 999;
+  protected static final int NB_ROWS_IN_BATCH = 199;
   protected static final byte[] qualName = Bytes.toBytes("q1");
   protected static final byte[] famName = Bytes.toBytes("f");
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
index 3ef68e6..e6bd675 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
@@ -55,6 +55,13 @@ import com.google.common.collect.Lists;
 public class TestBackupLogCleaner extends TestBackupBase {
   private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
 
+  void logFiles(List<FileStatus> files, String subj) {
+    LOG.debug(subj + " WAL");
+    for (FileStatus f : files) {
+      LOG.debug(f.getPath());
+    }
+  }
+
   // implements all test cases in 1 test since incremental full backup/
   // incremental backup has dependencies
   @Test
@@ -95,7 +102,9 @@ public class TestBackupLogCleaner extends TestBackupBase {
     // New list of wal files is greater than the previous one,
     // because new wal per RS have been opened after full backup
-    assertTrue(walFiles.size() < newWalFiles.size());
+    logFiles(walFiles, "prev");
+    logFiles(newWalFiles, "curr");
+    assertTrue(walFiles.size() <= newWalFiles.size());
     Connection conn = ConnectionFactory.createConnection(conf1);
     // #2 - insert some data to table
     HTable t1 = (HTable) conn.getTable(table1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 0f35026..59933ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -61,6 +61,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     assertTrue(checkSucceeded(backupIdFull));
 
     // #2 - insert some data to table
+    LOG.debug("writing " + NB_ROWS_IN_BATCH + " rows to " + table1);
     HTable t1 = (HTable) conn.getTable(table1);
     Put p1;
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
@@ -71,6 +72,7 @@
     Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
     t1.close();
+    LOG.debug("written " + NB_ROWS_IN_BATCH + " rows to " + table1);
 
     HTable t2 = (HTable) conn.getTable(table2);
     Put p2;
@@ -82,6 +84,7 @@
     Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
     t2.close();
LOG.debug("written " + NB_ROWS_IN_BATCH + " rows to " + table2); // #3 - incremental backup for multiple tables tables = Lists.newArrayList(table1, table2, table3);