diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
index 04a91a6..1b7c884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.backup.master;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,8 +34,12 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Implementation of a log cleaner that checks if a log is still scheduled for
  * incremental backup before deleting it when its TTL is over.
@@ -45,11 +50,30 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
   private static final Log LOG = LogFactory.getLog(BackupLogCleaner.class);
 
   private boolean stopped = false;
+  private Connection conn;
 
   public BackupLogCleaner() {
   }
 
   @Override
+  public void init(Map<String, Object> params) {
+    if (params != null && params.containsKey(HMaster.MASTER)) {
+      MasterServices master = (MasterServices) params.get(HMaster.MASTER);
+      conn = master.getConnection();
+      if (getConf() == null) {
+        super.setConf(conn.getConfiguration());
+      }
+    }
+    if (conn == null) {
+      try {
+        conn = ConnectionFactory.createConnection(getConf());
+      } catch (IOException ioe) {
+        throw new RuntimeException("Failed to create connection", ioe);
+      }
+    }
+  }
+
+  @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
     // all members of this class are null if backup is disabled,
     // so we cannot filter the files
@@ -58,11 +82,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate {
     }
 
     List<FileStatus> list = new ArrayList<FileStatus>();
-    // TODO: LogCleaners do not have a way to get the Connection from Master. We should find a
-    // way to pass it down here, so that this connection is not re-created every time.
-    // It is expensive
-    try (final Connection conn = ConnectionFactory.createConnection(getConf());
-        final BackupSystemTable table = new BackupSystemTable(conn)) {
+    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
       // If we do not have recorded backup sessions
       if (!table.hasBackupSessions()) {
         LOG.debug("BackupLogCleaner has no backup sessions");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
deleted file mode 100644
index 3ef68e6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestBackupLogCleaner extends TestBackupBase {
-  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
-
-  // implements all test cases in 1 test since incremental full backup/
-  // incremental backup has dependencies
-  @Test
-  public void testBackupLogCleaner() throws Exception {
-
-    // #1 - create full backup for all tables
-    LOG.info("create full backup image for all tables");
-
-    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
-
-    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
-      // Verify that we have no backup sessions yet
-      assertFalse(systemTable.hasBackupSessions());
-
-      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      List<String> swalFiles = convert(walFiles);
-      BackupLogCleaner cleaner = new BackupLogCleaner();
-      cleaner.setConf(TEST_UTIL.getConfiguration());
-
-      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
-      int size = Iterables.size(deletable);
-
-      // We can delete all files because we do not have yet recorded backup sessions
-      assertTrue(size == walFiles.size());
-
-      systemTable.addWALFiles(swalFiles, "backup", "root");
-      String backupIdFull = fullTableBackup(tableSetFullList);
-      assertTrue(checkSucceeded(backupIdFull));
-      // Check one more time
-      deletable = cleaner.getDeletableFiles(walFiles);
-      // We can delete wal files because they were saved into hbase:backup table
-      size = Iterables.size(deletable);
-      assertTrue(size == walFiles.size());
-
-      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
-      LOG.debug("WAL list after full backup");
-      convert(newWalFiles);
-
-      // New list of wal files is greater than the previous one,
-      // because new wal per RS have been opened after full backup
-      assertTrue(walFiles.size() < newWalFiles.size());
-      Connection conn = ConnectionFactory.createConnection(conf1);
-      // #2 - insert some data to table
-      HTable t1 = (HTable) conn.getTable(table1);
-      Put p1;
-      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-        p1 = new Put(Bytes.toBytes("row-t1" + i));
-        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t1.put(p1);
-      }
-
-      t1.close();
-
-      HTable t2 = (HTable) conn.getTable(table2);
-      Put p2;
-      for (int i = 0; i < 5; i++) {
-        p2 = new Put(Bytes.toBytes("row-t2" + i));
-        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-        t2.put(p2);
-      }
-
-      t2.close();
-
-      // #3 - incremental backup for multiple tables
-
-      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
-      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
-        BACKUP_ROOT_DIR);
-      assertTrue(checkSucceeded(backupIdIncMultiple));
-      deletable = cleaner.getDeletableFiles(newWalFiles);
-
-      assertTrue(Iterables.size(deletable) == newWalFiles.size());
-
-      conn.close();
-    }
-  }
-
-  private List<String> convert(List<FileStatus> walFiles) {
-    List<String> result = new ArrayList<String>();
-    for (FileStatus fs : walFiles) {
-      LOG.debug("+++WAL: " + fs.getPath().toString());
-      result.add(fs.getPath().toString());
-    }
-    return result;
-  }
-
-  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
-    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
-    FileSystem fs = FileSystem.get(c);
-    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
-    List<FileStatus> logFiles = new ArrayList<FileStatus>();
-    while (it.hasNext()) {
-      LocatedFileStatus lfs = it.next();
-      if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
-        logFiles.add(lfs);
-        LOG.info(lfs);
-      }
-    }
-    return logFiles;
-  }
-
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
new file mode 100644
index 0000000..c74edb0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.master;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.TestBackupBase;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupLogCleaner extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class);
+
+  // implements all test cases in 1 test since incremental full backup/
+  // incremental backup has dependencies
+  @Test
+  public void testBackupLogCleaner() throws Exception {
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
+
+    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
+      // Verify that we have no backup sessions yet
+      assertFalse(systemTable.hasBackupSessions());
+
+      List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      List<String> swalFiles = convert(walFiles);
+      BackupLogCleaner cleaner = new BackupLogCleaner();
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+      cleaner.init(null);
+      cleaner.setConf(TEST_UTIL.getConfiguration());
+
+      Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
+      int size = Iterables.size(deletable);
+
+      // We can delete all files because we do not have yet recorded backup sessions
+      assertTrue(size == walFiles.size());
+
+      systemTable.addWALFiles(swalFiles, "backup", "root");
+      String backupIdFull = fullTableBackup(tableSetFullList);
+      assertTrue(checkSucceeded(backupIdFull));
+      // Check one more time
+      deletable = cleaner.getDeletableFiles(walFiles);
+      // We can delete wal files because they were saved into hbase:backup table
+      size = Iterables.size(deletable);
+      assertTrue(size == walFiles.size());
+
+      List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
+      LOG.debug("WAL list after full backup");
+      convert(newWalFiles);
+
+      // New list of wal files is greater than the previous one,
+      // because new wal per RS have been opened after full backup
+      assertTrue(walFiles.size() < newWalFiles.size());
+      Connection conn = ConnectionFactory.createConnection(conf1);
+      // #2 - insert some data to table
+      HTable t1 = (HTable) conn.getTable(table1);
+      Put p1;
+      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+        p1 = new Put(Bytes.toBytes("row-t1" + i));
+        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t1.put(p1);
+      }
+
+      t1.close();
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+
+      t2.close();
+
+      // #3 - incremental backup for multiple tables
+
+      List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
+      String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList,
+        BACKUP_ROOT_DIR);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+      deletable = cleaner.getDeletableFiles(newWalFiles);
+
+      assertTrue(Iterables.size(deletable) == newWalFiles.size());
+
+      conn.close();
+    }
+  }
+
+  private List<String> convert(List<FileStatus> walFiles) {
+    List<String> result = new ArrayList<String>();
+    for (FileStatus fs : walFiles) {
+      LOG.debug("+++WAL: " + fs.getPath().toString());
+      result.add(fs.getPath().toString());
+    }
+    return result;
+  }
+
+  private List<FileStatus> getListOfWALFiles(Configuration c) throws IOException {
+    Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME);
+    FileSystem fs = FileSystem.get(c);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(logRoot, true);
+    List<FileStatus> logFiles = new ArrayList<FileStatus>();
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) {
+        logFiles.add(lfs);
+        LOG.info(lfs);
+      }
+    }
+    return logFiles;
+  }
+
+}