diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index 90395cb..0b89252 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -87,7 +87,10 @@ public class MapReduceRestoreJob implements RestoreJob {
       Path bulkOutputPath = getBulkOutputDir(getFileNameCompatibleString(newTableNames[i]));
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
-      String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
+      String[] playerArgs = { dirs,
+          fullBackupRestore ? newTableNames[i].getNameAsString()
+              : tableNames[i].getNameAsString()
+      };
 
       int result = 0;
       int loaderResult = 0;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
index 95b5978..191fac8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
@@ -286,47 +284,6 @@ public class RestoreServerUtil {
     return tableDescriptor;
   }
 
-  /**
-   * Duplicate the backup image if it's on local cluster
-   * @see HStore#bulkLoadHFile(String, long)
-   * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
-   * @param tableArchivePath archive path
-   * @return the new tableArchivePath
-   * @throws IOException exception
-   */
-  Path checkLocalAndBackup(Path tableArchivePath) throws IOException {
-    // Move the file if it's on local cluster
-    boolean isCopyNeeded = false;
-
-    FileSystem srcFs = tableArchivePath.getFileSystem(conf);
-    FileSystem desFs = FileSystem.get(conf);
-    if (tableArchivePath.getName().startsWith("/")) {
-      isCopyNeeded = true;
-    } else {
-      // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path,
-      // long)
-      if (srcFs.getUri().equals(desFs.getUri())) {
-        LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: "
-            + desFs.getUri());
-        isCopyNeeded = true;
-      }
-    }
-    if (isCopyNeeded) {
-      LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore");
-      if (desFs.exists(restoreTmpPath)) {
-        try {
-          desFs.delete(restoreTmpPath, true);
-        } catch (IOException e) {
-          LOG.debug("Failed to delete path: " + restoreTmpPath
-              + ", need to check whether restore target DFS cluster is healthy");
-        }
-      }
-      FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf);
-      LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath);
-      tableArchivePath = restoreTmpPath;
-    }
-    return tableArchivePath;
-  }
 
   private HTableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName, String
       lastIncrBackupId) throws IOException {
@@ -408,33 +365,13 @@ public class RestoreServerUtil {
       // the regions in fine grain
       checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList,
         tableDescriptor, truncateIfExists);
-      if (tableArchivePath != null) {
-        // start real restore through bulkload
-        // if the backup target is on local cluster, special action needed
-        Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath);
-        if (tempTableArchivePath.equals(tableArchivePath)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath);
-          }
-        } else {
-          regionPathList = getRegionList(tempTableArchivePath); // point to the tempDir
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath);
-          }
-        }
+      RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
+      Path[] paths = new Path[regionPathList.size()];
+      regionPathList.toArray(paths);
+      restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName },
+        true);
-
-        LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false);
-        for (Path regionPath : regionPathList) {
-          String regionName = regionPath.toString();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Restoring HFiles from directory " + regionName);
-          }
-          String[] args = { regionName, newTableName.getNameAsString() };
-          loader.run(args);
-        }
-      }
-      // we do not recovered edits
     } catch (Exception e) {
+      LOG.error(e);
       throw new IllegalStateException("Cannot restore hbase table", e);
     }
   }
@@ -458,28 +395,6 @@ public class RestoreServerUtil {
   }
 
   /**
-   * Create a {@link LoadIncrementalHFiles} instance to be used to restore the HFiles of a full
-   * backup.
-   * @return the {@link LoadIncrementalHFiles} instance
-   * @throws IOException exception
-   */
-  private LoadIncrementalHFiles createLoader(Path tableArchivePath, boolean multipleTables)
-      throws IOException {
-
-    // By default, it is 32 and loader will fail if # of files in any region exceed this
-    // limit. Bad for snapshot restore.
-    this.conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE);
-    this.conf.set(LoadIncrementalHFiles.IGNORE_UNMATCHED_CF_CONF_KEY, "yes");
-    LoadIncrementalHFiles loader = null;
-    try {
-      loader = new LoadIncrementalHFiles(this.conf);
-    } catch (Exception e1) {
-      throw new IOException(e1);
-    }
-    return loader;
-  }
-
-  /**
    * Calculate region boundaries and add all the column families to the table descriptor
    * @param regionDirList region dir list
    * @return a set of keys to store the boundaries
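
For reference, the MapReduceRestoreJob hunk makes the table name handed to the player depend on the restore mode: a full-backup restore targets the new (possibly renamed) table, while an incremental restore keeps addressing the original name. A sketch of that selection follows; dirs, fullBackupRestore, tableNames and newTableNames are the fields/locals from the patch context, reproduced here as parameters, and the wrapper class and method are hypothetical.

import org.apache.hadoop.hbase.TableName;

final class PlayerArgsSketch {

  /**
   * Hypothetical extraction of the new playerArgs logic: pick the restore
   * target table name based on the kind of restore being run.
   */
  static String[] playerArgs(String dirs, boolean fullBackupRestore,
      TableName[] tableNames, TableName[] newTableNames, int i) {
    // A full-backup restore writes into the new table; an incremental restore
    // replays against the original table name.
    return new String[] { dirs,
        fullBackupRestore ? newTableNames[i].getNameAsString()
            : tableNames[i].getNameAsString() };
  }
}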
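
The RestoreServerUtil change replaces the local-copy-then-LoadIncrementalHFiles sequence with a single call into the pluggable RestoreJob. Below is a minimal sketch of the new call sequence, assuming RestoreJob and BackupRestoreFactory live in the org.apache.hadoop.hbase.backup package (an assumption; only the calls visible in the patch are confirmed).

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
// Assumed package; only the two calls used below appear in the patch.
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.RestoreJob;

final class RestoreJobSketch {

  /**
   * Mirrors the replacement block in RestoreServerUtil: hand all region
   * directories of one table to whatever RestoreJob implementation the
   * factory returns, instead of driving LoadIncrementalHFiles directly.
   */
  static void bulkRestore(Configuration conf, List<Path> regionPathList,
      TableName tableName, TableName newTableName) throws Exception {
    RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
    Path[] paths = regionPathList.toArray(new Path[regionPathList.size()]);
    // The trailing 'true' marks this as a full-backup restore, matching the
    // call site in the patch.
    restoreService.run(paths, new TableName[] { tableName },
        new TableName[] { newTableName }, true);
  }
}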