diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 1ba3af2..9e3edfc 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -3063,7 +3063,7 @@ public final class ProtobufUtil { * @param builder current message builder * @param in InputStream containing protobuf data * @param size known size of protobuf data - * @throws IOException + * @throws IOException */ public static void mergeFrom(Message.Builder builder, InputStream in, int size) throws IOException { @@ -3078,7 +3078,7 @@ public final class ProtobufUtil { * buffers where the message size is not known * @param builder current message builder * @param in InputStream containing protobuf data - * @throws IOException + * @throws IOException */ public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException { @@ -3092,8 +3092,8 @@ public final class ProtobufUtil { * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding * buffers when working with ByteStrings * @param builder current message builder - * @param bs ByteString containing the - * @throws IOException + * @param bs ByteString containing the + * @throws IOException */ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException { final CodedInputStream codedInput = bs.newCodedInput(); @@ -3107,7 +3107,7 @@ public final class ProtobufUtil { * buffers when working with byte arrays * @param builder current message builder * @param b byte array - * @throws IOException + * @throws IOException */ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(b); diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java index 95363e5..da197f1 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java @@ -30,9 +30,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.backup.BackupAdmin; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -import org.apache.hadoop.hbase.backup.BackupAdmin; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreRequest; @@ -80,6 +80,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { private static int rowsInBatch; private static String BACKUP_ROOT_DIR = "backupIT"; + @Override @Before public void setUp() throws Exception { util = new IntegrationTestingUtility(); @@ -89,7 +90,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { rowsInBatch = util.getConfiguration().getInt(NB_ROWS_IN_BATCH_KEY, DEFAULT_NB_ROWS_IN_BATCH); LOG.info(String.format("Initializing cluster with %d region servers.", regionServerCount)); util.initializeCluster(regionServerCount); - LOG.info("Cluster initialized"); + LOG.info("Cluster initialized"); util.deleteTableIfAny(TABLE_NAME1); 
util.deleteTableIfAny(TABLE_NAME2); util.waitTableAvailable(BackupSystemTable.getTableName()); @@ -113,7 +114,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { private void cleanUpBackupDir() throws IOException { FileSystem fs = FileSystem.get(util.getConfiguration()); - fs.delete(new Path(BACKUP_ROOT_DIR), true); + fs.delete(new Path(BACKUP_ROOT_DIR), true); } @Test @@ -124,21 +125,21 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { runTest(); } - + private void createTable(TableName tableName) throws Exception { long startTime, endTime; HTableDescriptor desc = new HTableDescriptor(tableName); - HColumnDescriptor[] columns = + HColumnDescriptor[] columns = new HColumnDescriptor[]{new HColumnDescriptor(COLUMN_NAME)}; SplitAlgorithm algo = new RegionSplitter.UniformSplit(); - LOG.info(String.format("Creating table %s with %d splits.", tableName, + LOG.info(String.format("Creating table %s with %d splits.", tableName, regionsCountPerServer)); startTime = System.currentTimeMillis(); - HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns, + HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns, algo, regionsCountPerServer); util.waitTableAvailable(tableName); endTime = System.currentTimeMillis(); - LOG.info(String.format("Pre-split table created successfully in %dms.", + LOG.info(String.format("Pre-split table created successfully in %dms.", (endTime - startTime))); } @@ -151,7 +152,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { } private void runTest() throws IOException { - Connection conn = util.getConnection(); + Connection conn = util.getConnection(); // #0- insert some data to table TABLE_NAME1, TABLE_NAME2 loadData(TABLE_NAME1, rowsInBatch); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java index 221bc8d..2980d60 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -45,12 +45,7 @@ public interface BackupAdmin extends Closeable{ /** * Backs up given list of tables fully. Synchronous operation. * - * @param userRequest BackupRequest instance which contains the following members: - * type whether the backup is full or incremental - * tableList list of tables to backup - * targetRootDir root directory for saving the backup - * workers number of parallel workers. -1 - system defined - * bandwidth bandwidth per worker in MB per second. -1 - unlimited + * @param userRequest BackupRequest instance * @return the backup Id */ @@ -59,12 +54,7 @@ public interface BackupAdmin extends Closeable{ /** * Backs up given list of tables fully. Asynchronous operation. * - * @param userRequest BackupRequest instance which contains the following members: - * type whether the backup is full or incremental - * tableList list of tables to backup - * targetRootDir root dir for saving the backup - * workers number of paralle workers. -1 - system defined - * bandwidth bandwidth per worker in MB per sec. 
-1 - unlimited + * @param userRequest BackupRequest instance + * @return the backup Id future */ public Future backupTablesAsync(final BackupRequest userRequest) throws IOException; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java index ba23bd4..151c10c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyTask.java @@ -32,22 +32,22 @@ public interface BackupCopyTask extends Configurable { /** * Copy backup data task - * @param backupContext - context - * @param backupManager - manager + * @param backupContext - backup context + * @param backupManager - backup manager * @param conf - configuration * @param copyType - copy type * @param options - array of options (implementation-specific) * @return result (0 - success) - * @throws IOException + * @throws IOException exception */ - public int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf, + int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf, BackupType copyType, String[] options) throws IOException; - + /** * Cancel copy job * @param jobHandler - copy job handler - * @throws IOException + * @throws IOException exception */ - public void cancelCopyJob(String jobHandler) throws IOException; + void cancelCopyJob(String jobHandler) throws IOException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 4f17abf..07f39b6 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -27,11 +27,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.backup.impl.BackupCommands; +import org.apache.hadoop.hbase.backup.util.LogUtils; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.LogUtils; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -48,20 +48,12 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons } protected void init() throws IOException { - // define supported options - addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC); - addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC); - addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC); - addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC); - addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC); - addOptWithArg(OPTION_SET, OPTION_SET_DESC); - addOptWithArg(OPTION_PATH, OPTION_PATH_DESC); - // disable irrelevant loggers to avoid it mess up command output - LogUtils.disableUselessLoggers(LOG); + LogUtils.disableZkAndClientLoggers(LOG); } private int parseAndRun(String[] args) throws IOException { + String cmd = null; String[] remainArgs = null; if (args == null || args.length == 0) { @@ -98,7 +90,7 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons // enable debug logging Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); -
if (this.cmd.hasOption("debug")) { + if (this.cmd.hasOption(OPTION_DEBUG)) { backupClientLogger.setLevel(Level.DEBUG); } else { backupClientLogger.setLevel(Level.INFO); @@ -122,6 +114,14 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons @Override protected void addOptions() { + // define supported options + addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC); + addOptWithArg(OPTION_TABLE, OPTION_TABLE_DESC); + addOptWithArg(OPTION_BANDWIDTH, OPTION_BANDWIDTH_DESC); + addOptWithArg(OPTION_WORKERS, OPTION_WORKERS_DESC); + addOptWithArg(OPTION_RECORD_NUMBER, OPTION_RECORD_NUMBER_DESC); + addOptWithArg(OPTION_SET, OPTION_SET_DESC); + addOptWithArg(OPTION_PATH, OPTION_PATH_DESC); } @Override @@ -178,6 +178,7 @@ public class BackupDriver extends AbstractHBaseTool implements BackupRestoreCons return ret; } + @Override protected boolean sanityCheckOptions(CommandLine cmd) { boolean success = true; for (String reqOpt : requiredOptions) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index 77aacea..45695a9 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -60,66 +60,105 @@ public class BackupInfo implements Comparable { */ public boolean apply(BackupInfo info); } - // backup status flag + + /** + * Backup status flag + */ public static enum BackupState { WAITING, RUNNING, COMPLETE, FAILED, ANY; } - // backup phase + /** + * Backup phase + */ public static enum BackupPhase { SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; } - // backup id: a timestamp when we request the backup + /** + * Backup id + */ private String backupId; - // backup type, full or incremental + /** + * Backup type, full or incremental + */ private BackupType type; - // target root directory for storing the backup files + /** + * Target root directory for storing the backup files + */ private String targetRootDir; - // overall backup state + /** + * Backup state + */ private BackupState state; - // overall backup phase + /** + * Backup phase + */ private BackupPhase phase; - // overall backup failure message + /** + * Backup failure message + */ private String failedMsg; - // backup status map for all tables + /** + * Backup status map for all tables + */ private Map backupStatusMap; - // actual start timestamp of the backup process + /** + * Actual start timestamp of a backup process + */ private long startTs; - // actual end timestamp of the backup process, could be fail or complete + /** + * Actual end timestamp of the backup process + */ private long endTs; - // the total bytes of incremental logs copied + /** + * Total bytes of incremental logs copied + */ private long totalBytesCopied; - // for incremental backup, the location of the backed-up hlogs + /** + * For incremental backup, a location of a backed-up hlogs + */ private String hlogTargetDir = null; - // incremental backup file list + /** + * Incremental backup file list + */ transient private List incrBackupFileList; - // new region server log timestamps for table set after distributed log roll - // key - table name, value - map of RegionServer hostname -> last log rolled timestamp + /** + * New region server log timestamps for table set after distributed log roll + * key - table name, value - map of RegionServer hostname -> last log rolled timestamp + */ transient private HashMap> tableSetTimestampMap; - // backup 
progress in %% (0-100) + /** + * Backup progress in %% (0-100) + */ private int progress; - // distributed job id + /** + * Distributed job id + */ private String jobId; - // Number of parallel workers. -1 - system defined + /** + * Number of parallel workers. -1 - system defined + */ private int workers = -1; - // Bandwidth per worker in MB per sec. -1 - unlimited + /** + * Bandwidth per worker in MB per sec. -1 - unlimited + */ private long bandwidth = -1; public BackupInfo() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java index 1c82bbe..5c869f6 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java @@ -31,50 +31,50 @@ public interface BackupRestoreConstants { // Drivers option list public static final String OPTION_OVERWRITE = "o"; - public static final String OPTION_OVERWRITE_DESC = + public static final String OPTION_OVERWRITE_DESC = "Overwrite data if any of the restore target tables exists"; - + public static final String OPTION_CHECK = "c"; - public static final String OPTION_CHECK_DESC = + public static final String OPTION_CHECK_DESC = "Check restore sequence and dependencies only (does not execute the command)"; - + public static final String OPTION_SET = "s"; public static final String OPTION_SET_DESC = "Backup set name"; - public static final String OPTION_SET_RESTORE_DESC = + public static final String OPTION_SET_RESTORE_DESC = "Backup set to restore, mutually exclusive with table list "; - + public static final String OPTION_DEBUG = "d"; public static final String OPTION_DEBUG_DESC = "Enable debug loggings"; - + public static final String OPTION_TABLE = "t"; public static final String OPTION_TABLE_DESC = "Table name. If specified, only backup images,"+ " which contain this table will be listed."; - + public static final String OPTION_BANDWIDTH = "b"; public static final String OPTION_BANDWIDTH_DESC = "Bandwidth per task (MapReduce task) in MB/s"; - + public static final String OPTION_WORKERS = "w"; public static final String OPTION_WORKERS_DESC = "Number of parallel MapReduce tasks to execute"; public static final String OPTION_RECORD_NUMBER = "n"; public static final String OPTION_RECORD_NUMBER_DESC = "Number of records of backup history. Default: 10"; - + public static final String OPTION_PATH = "p"; public static final String OPTION_PATH_DESC = "Backup destination root directory path"; public static final String OPTION_TABLE_MAPPING = "m"; - public static final String OPTION_TABLE_MAPPING_DESC = + public static final String OPTION_TABLE_MAPPING_DESC = "A comma separated list of target tables. 
"+ "If specified, each table in must have a mapping"; - + // delimiter in tablename list in restore command public static final String TABLENAME_DELIMITER_IN_COMMAND = ","; public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root"; public static final String BACKUPID_PREFIX = "backup_"; - + /** * Backup/Restore constants */ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 9043dcc..ca1b2de 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -33,52 +33,39 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.backup.util.LogUtils; import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.LogUtils; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; import org.apache.log4j.Logger; -public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreConstants{ +public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreConstants { private static final Log LOG = LogFactory.getLog(RestoreDriver.class); private CommandLine cmd; private static final String USAGE_STRING = - "Usage: bin/hbase restore [options]\n"+ - " backup_path Path to a backup destination root\n"+ - " backup_id Backup image ID to restore" + - " table(s) Comma-separated list of tables to restore"; - - + "Usage: bin/hbase restore [options]\n" + + " backup_path Path to a backup destination root\n" + + " backup_id Backup image ID to restore" + + " table(s) Comma-separated list of tables to restore"; + private static final String USAGE_FOOTER = ""; - - protected RestoreDriver() throws IOException - { + protected RestoreDriver() throws IOException { init(); } - - protected void init() throws IOException { - // define supported options - addOptNoArg(OPTION_OVERWRITE, OPTION_OVERWRITE_DESC); - addOptNoArg(OPTION_CHECK, OPTION_CHECK_DESC); - addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC); - addOptWithArg(OPTION_SET, OPTION_SET_RESTORE_DESC); - addOptWithArg(OPTION_TABLE_MAPPING, OPTION_TABLE_MAPPING_DESC); - - + protected void init() throws IOException { // disable irrelevant loggers to avoid it mess up command output - LogUtils.disableUselessLoggers(LOG); + LogUtils.disableZkAndClientLoggers(LOG); } - private int parseAndRun(String[] args) throws IOException{ - + private int parseAndRun(String[] args) throws IOException { // enable debug logging Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); if (cmd.hasOption(OPTION_DEBUG)) { @@ -103,26 +90,26 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon // parse main restore command options String[] remainArgs = cmd.getArgs(); - if (remainArgs.length < 3 && !cmd.hasOption(OPTION_SET) || - (cmd.hasOption(OPTION_SET) && remainArgs.length < 2)) { + if (remainArgs.length < 3 && !cmd.hasOption(OPTION_SET) + || (cmd.hasOption(OPTION_SET) && remainArgs.length < 2)) { printToolUsage(); return 
-1; - } + } String backupRootDir = remainArgs[0]; String backupId = remainArgs[1]; String tables = null; - String tableMapping = cmd.hasOption(OPTION_TABLE_MAPPING)? - cmd.getOptionValue(OPTION_TABLE_MAPPING): null; + String tableMapping = + cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null; try (final Connection conn = ConnectionFactory.createConnection(conf); BackupAdmin client = new HBaseBackupAdmin(conn);) { // Check backup set if (cmd.hasOption(OPTION_SET)) { String setName = cmd.getOptionValue(OPTION_SET); - try{ + try { tables = getTablesForSet(conn, setName, conf); - } catch(IOException e){ - System.out.println("ERROR: "+ e.getMessage()+" for setName="+setName); + } catch (IOException e) { + System.out.println("ERROR: " + e.getMessage() + " for setName=" + setName); printToolUsage(); return -2; } @@ -133,21 +120,22 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon return -3; } } else { - tables = remainArgs[2]; - } + tables = remainArgs[2]; + } TableName[] sTableArray = BackupServerUtil.parseTableNames(tables); TableName[] tTableArray = BackupServerUtil.parseTableNames(tableMapping); - if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)){ + if (sTableArray != null && tTableArray != null + && (sTableArray.length != tTableArray.length)) { System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); printToolUsage(); return -4; } client.restore(RestoreServerUtil.createRestoreRequest(backupRootDir, backupId, check, - sTableArray, tTableArray, overwrite)); - } catch (Exception e){ + sTableArray, tTableArray, overwrite)); + } catch (Exception e) { e.printStackTrace(); return -5; } @@ -162,9 +150,15 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); } } - + @Override protected void addOptions() { + // define supported options + addOptNoArg(OPTION_OVERWRITE, OPTION_OVERWRITE_DESC); + addOptNoArg(OPTION_CHECK, OPTION_CHECK_DESC); + addOptNoArg(OPTION_DEBUG, OPTION_DEBUG_DESC); + addOptWithArg(OPTION_SET, OPTION_SET_RESTORE_DESC); + addOptWithArg(OPTION_TABLE_MAPPING, OPTION_TABLE_MAPPING_DESC); } @Override @@ -185,7 +179,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon int ret = ToolRunner.run(conf, new RestoreDriver(), args); System.exit(ret); } - + @Override public int run(String[] args) throws IOException { if (conf == null) { @@ -204,8 +198,8 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon return EXIT_FAILURE; } - if (!sanityCheckOptions(cmd) || cmd.hasOption(SHORT_HELP_OPTION) || - cmd.hasOption(LONG_HELP_OPTION)) { + if (!sanityCheckOptions(cmd) || cmd.hasOption(SHORT_HELP_OPTION) + || cmd.hasOption(LONG_HELP_OPTION)) { printToolUsage(); return EXIT_FAILURE; } @@ -221,7 +215,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon } return ret; } - + protected boolean sanityCheckOptions(CommandLine cmd) { boolean success = true; for (String reqOpt : requiredOptions) { @@ -232,7 +226,7 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon } return success; } - + protected void printToolUsage() throws IOException { System.out.println(USAGE_STRING); HelpFormatter helpFormatter = new HelpFormatter(); @@ -240,6 +234,6 @@ public class RestoreDriver extends AbstractHBaseTool implements BackupRestoreCon 
helpFormatter.setDescPadding(8); helpFormatter.setWidth(100); helpFormatter.setSyntaxPrefix("Options:"); - helpFormatter.printHelp(" ", null, options, USAGE_FOOTER); - } + helpFormatter.printHelp(" ", null, options, USAGE_FOOTER); + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java index fa4ed3a..08e1567 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreTask.java @@ -44,7 +44,7 @@ public interface RestoreTask extends Configurable{ * @param fullBackupRestore - full backup restore * @throws IOException */ - public void run(Path[] dirPaths, TableName[] fromTables, + void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore) throws IOException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index c19bc97..e32d7d4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -36,13 +36,12 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -57,6 +56,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.util.Bytes; /** * This class provides 'hbase:backup' table API @@ -88,6 +88,7 @@ public final class BackupSystemTable implements Closeable { return backupRoot; } + @Override public String toString() { return "/" + backupRoot + "/" + backupId + "/" + walFile; } @@ -108,6 +109,7 @@ public final class BackupSystemTable implements Closeable { this.connection = conn; } + @Override public void close() { // do nothing } @@ -235,7 +237,7 @@ public final class BackupSystemTable implements Closeable { String server = BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); byte[] data = CellUtil.cloneValue(cell); - rsTimestampMap.put(server, Long.parseLong(new String(data))); + rsTimestampMap.put(server, Bytes.toLong(data)); } return rsTimestampMap; } @@ -278,7 +280,7 @@ public final class BackupSystemTable implements Closeable { /** * Get all backups history - * @return list of backup info + * @return list of backup info * @throws IOException */ public List getBackupHistory() throws IOException { @@ -302,7 +304,7 @@ public final class BackupSystemTable implements Closeable { return list; } - + /** * Get backup history records filtered 
by list * of filters. @@ -349,7 +351,7 @@ public final class BackupSystemTable implements Closeable { } return history; } - + /** * Get history for a table * @param name - table name @@ -368,10 +370,10 @@ public final class BackupSystemTable implements Closeable { return tableHistory; } - public Map> + public Map> getBackupHistoryForTableSet(Set set, String backupRoot) throws IOException { List history = getBackupHistory(backupRoot); - Map> tableHistoryMap = + Map> tableHistoryMap = new HashMap>(); for (Iterator iterator = history.iterator(); iterator.hasNext();) { BackupInfo info = iterator.next(); @@ -379,7 +381,7 @@ public final class BackupSystemTable implements Closeable { continue; } List tables = info.getTableNames(); - for (TableName tableName: tables) { + for (TableName tableName: tables) { if (set.contains(tableName)) { ArrayList list = tableHistoryMap.get(tableName); if (list == null) { @@ -392,7 +394,7 @@ public final class BackupSystemTable implements Closeable { } return tableHistoryMap; } - + /** * Get all backup session with a given status (in desc order by time) * @param status status diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java index 37f29f8..f5911b4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -48,12 +48,12 @@ import org.apache.hadoop.hbase.util.Bytes; public final class BackupSystemTableHelper { /** - * hbase:backup schema: - * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext - * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode - * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] + * hbase:backup schema: + * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext + * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode + * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> last WAL - * timestamp] + * timestamp] * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name */ @@ -83,7 +83,8 @@ public final class BackupSystemTableHelper { */ static Put createPutForBackupContext(BackupInfo context) throws IOException { Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); - put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray()); + put.addColumn(BackupSystemTable.SESSIONS_FAMILY, Bytes.toBytes("context"), + context.toByteArray()); return put; } @@ -143,7 +144,8 @@ public final class BackupSystemTableHelper { */ static Put createPutForStartCode(String startCode, String rootPath) { Put put = new Put(rowkey(START_CODE_ROW, rootPath)); - put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("startcode"), + Bytes.toBytes(startCode)); return put; } @@ -190,7 +192,7 @@ public final class BackupSystemTableHelper { */ static Scan createScanForBackupHistory() { Scan scan = new Scan(); - byte[] startRow = BACKUP_INFO_PREFIX.getBytes(); + byte[] startRow = Bytes.toBytes(BACKUP_INFO_PREFIX); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); @@ -220,7 +222,7 @@ public final class BackupSystemTableHelper { static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, String backupRoot) { Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); - put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap); return put; } @@ -246,7 +248,7 @@ public final class BackupSystemTableHelper { * @return table name */ static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { - String s = new String(cloneRow); + String s = Bytes.toString(cloneRow); int index = s.lastIndexOf(NULL); return s.substring(index + 1); } @@ -260,8 +262,7 @@ public final class BackupSystemTableHelper { static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, String backupRoot) { Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); - put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), timestamp.toString() - .getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"), Bytes.toBytes(timestamp)); return put; } @@ -288,7 +289,7 @@ public final class BackupSystemTableHelper { * @return server's name */ static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { - String s = new String(row); + String s = Bytes.toString(row); int index = s.lastIndexOf(NULL); return s.substring(index + 1); } @@ -306,9 +307,11 @@ public final class BackupSystemTableHelper { List puts = new ArrayList(); for (String file : files) { Put put = new Put(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); - put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("backupId"), + Bytes.toBytes(backupId)); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("file"), Bytes.toBytes(file)); + put.addColumn(BackupSystemTable.META_FAMILY, 
Bytes.toBytes("root"), + Bytes.toBytes(backupRoot)); puts.add(put); } return puts; @@ -321,7 +324,7 @@ public final class BackupSystemTableHelper { */ public static Scan createScanForGetWALs(String backupRoot) { Scan scan = new Scan(); - byte[] startRow = WALS_PREFIX.getBytes(); + byte[] startRow = Bytes.toBytes(WALS_PREFIX); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); @@ -349,7 +352,7 @@ public final class BackupSystemTableHelper { */ static Scan createScanForBackupSetList() { Scan scan = new Scan(); - byte[] startRow = SET_KEY_PREFIX.getBytes(); + byte[] startRow = Bytes.toBytes(SET_KEY_PREFIX); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); @@ -388,7 +391,7 @@ public final class BackupSystemTableHelper { static Put createPutForBackupSet(String name, String[] tables) { Put put = new Put(rowkey(SET_KEY_PREFIX, name)); byte[] value = convertToByteArray(tables); - put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("tables"), value); return put; } @@ -405,7 +408,7 @@ public final class BackupSystemTableHelper { static String[] cellValueToBackupSet(Cell current) throws IOException { byte[] data = CellUtil.cloneValue(current); if (data != null && data.length > 0) { - return new String(data).split(","); + return Bytes.toString(data).split(","); } else { return new String[0]; } @@ -419,7 +422,7 @@ public final class BackupSystemTableHelper { */ static String cellKeyToBackupSetName(Cell current) throws IOException { byte[] data = CellUtil.cloneRow(current); - return new String(data).substring(SET_KEY_PREFIX.length()); + return Bytes.toString(data).substring(SET_KEY_PREFIX.length()); } static byte[] rowkey(String s, String... other) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index 3b77183..afdd438 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.zookeeper.KeeperException; /** * This manager class handles the work dealing with backup for a {@link HRegionServer}. 
@@ -140,7 +141,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa } @Override - public void initialize(RegionServerServices rss) throws IOException { + public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; BaseCoordinatedStateManager coordManager = (BaseCoordinatedStateManager) CoordinatedStateManagerFactory.getCoordinatedStateManager(rss diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/LogUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/LogUtils.java new file mode 100644 index 0000000..e81c56e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/LogUtils.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.util; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +@InterfaceAudience.Private +public final class LogUtils { + + private LogUtils() { + } + /** + * Disables Zk- and HBase client logging + * @param log + */ + public static void disableZkAndClientLoggers(Log log) { + // disable zookeeper log to avoid it mess up command output + Logger zkLogger = Logger.getLogger("org.apache.zookeeper"); + zkLogger.setLevel(Level.OFF); + // disable hbase zookeeper tool log to avoid it mess up command output + Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper"); + hbaseZkLogger.setLevel(Level.OFF); + // disable hbase client log to avoid it mess up command output + Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client"); + hbaseClientLogger.setLevel(Level.OFF); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index 3342743..06731d2 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Server; +import org.apache.zookeeper.KeeperException; /** * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations. 
@@ -70,6 +71,6 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureMemberRpc} */ public abstract ProcedureMemberRpcs - getProcedureMemberRpcs(String procType) throws IOException; + getProcedureMemberRpcs(String procType) throws KeeperException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 7cf4aab..b3562e8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. @@ -69,7 +70,7 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager { } @Override - public ProcedureMemberRpcs getProcedureMemberRpcs(String procType) throws IOException { + public ProcedureMemberRpcs getProcedureMemberRpcs(String procType) throws KeeperException { return new ZKProcedureMemberRpcs(watcher, procType); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index f326cf7..c94051e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -47,8 +47,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.mapreduce.Counter; -import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -141,8 +139,7 @@ public class WALPlayer extends Configured implements Tool { protected static class WALMapper extends Mapper { private Map tables = new TreeMap(); - - + @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { @@ -155,7 +152,6 @@ public class WALPlayer extends Configured implements Tool { Put put = null; Delete del = null; Cell lastCell = null; - for (Cell cell : value.getCells()) { // filtering WAL meta entries if (WALEdit.isMetaEditFamily(cell)) { @@ -204,11 +200,6 @@ public class WALPlayer extends Configured implements Tool { } } - /** - * Filter cell - * @param cell cell - * @return Return true if we are to emit this cell. 
- */ protected boolean filter(Context context, final Cell cell) { return true; } @@ -288,12 +279,12 @@ public class WALPlayer extends Configured implements Tool { conf.setStrings(TABLE_MAP_KEY, tableMap); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis())); job.setJarByClass(WALPlayer.class); - - FileInputFormat.addInputPaths(job, inputDirs); - + + FileInputFormat.addInputPaths(job, inputDirs); + job.setInputFormatClass(WALInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); - + String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs); @@ -314,7 +305,7 @@ public class WALPlayer extends Configured implements Tool { HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator); } LOG.debug("success configuring load incremental job"); - + TableMapReduceUtil.addDependencyJars(job.getConfiguration(), com.google.common.base.Preconditions.class); } else { @@ -329,7 +320,7 @@ public class WALPlayer extends Configured implements Tool { return job; } - + /** * Print usage * @param errorMsg Error message. Can be null. @@ -380,6 +371,6 @@ public class WALPlayer extends Configured implements Tool { } Job job = createSubmittableJob(args); int result =job.waitForCompletion(true) ? 0 : 1; - return result; + return result; } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureUtil.java deleted file mode 100644 index 0ce8e70..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureUtil.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.procedure; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.Map; -import java.util.Map.Entry; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.procedure.MasterProcedureManager; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.security.UserGroupInformation; - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class ProcedureUtil { - private static final Log LOG = LogFactory.getLog(ProcedureUtil.class); - - public static ProcedureDescription buildProcedure(String signature, String instance, - Map props) { - ProcedureDescription.Builder builder = ProcedureDescription.newBuilder(); - builder.setSignature(signature).setInstance(instance); - for (Entry entry : props.entrySet()) { - NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey()) - .setValue(entry.getValue()).build(); - builder.addConfiguration(pair); - } - ProcedureDescription desc = builder.build(); - return desc; - } - - public static long execProcedure(MasterProcedureManager mpm, String signature, String instance, - Map props) throws IOException { - if (mpm == null) { - throw new IOException("The procedure is not registered: " + signature); - } - ProcedureDescription desc = buildProcedure(signature, instance, props); - mpm.execProcedure(desc); - - // send back the max amount of time the client should wait for the procedure - // to complete - long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME; - return waitTime; - } - - public static void waitForProcedure(MasterProcedureManager mpm, String signature, String instance, - Map props, long max, int numRetries, long pause) throws IOException { - ProcedureDescription desc = buildProcedure(signature, instance, props); - long start = EnvironmentEdgeManager.currentTime(); - long maxPauseTime = max / numRetries; - int tries = 0; - LOG.debug("Waiting a max of " + max + " ms for procedure '" + - signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)"); - boolean done = false; - while (tries == 0 - || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { - try { - // sleep a backoff <= pauseTime amount - long sleep = HBaseAdmin.getPauseTime(tries++, pause); - sleep = sleep > maxPauseTime ? 
maxPauseTime : sleep; - LOG.debug("(#" + tries + ") Sleeping: " + sleep + - "ms while waiting for procedure completion."); - Thread.sleep(sleep); - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e); - } - LOG.debug("Getting current status of procedure from master..."); - done = mpm.isProcedureDone(desc); - } - if (!done) { - throw new IOException("Procedure '" + signature + " : " + instance - + "' wasn't completed in expectedTime:" + max + " ms"); - } - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java index b6e11ea..95c3ffe 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java @@ -37,7 +37,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { * @param rss Region Server service interface * @throws KeeperException */ - public abstract void initialize(RegionServerServices rss) throws IOException; + public abstract void initialize(RegionServerServices rss) throws KeeperException; /** * Start accepting procedure requests. diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java index adb3604..0f4ea64 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager; +import org.apache.zookeeper.KeeperException; /** * Provides the globally barriered procedure framework and environment @@ -38,7 +39,7 @@ public class RegionServerProcedureManagerHost extends private static final Log LOG = LogFactory .getLog(RegionServerProcedureManagerHost.class); - public void initialize(RegionServerServices rss) throws IOException { + public void initialize(RegionServerServices rss) throws KeeperException { for (RegionServerProcedureManager proc : procedures) { LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing"); proc.initialize(rss); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index 9b491fd..2e03a60 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -68,53 +68,49 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { * @throws KeeperException if we can't reach zookeeper */ public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType) - throws IOException { - try { - this.zkController = new ZKProcedureUtil(watcher, procType) { - @Override - public void nodeCreated(String path) { - if (!isInProcedurePath(path)) { - return; - } + throws KeeperException { + this.zkController = new 
ZKProcedureUtil(watcher, procType) { + @Override + public void nodeCreated(String path) { + if (!isInProcedurePath(path)) { + return; + } - LOG.info("Received created event:" + path); - // if it is a simple start/end/abort then we just rewatch the node - if (isAcquiredNode(path)) { - waitForNewProcedures(); - return; - } else if (isAbortNode(path)) { - watchForAbortedProcedures(); - return; - } - String parent = ZKUtil.getParent(path); - // if its the end barrier, the procedure can be completed - if (isReachedNode(parent)) { - receivedReachedGlobalBarrier(path); - return; - } else if (isAbortNode(parent)) { - abort(path); - return; - } else if (isAcquiredNode(parent)) { - startNewSubprocedure(path); - } else { - LOG.debug("Ignoring created notification for node:" + path); - } + LOG.info("Received created event:" + path); + // if it is a simple start/end/abort then we just rewatch the node + if (isAcquiredNode(path)) { + waitForNewProcedures(); + return; + } else if (isAbortNode(path)) { + watchForAbortedProcedures(); + return; } + String parent = ZKUtil.getParent(path); + // if its the end barrier, the procedure can be completed + if (isReachedNode(parent)) { + receivedReachedGlobalBarrier(path); + return; + } else if (isAbortNode(parent)) { + abort(path); + return; + } else if (isAcquiredNode(parent)) { + startNewSubprocedure(path); + } else { + LOG.debug("Ignoring created notification for node:" + path); + } + } - @Override - public void nodeChildrenChanged(String path) { - if (path.equals(this.acquiredZnode)) { - LOG.info("Received procedure start children changed event: " + path); - waitForNewProcedures(); - } else if (path.equals(this.abortZnode)) { - LOG.info("Received procedure abort children changed event: " + path); - watchForAbortedProcedures(); - } + @Override + public void nodeChildrenChanged(String path) { + if (path.equals(this.acquiredZnode)) { + LOG.info("Received procedure start children changed event: " + path); + waitForNewProcedures(); + } else if (path.equals(this.abortZnode)) { + LOG.info("Received procedure abort children changed event: " + path); + watchForAbortedProcedures(); } - }; - } catch (KeeperException e) { - throw new IOException(e); - } + } + }; } public ZKProcedureUtil getZkController() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index bd65cc7..1aa959c 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -317,7 +317,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * @throws KeeperException if the zookeeper cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws IOException { + public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 36d2c53..eb0c9c0 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -829,7 +829,7 @@ public class HRegionServer extends HasThread implements rspmHost = new RegionServerProcedureManagerHost(); rspmHost.loadProcedures(conf); rspmHost.initialize(this); - } catch (IOException e) { + } catch (KeeperException e) { this.abort("Failed to reach coordination cluster when creating procedure handler.", e); } // register watcher for recovering regions diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index e56dd28..537329a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -390,7 +390,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * @throws KeeperException if the zookeeper cluster cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws IOException { + public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 3c744d6..fd66143 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -96,8 +96,6 @@ import com.lmax.disruptor.TimeoutException; import com.lmax.disruptor.dsl.Disruptor; import com.lmax.disruptor.dsl.ProducerType; - - /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. * Only one WAL is ever being written at a time. When a WAL hits a configured maximum size, diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java deleted file mode 100644 index 2b50d49..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package org.apache.hadoop.hbase.util;
-
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-@InterfaceAudience.Private
-public final class LogUtils {
-
-  private LogUtils() {
-  }
-  /**
-   * Disables Zk- and HBase client logging
-   * @param log
-   */
-  public static void disableUselessLoggers(Log log) {
-    // disable zookeeper log to avoid it mess up command output
-    Logger zkLogger = Logger.getLogger("org.apache.zookeeper");
-    zkLogger.setLevel(Level.OFF);
-    // disable hbase zookeeper tool log to avoid it mess up command output
-    Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper");
-    hbaseZkLogger.setLevel(Level.OFF);
-    // disable hbase client log to avoid it mess up command output
-    Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client");
-    hbaseClientLogger.setLevel(Level.OFF);
-  }
-}
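The deleted LogUtils helper existed only to keep ZooKeeper and HBase client log4j chatter out of command output. For callers that still need that behaviour after this removal, a minimal inline sketch using the same log4j 1.x API the deleted class relied on (the class name here is hypothetical, not part of the patch):

    // Sketch only: inline replacement for the removed LogUtils.disableUselessLoggers().
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public final class CommandOutputQuieter {   // hypothetical name
      private CommandOutputQuieter() {
      }

      public static void silenceClientLoggers() {
        // same loggers the deleted helper switched off
        Logger.getLogger("org.apache.zookeeper").setLevel(Level.OFF);
        Logger.getLogger("org.apache.hadoop.hbase.zookeeper").setLevel(Level.OFF);
        Logger.getLogger("org.apache.hadoop.hbase.client").setLevel(Level.OFF);
      }
    }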
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProcedureUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProcedureUtil.java
new file mode 100644
index 0000000..9741f4a
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ProcedureUtil.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class ProcedureUtil {
+  private static final Log LOG = LogFactory.getLog(ProcedureUtil.class);
+
+  public static ProcedureDescription buildProcedure(String signature, String instance,
+      Map<String, String> props) {
+    ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
+    builder.setSignature(signature).setInstance(instance);
+    for (Entry<String, String> entry : props.entrySet()) {
+      NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
+          .setValue(entry.getValue()).build();
+      builder.addConfiguration(pair);
+    }
+    ProcedureDescription desc = builder.build();
+    return desc;
+  }
+
+  public static long execProcedure(MasterProcedureManager mpm, String signature, String instance,
+      Map<String, String> props) throws IOException {
+    if (mpm == null) {
+      throw new IOException("The procedure is not registered: " + signature);
+    }
+    ProcedureDescription desc = buildProcedure(signature, instance, props);
+    mpm.execProcedure(desc);
+
+    // send back the max amount of time the client should wait for the procedure
+    // to complete
+    long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME;
+    return waitTime;
+  }
+
+  public static void waitForProcedure(MasterProcedureManager mpm, String signature, String instance,
+      Map<String, String> props, long max, int numRetries, long pause) throws IOException {
+    ProcedureDescription desc = buildProcedure(signature, instance, props);
+    long start = EnvironmentEdgeManager.currentTime();
+    long maxPauseTime = max / numRetries;
+    int tries = 0;
+    LOG.debug("Waiting a max of " + max + " ms for procedure '" +
+        signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
+    boolean done = false;
+    while (tries == 0
+        || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
+      try {
+        // sleep a backoff <= pauseTime amount
+        long sleep = HBaseAdmin.getPauseTime(tries++, pause);
+        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
+        LOG.debug("(#" + tries + ") Sleeping: " + sleep +
+          "ms while waiting for procedure completion.");
+        Thread.sleep(sleep);
+      } catch (InterruptedException e) {
+        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
+      }
+      LOG.debug("Getting current status of procedure from master...");
+      done = mpm.isProcedureDone(desc);
+    }
+    if (!done) {
+      throw new IOException("Procedure '" + signature + " : " + instance
+          + "' wasn't completed in expectedTime:" + max + " ms");
+    }
+  }
+}
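For context, a minimal sketch of how a master-side caller might drive the new ProcedureUtil helpers (the manager handle, the "flush-table-proc" signature, and the property key are assumptions for illustration, not part of the patch):

    // Sketch only: kick off a procedure and poll its manager until it finishes.
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
    import org.apache.hadoop.hbase.util.ProcedureUtil;

    public class ProcedureUtilUsageSketch {
      public static void runAndWait(MasterProcedureManager mpm) throws IOException {
        Map<String, String> props = new HashMap<String, String>();
        props.put("table", "testtable");   // assumed procedure property

        // execProcedure returns the suggested maximum time to wait for completion
        long maxWait = ProcedureUtil.execProcedure(mpm, "flush-table-proc", "testtable", props);

        // poll isProcedureDone() with backoff until done or the wait budget is exhausted
        ProcedureUtil.waitForProcedure(mpm, "flush-table-proc", "testtable", props,
            maxWait, 10 /* numRetries */, 100 /* pause ms */);
      }
    }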
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 2ab8cb8..ae19137 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2087,8 +2087,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
       put.addColumn(f, new byte[]{0}, new byte[]{0});
       t.put(put);
     }
-  } 
- 
+  }
+
   public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
       int replicaId)
       throws IOException {
@@ -3188,7 +3188,7 @@
     waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
     waitUntilAllRegionsAssigned(TableName.BACKUP_TABLE_NAME);
   }
- 
+
   /**
    * Wait until all regions for a table in hbase:meta have a non-empty
   * info:server, or until timeout. This means all regions have been deployed,
@@ -3486,6 +3486,7 @@
     public PortAllocator(Random random) {
       this.random = random;
       this.portChecker = new AvailablePortChecker() {
+        @Override
         public boolean available(int port) {
           try {
             ServerSocket sock = new ServerSocket(port);
@@ -3645,7 +3646,7 @@
     return createPreSplitLoadTestTable(conf, desc, new HColumnDescriptor[] {hcd},
         numRegionsPerServer);
   }
- 
+
   /**
    * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
@@ -3653,11 +3654,11 @@
   public static int createPreSplitLoadTestTable(Configuration conf,
       HTableDescriptor desc, HColumnDescriptor[] hcds, int numRegionsPerServer) throws IOException {
- 
-    return createPreSplitLoadTestTable(conf, desc, hcds, 
+
+    return createPreSplitLoadTestTable(conf, desc, hcds,
         new RegionSplitter.HexStringSplit(), numRegionsPerServer);
   }
- 
+
   /**
    * Creates a pre-split table for load testing. If the table already exists,
   * logs a warning and continues.
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
index cd2efad..7620bbb 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
@@ -49,7 +49,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
   private ProcedureMember member;
 
   @Override
-  public void initialize(RegionServerServices rss) throws IOException {
+  public void initialize(RegionServerServices rss) throws KeeperException {
     this.rss = rss;
     ZooKeeperWatcher zkw = rss.getZooKeeper();
     this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature());