diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
index 085a1c9..497f7ad 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -8944,6 +8944,36 @@ public final class BackupProtos {
* optional uint32 progress = 12;
*/
int getProgress();
+
+ // optional bytes job_id = 13;
+ /**
+ * optional bytes job_id = 13;
+ */
+ boolean hasJobId();
+ /**
+ * optional bytes job_id = 13;
+ */
+ com.google.protobuf.ByteString getJobId();
+
+ // required uint32 workers_number = 14;
+ /**
+ * required uint32 workers_number = 14;
+ */
+ boolean hasWorkersNumber();
+ /**
+ * required uint32 workers_number = 14;
+ */
+ int getWorkersNumber();
+
+ // required uint32 bandwidth = 15;
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ boolean hasBandwidth();
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ int getBandwidth();
}
/**
* Protobuf type {@code hbase.pb.BackupContext}
@@ -9077,6 +9107,21 @@ public final class BackupProtos {
progress_ = input.readUInt32();
break;
}
+ case 106: {
+ bitField0_ |= 0x00000800;
+ jobId_ = input.readBytes();
+ break;
+ }
+ case 112: {
+ bitField0_ |= 0x00001000;
+ workersNumber_ = input.readUInt32();
+ break;
+ }
+ case 120: {
+ bitField0_ |= 0x00002000;
+ bandwidth_ = input.readUInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -9667,6 +9712,54 @@ public final class BackupProtos {
return progress_;
}
+ // optional bytes job_id = 13;
+ public static final int JOB_ID_FIELD_NUMBER = 13;
+ private com.google.protobuf.ByteString jobId_;
+ /**
+ * optional bytes job_id = 13;
+ */
+ public boolean hasJobId() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * optional bytes job_id = 13;
+ */
+ public com.google.protobuf.ByteString getJobId() {
+ return jobId_;
+ }
+
+ // required uint32 workers_number = 14;
+ public static final int WORKERS_NUMBER_FIELD_NUMBER = 14;
+ private int workersNumber_;
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public boolean hasWorkersNumber() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public int getWorkersNumber() {
+ return workersNumber_;
+ }
+
+ // required uint32 bandwidth = 15;
+ public static final int BANDWIDTH_FIELD_NUMBER = 15;
+ private int bandwidth_;
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public boolean hasBandwidth() {
+ return ((bitField0_ & 0x00002000) == 0x00002000);
+ }
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public int getBandwidth() {
+ return bandwidth_;
+ }
+
private void initFields() {
backupId_ = "";
type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
@@ -9680,6 +9773,9 @@ public final class BackupProtos {
totalBytesCopied_ = 0L;
hlogTargetDir_ = "";
progress_ = 0;
+ jobId_ = com.google.protobuf.ByteString.EMPTY;
+ workersNumber_ = 0;
+ bandwidth_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -9710,6 +9806,14 @@ public final class BackupProtos {
memoizedIsInitialized = 0;
return false;
}
+ if (!hasWorkersNumber()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBandwidth()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
for (int i = 0; i < getBackupStatusMapCount(); i++) {
if (!getBackupStatusMap(i).isInitialized()) {
memoizedIsInitialized = 0;
@@ -9759,6 +9863,15 @@ public final class BackupProtos {
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt32(12, progress_);
}
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ output.writeBytes(13, jobId_);
+ }
+ if (((bitField0_ & 0x00001000) == 0x00001000)) {
+ output.writeUInt32(14, workersNumber_);
+ }
+ if (((bitField0_ & 0x00002000) == 0x00002000)) {
+ output.writeUInt32(15, bandwidth_);
+ }
getUnknownFields().writeTo(output);
}
@@ -9816,6 +9929,18 @@ public final class BackupProtos {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(12, progress_);
}
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(13, jobId_);
+ }
+ if (((bitField0_ & 0x00001000) == 0x00001000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(14, workersNumber_);
+ }
+ if (((bitField0_ & 0x00002000) == 0x00002000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(15, bandwidth_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -9896,6 +10021,21 @@ public final class BackupProtos {
result = result && (getProgress()
== other.getProgress());
}
+ result = result && (hasJobId() == other.hasJobId());
+ if (hasJobId()) {
+ result = result && getJobId()
+ .equals(other.getJobId());
+ }
+ result = result && (hasWorkersNumber() == other.hasWorkersNumber());
+ if (hasWorkersNumber()) {
+ result = result && (getWorkersNumber()
+ == other.getWorkersNumber());
+ }
+ result = result && (hasBandwidth() == other.hasBandwidth());
+ if (hasBandwidth()) {
+ result = result && (getBandwidth()
+ == other.getBandwidth());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -9957,6 +10097,18 @@ public final class BackupProtos {
hash = (37 * hash) + PROGRESS_FIELD_NUMBER;
hash = (53 * hash) + getProgress();
}
+ if (hasJobId()) {
+ hash = (37 * hash) + JOB_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getJobId().hashCode();
+ }
+ if (hasWorkersNumber()) {
+ hash = (37 * hash) + WORKERS_NUMBER_FIELD_NUMBER;
+ hash = (53 * hash) + getWorkersNumber();
+ }
+ if (hasBandwidth()) {
+ hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
+ hash = (53 * hash) + getBandwidth();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -10095,6 +10247,12 @@ public final class BackupProtos {
bitField0_ = (bitField0_ & ~0x00000400);
progress_ = 0;
bitField0_ = (bitField0_ & ~0x00000800);
+ jobId_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00001000);
+ workersNumber_ = 0;
+ bitField0_ = (bitField0_ & ~0x00002000);
+ bandwidth_ = 0;
+ bitField0_ = (bitField0_ & ~0x00004000);
return this;
}
@@ -10176,6 +10334,18 @@ public final class BackupProtos {
to_bitField0_ |= 0x00000400;
}
result.progress_ = progress_;
+ if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+ to_bitField0_ |= 0x00000800;
+ }
+ result.jobId_ = jobId_;
+ if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
+ to_bitField0_ |= 0x00001000;
+ }
+ result.workersNumber_ = workersNumber_;
+ if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
+ to_bitField0_ |= 0x00002000;
+ }
+ result.bandwidth_ = bandwidth_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -10259,6 +10429,15 @@ public final class BackupProtos {
if (other.hasProgress()) {
setProgress(other.getProgress());
}
+ if (other.hasJobId()) {
+ setJobId(other.getJobId());
+ }
+ if (other.hasWorkersNumber()) {
+ setWorkersNumber(other.getWorkersNumber());
+ }
+ if (other.hasBandwidth()) {
+ setBandwidth(other.getBandwidth());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -10288,6 +10467,14 @@ public final class BackupProtos {
return false;
}
+ if (!hasWorkersNumber()) {
+
+ return false;
+ }
+ if (!hasBandwidth()) {
+
+ return false;
+ }
for (int i = 0; i < getBackupStatusMapCount(); i++) {
if (!getBackupStatusMap(i).isInitialized()) {
@@ -11092,6 +11279,108 @@ public final class BackupProtos {
return this;
}
+ // optional bytes job_id = 13;
+ private com.google.protobuf.ByteString jobId_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes job_id = 13;
+ */
+ public boolean hasJobId() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * optional bytes job_id = 13;
+ */
+ public com.google.protobuf.ByteString getJobId() {
+ return jobId_;
+ }
+ /**
+ * optional bytes job_id = 13;
+ */
+ public Builder setJobId(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00001000;
+ jobId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes job_id = 13;
+ */
+ public Builder clearJobId() {
+ bitField0_ = (bitField0_ & ~0x00001000);
+ jobId_ = getDefaultInstance().getJobId();
+ onChanged();
+ return this;
+ }
+
+ // required uint32 workers_number = 14;
+ private int workersNumber_ ;
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public boolean hasWorkersNumber() {
+ return ((bitField0_ & 0x00002000) == 0x00002000);
+ }
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public int getWorkersNumber() {
+ return workersNumber_;
+ }
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public Builder setWorkersNumber(int value) {
+ bitField0_ |= 0x00002000;
+ workersNumber_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint32 workers_number = 14;
+ */
+ public Builder clearWorkersNumber() {
+ bitField0_ = (bitField0_ & ~0x00002000);
+ workersNumber_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // required uint32 bandwidth = 15;
+ private int bandwidth_ ;
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public boolean hasBandwidth() {
+ return ((bitField0_ & 0x00004000) == 0x00004000);
+ }
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public int getBandwidth() {
+ return bandwidth_;
+ }
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public Builder setBandwidth(int value) {
+ bitField0_ |= 0x00004000;
+ bandwidth_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint32 bandwidth = 15;
+ */
+ public Builder clearBandwidth() {
+ bitField0_ = (bitField0_ & ~0x00004000);
+ bandwidth_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext)
}
@@ -11184,7 +11473,7 @@ public final class BackupProtos {
"eName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010snapshot\030\003 " +
"\001(\t\"f\n\021TableBackupStatus\022\"\n\005table\030\001 \002(\0132" +
"\023.hbase.pb.TableName\022-\n\rbackup_status\030\002 " +
- "\002(\0132\026.hbase.pb.BackupStatus\"\321\004\n\rBackupCo" +
+ "\002(\0132\026.hbase.pb.BackupStatus\"\214\005\n\rBackupCo" +
"ntext\022\021\n\tbackup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024" +
".hbase.pb.BackupType\022\027\n\017target_root_dir\030",
"\003 \002(\t\0222\n\005state\030\004 \001(\0162#.hbase.pb.BackupCo" +
@@ -11194,15 +11483,16 @@ public final class BackupProtos {
"2\033.hbase.pb.TableBackupStatus\022\020\n\010start_t" +
"s\030\010 \002(\004\022\016\n\006end_ts\030\t \002(\004\022\032\n\022total_bytes_c" +
"opied\030\n \002(\003\022\027\n\017hlog_target_dir\030\013 \001(\t\022\020\n\010" +
- "progress\030\014 \001(\r\"P\n\013BackupState\022\013\n\007WAITING" +
- "\020\000\022\013\n\007RUNNING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020" +
- "\003\022\r\n\tCANCELLED\020\004\"}\n\013BackupPhase\022\013\n\007REQUE",
- "ST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n\023PREPARE_INCREMENTA" +
- "L\020\002\022\020\n\014SNAPSHOTCOPY\020\003\022\024\n\020INCREMENTAL_COP" +
- "Y\020\004\022\022\n\016STORE_MANIFEST\020\005*\'\n\nBackupType\022\010\n" +
- "\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apache.h" +
- "adoop.hbase.protobuf.generatedB\014BackupPr" +
- "otosH\001\210\001\001\240\001\001"
+ "progress\030\014 \001(\r\022\016\n\006job_id\030\r \001(\014\022\026\n\016worker" +
+ "s_number\030\016 \002(\r\022\021\n\tbandwidth\030\017 \002(\r\"P\n\013Bac" +
+ "kupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020\001\022\014\n\010CO",
+ "MPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED\020\004\"}\n\013B" +
+ "ackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n" +
+ "\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHOTCOPY\020\003" +
+ "\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_MANIFEST" +
+ "\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INCREMENTA" +
+ "L\020\001BB\n*org.apache.hadoop.hbase.protobuf." +
+ "generatedB\014BackupProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11262,7 +11552,7 @@ public final class BackupProtos {
internal_static_hbase_pb_BackupContext_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BackupContext_descriptor,
- new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "BackupStatusMap", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", });
+ new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "BackupStatusMap", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", "JobId", "WorkersNumber", "Bandwidth", });
return null;
}
};
diff --git hbase-protocol/src/main/protobuf/Backup.proto hbase-protocol/src/main/protobuf/Backup.proto
index 06f77d4..f15a483 100644
--- hbase-protocol/src/main/protobuf/Backup.proto
+++ hbase-protocol/src/main/protobuf/Backup.proto
@@ -100,6 +100,9 @@ message BackupContext {
required int64 total_bytes_copied = 10;
optional string hlog_target_dir = 11;
optional uint32 progress = 12;
+ optional bytes job_id = 13;
+ required uint32 workers_number = 14;
+ required uint32 bandwidth = 15;
enum BackupState {
WAITING = 0;
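A minimal sketch of populating the three new BackupContext fields through the regenerated protobuf builder; the class name, ids, and paths are placeholders, and the set of required fields follows the descriptor in this patch:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupContextProtoSketch {
      public static void main(String[] args) {
        BackupProtos.BackupContext pb = BackupProtos.BackupContext.newBuilder()
            .setBackupId("backup_1454012731657")              // placeholder: BACKUPID_PREFIX + timestamp
            .setType(BackupProtos.BackupType.FULL)
            .setTargetRootDir("hdfs://namenode:8020/backup")  // placeholder root dir
            .setStartTs(0L)
            .setEndTs(0L)
            .setTotalBytesCopied(0L)
            .setWorkersNumber(4)                              // new required field 14
            .setBandwidth(100)                                // new required field 15 (MB/sec per worker)
            .setJobId(ByteString.copyFromUtf8("job_0001"))    // new optional field 13, hypothetical value
            .build();
        System.out.println(pb.getWorkersNumber() + " workers, " + pb.getBandwidth() + " MB/sec");
      }
    }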
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
index 06a6cc1..345c53c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -21,13 +21,14 @@ import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
public interface BackupClient {
public void setConf(Configuration conf);
/**
- * Send backup request to server, and monitor the progress if necessary
+ * Sends backup request to server and monitors the progress if necessary
* @param backupType : full or incremental
* @param targetRootDir : the root path specified by user
* @param tableList : the table list specified by user
@@ -35,6 +36,93 @@ public interface BackupClient {
* @return backupId backup id
* @throws IOException exception
*/
- public String create(BackupType backupType, List tableList,
+ public String create(BackupType backupType, List tableList,
String targetRootDir, String snapshot) throws IOException;
- }
+
+ /**
+ * Sends backup request to server and monitor progress if necessary
+ * @param backupType : full or incremental
+ * @param tableList : table list
+ * @param targetRootDir: target root directory
+ * @param snapshot : use an existing snapshot, if specified
+ * @param workers : number of workers for parallel data copy
+ * @param bandwidth : maximum bandwidth (in MB/sec) of copy job
+ * @return backup id
+ * @throws IOException
+ */
+ public String create(BackupType backupType, List tableList,
+ String targetRootDir, String snapshot, int workers, int bandwidth) throws IOException;
+
+ /**
+ * Describe backup image command
+ * @param backupId - backup id
+ * @throws IOException
+ */
+ public void describeBackupImage(String backupId) throws IOException;
+
+ /**
+ * Show backup progress command
+ * @param backupId - backup id
+ * @throws IOException
+ */
+ public void showProgress(String backupId) throws IOException;
+
+ /**
+ * Delete backup image command
+ * @param backupIds - backup id
+ * @throws IOException
+ */
+ public void deleteBackups(String[] backupIds) throws IOException;
+
+ /**
+ * Cancel current active backup command
+ * @param backupId - backup id
+ * @throws IOException
+ */
+ public void cancelBackup(String backupId) throws IOException;
+
+ /**
+ * Show backup history command
+ * @param n - last n backup sessions
+ * @throws IOException
+ */
+ public void showHistory(int n) throws IOException;
+
+ /**
+ * Backup set list command
+ * @throws IOException
+ */
+ public void backupSetList() throws IOException;
+
+ /**
+ * Backup set describe command
+ * @param name set name
+ * @throws IOException
+ */
+ public void backupSetDescribe(String name) throws IOException;
+
+ /**
+ * Delete backup set command
+ * @param name - backup set name
+ * @throws IOException
+ */
+ public void backupSetDelete(String name) throws IOException;
+
+ /**
+ * Add tables to backup set command
+ * @param name set name
+ * @param tablesOrNamespaces - list of tables or namespaces to add
+ * @throws IOException
+ */
+ public void backupSetAdd(String name, String[] tablesOrNamespaces) throws IOException;
+
+ /**
+ * Remove tables from backup set
+ * @param name - backup set name
+ * @param tablesOrNamespaces - list of tables or namespaces to remove
+ * @throws IOException
+ */
+ public void backupSetRemove(String name, String[] tablesOrNamespaces) throws IOException;
+
+
+}
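A rough usage sketch of the extended BackupClient surface, assuming a client obtained from BackupRestoreFactory as in BackupCommands below; the backup id and class name are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupClient;
    import org.apache.hadoop.hbase.backup.BackupRestoreFactory;

    public class BackupClientUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        BackupClient client = BackupRestoreFactory.getBackupClient(conf);
        String backupId = "backup_1454012731657";        // placeholder id ("backup_" + timestamp)
        client.showProgress(backupId);                   // pass null to report the most recent ongoing session
        client.describeBackupImage(backupId);
        client.showHistory(10);                          // last 10 completed sessions
        client.deleteBackups(new String[] { backupId });
      }
    }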
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
index 8bd9bff..570636b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -25,6 +25,7 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupCommands;
import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
@@ -36,12 +37,19 @@ public class BackupDriver {
private static final Log LOG = LogFactory.getLog(BackupDriver.class);
private static Options opt;
+ private static Configuration conf;
public static void main(String[] args) throws IOException {
+ init();
parseAndRun(args);
- System.exit(0);
+ //System.exit(0);
}
+ public static void setConf(Configuration conf) {
+ BackupDriver.conf = conf;
+ }
+
protected static void init() throws IOException {
// define supported options
opt = new Options();
@@ -49,6 +57,10 @@ public class BackupDriver {
opt.addOption("all", false, "All tables");
opt.addOption("debug", false, "Enable debug loggings");
opt.addOption("t", true, "Table name");
+ opt.addOption("b", true, "Bandwidth (MB/s)");
+ opt.addOption("w", true, "Number of workers");
+ opt.addOption("n", true, "History length");
+ opt.addOption("set", true, "Backup set name");
// disable irrelevant loggers to avoid it mess up command output
LogUtils.disableUselessLoggers(LOG);
}
@@ -90,8 +102,11 @@ public class BackupDriver {
} else {
backupClientLogger.setLevel(Level.INFO);
}
-
- BackupCommands.createCommand(type, cmdline).execute();
+ BackupCommands.Command command = BackupCommands.createCommand(type, cmdline);
+ if( type == BackupCommand.CREATE && conf != null) {
+ ((BackupCommands.CreateCommand) command).setConf(conf);
+ }
+ command.execute();
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
index 3f68914..d96a2cb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
@@ -30,8 +30,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupClient;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
@@ -70,10 +73,12 @@ public final class BackupClientImpl implements BackupClient{
* @param tableList : tables to be backuped
* @param targetRootDir : specified by user
* @param snapshot : use existing snapshot if specified by user (for future jira)
+ * @param workers - number of parallel workers in copy data job
+ * @param bandwidth - maximum bandwidth per worker in MB/sec
* @throws IOException exception
*/
protected void requestBackup(String backupId, BackupType backupType, List tableList,
- String targetRootDir, String snapshot) throws IOException {
+ String targetRootDir, String snapshot, int workers, int bandwidth) throws IOException {
BackupContext backupContext = null;
if (snapshot != null) {
@@ -150,7 +155,8 @@ public final class BackupClientImpl implements BackupClient{
}
}
backupContext =
- backupManager.createBackupContext(backupId, backupType, tables, targetRootDir, snapshot);
+ backupManager.createBackupContext(backupId, backupType,
+ tables, targetRootDir, snapshot, workers, bandwidth);
backupManager.initialize();
backupManager.dispatchRequest(backupContext);
} catch (BackupException e) {
@@ -182,10 +188,16 @@ public final class BackupClientImpl implements BackupClient{
}
return sb.toString();
}
-
+
+ @Override
+ public String create(BackupType type, List tableList, String backupRootPath,
+ String snapshot) throws IOException {
+ return create(type, tableList, backupRootPath, snapshot, -1, -1);
+ }
+
@Override
public String create(BackupType backupType, List tableList, String backupRootPath,
- String snapshot) throws IOException {
+ String snapshot, int workers, int bandwidth) throws IOException {
String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
// check target path first, confirm it doesn't exist before backup
@@ -217,7 +229,7 @@ public final class BackupClientImpl implements BackupClient{
// table list specified for backup, trigger backup on specified tables
try {
- requestBackup(backupId, backupType, tableList, backupRootPath, snapshot);
+ requestBackup(backupId, backupType, tableList, backupRootPath, snapshot, workers, bandwidth);
} catch (RuntimeException e) {
String errMsg = e.getMessage();
if (errMsg != null
@@ -236,4 +248,218 @@ public final class BackupClientImpl implements BackupClient{
return backupId;
}
+ @Override
+ public void describeBackupImage(String backupId) throws IOException {
+ BackupContext backupContext = null;
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ System.out.println(backupContext.getShortDescription());
+ } else {
+ System.out.println("No information found for backupID=" + backupId);
+ }
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void showProgress(String backupId) throws IOException {
+ BackupContext backupContext = null;
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+
+ if (backupId == null) {
+ ArrayList recentSessions =
+ systemTable.getBackupContexts(BackupState.RUNNING);
+ if (recentSessions.isEmpty()) {
+ System.out.println("No ongoing sessions found.");
+ return;
+ }
+ // else show status for all ongoing sessions
+ // must be one maximum
+ for (BackupContext context : recentSessions) {
+ System.out.println(context.getStatusAndProgressAsString());
+ }
+ } else {
+
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ System.out.println(backupContext.getStatusAndProgressAsString());
+ } else {
+ System.out.println("No information found for backupID=" + backupId);
+ }
+ }
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void deleteBackups(String[] backupIds) throws IOException {
+ BackupContext backupContext = null;
+ String backupId = null;
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ for (int i = 0; i < backupIds.length; i++) {
+ backupId = backupIds[i];
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ BackupUtil.cleanupBackupData(backupContext, conf);
+ systemTable.deleteBackupStatus(backupContext.getBackupId());
+ System.out.println("Delete backup for backupID=" + backupId + " completed.");
+ } else {
+ System.out.println("Delete backup failed: no information found for backupID=" + backupId);
+ }
+ }
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void cancelBackup(String backupId) throws IOException {
+ // Kill distributed job if active
+ // Backup MUST not be in COMPLETE state
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ BackupContext backupContext = systemTable.readBackupStatus(backupId);
+ String errMessage = null;
+ if (backupContext != null && backupContext.getState() != BackupState.COMPLETE) {
+ BackupUtil.cleanupBackupData(backupContext, conf);
+ systemTable.deleteBackupStatus(backupContext.getBackupId());
+ byte[] jobId = backupContext.getJobId();
+ if(jobId != null) {
+ BackupCopyService service = BackupRestoreFactory.getBackupCopyService(conf);
+ service.cancelCopyJob(jobId);
+ } else{
+ errMessage = "Distributed Job ID is null for backup "+backupId +
+ " in "+ backupContext.getState() + " state.";
+ }
+ } else if( backupContext == null){
+ errMessage = "No information found for backupID=" + backupId;
+ } else {
+ errMessage = "Can not cancel "+ backupId + " in " + backupContext.getState()+" state";
+ }
+
+ if( errMessage != null) {
+ throw new IOException(errMessage);
+ }
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ // then clean backup image
+ deleteBackups(new String[] { backupId });
+ }
+
+ @Override
+ public void showHistory(int n) throws IOException {
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ List history = systemTable.getBackupHistory();
+
+ int max = Math.min(n, history.size());
+ for (int i = 0; i < max; i++) {
+ printBackupCompleteData(history.get(i));
+ }
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ private void printBackupCompleteData(BackupCompleteData backupCompleteData) {
+ System.out.println(backupCompleteData.toString());
+ }
+
+ @Override
+ public void backupSetList() throws IOException{
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ List list = systemTable.backupSetList();
+ for (String s : list) {
+ System.out.println(s);
+ }
+ System.out.println("Found " + list.size() + " records");
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void backupSetDescribe(String name) throws IOException{
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ String[] list = systemTable.backupSetDescribe(name);
+ for (String s : list) {
+ System.out.println(s);
+ }
+ System.out.println("Found " + list.length + " records");
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void backupSetDelete(String name) throws IOException {
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetDelete(name);
+ System.out.println("Deleted " + name);
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void backupSetAdd(String name, String[] tablesOrNamespaces) throws IOException {
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetAdd(name, tablesOrNamespaces);
+ System.out.println("Added tables to '" + name + "'");
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
+
+ @Override
+ public void backupSetRemove(String name, String[] tablesOrNamespaces) throws IOException {
+ BackupSystemTable systemTable = null;
+ try {
+ systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetRemove(name, tablesOrNamespaces);
+ System.out.println("Removed tables from '" + name + "'");
+ } finally {
+ if(systemTable != null) {
+ systemTable.close();
+ }
+ }
+ }
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 4182b8b..5e12b73 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* General backup commands, options and usage messages
*/
@@ -39,21 +41,61 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
@InterfaceStability.Evolving
public final class BackupCommands {
- private static final String USAGE = "Usage: hbase backup COMMAND\n"
- + "where COMMAND is one of:\n" + " create create a new backup image\n"
- + "Enter \'help COMMAND\' to see help message for each command\n";
-
- private static final String CREATE_CMD_USAGE =
- "Usage: hbase backup create [tables] [-s name] [-convert] "
- + "[-silent]\n" + " type \"full\" to create a full backup image;\n"
- + " \"incremental\" to create an incremental backup image\n"
- + " backup_root_path The full root path to store the backup image,\n"
- + " the prefix can be gpfs, hdfs or webhdfs\n" + " Options:\n"
- + " tables If no tables (\"\") are specified, all tables are backed up. "
- + "Otherwise it is a\n" + " comma separated list of tables.\n"
- + " -s name Use the specified snapshot for full backup\n"
- + " -convert For an incremental backup, convert WAL files to HFiles\n";
+
+ private static final String USAGE = "Usage: hbase backup COMMAND\n"
+ + "where COMMAND is one of:\n"
+ + " create create a new backup image\n"
+ + " cancel cancel an ongoing backup\n"
+ + " delete delete an existing backup image\n"
+ + " describe show the detailed information of a backup image\n"
+ + " history show history of all successful backups\n"
+ + " progress show the progress of the latest backup request\n"
+ + " convert convert incremental backup WAL files into HFiles\n"
+ + " merge merge backup images\n"
+ + " set backup set management\n"
+ + "Enter \'help COMMAND\' to see help message for each command\n";
+
+ private static final String CREATE_CMD_USAGE =
+ "Usage: hbase backup create [tables] [-s name] [-convert] "
+ + "[-silent] [-w workers] [-b bandwidth]\n" + " type \"full\" to create a full backup image;\n"
+ + " \"incremental\" to create an incremental backup image\n"
+ + " backup_root_path The full root path to store the backup image,\n"
+ + " the prefix can be gpfs, hdfs or webhdfs\n" + " Options:\n"
+ + " tables If no tables (\"\") are specified, all tables are backed up. "
+ + "Otherwise it is a\n" + " comma separated list of tables.\n"
+ + " -s name Use the specified snapshot for full backup\n"
+ + " -convert For an incremental backup, convert WAL files to HFiles\n"
+ + " -w number of parallel workers.\n"
+ + " -b bandwidth per worker (in MB/sec)\n";
+
+ private static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress \n"
+ + " backupId backup image id;\n";
+
+ private static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe \n"
+ + " backupId backup image id\n";
+
+ private static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [-n N]\n"
+ + " -n N show up to N last backup sessions, default - 10;\n";
+
+ private static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n"
+ + " backupId backup image id;\n";
+
+ private static final String CANCEL_CMD_USAGE = "Usage: hbase backup cancel \n"
+ + " backupId backup image id;\n";
+
+ private static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
+ + " name Backup set name\n"
+ + " tables If no tables (\"\") are specified, all tables will belong to the set. "
+ + "Otherwise it is a\n" + " comma separated list of tables.\n"
+ + "where COMMAND is one of:\n"
+ + " add add tables to a set, create set if needed\n"
+ + " remove remove tables from set\n"
+ + " list list all sets\n"
+ + " describe describe a backup set\n"
+ + " delete delete backup set\n";
+
+
public interface Command {
void execute() throws IOException;
}
@@ -65,24 +107,49 @@ public final class BackupCommands {
public static Command createCommand(BackupCommand type, CommandLine cmdline) {
Command cmd = null;
switch (type) {
- case CREATE:
- cmd = new CreateCommand(cmdline);
- break;
- case HELP:
- default:
- cmd = new HelpCommand(cmdline);
- break;
+ case CREATE:
+ cmd = new CreateCommand(cmdline);
+ break;
+ case DESCRIBE:
+ cmd = new DescribeCommand(cmdline);
+ break;
+ case PROGRESS:
+ cmd = new ProgressCommand(cmdline);
+ break;
+ case DELETE:
+ cmd = new DeleteCommand(cmdline);
+ break;
+ case CANCEL:
+ cmd = new CancelCommand(cmdline);
+ break;
+ case HISTORY:
+ cmd = new HistoryCommand(cmdline);
+ break;
+ case SET:
+ cmd = new BackupSetCommand(cmdline);
+ break;
+ case HELP:
+ default:
+ cmd = new HelpCommand(cmdline);
+ break;
}
return cmd;
}
- private static class CreateCommand implements Command {
+ public static class CreateCommand implements Command {
CommandLine cmdline;
+
+ Configuration conf;
CreateCommand(CommandLine cmdline) {
this.cmdline = cmdline;
}
-
+
+ @VisibleForTesting
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
@Override
public void execute() throws IOException {
if (cmdline == null || cmdline.getArgs() == null) {
@@ -105,22 +172,52 @@ public final class BackupCommands {
}
String snapshot = cmdline.hasOption('s') ? cmdline.getOptionValue('s') : null;
- String tables = (args.length == 3) ? args[2] : null;
+
+
+ String tables = null;
+ Configuration conf = this.conf != null ? this.conf : HBaseConfiguration.create();
+ // Check backup set
+ if (cmdline.hasOption("set")) {
+ String setName = cmdline.getOptionValue("set");
+ tables = getTablesForSet(setName, conf);
+ if (tables == null) throw new IOException("Backup set '" + setName
+ + "' is either empty or does not exist");
+ } else {
+ tables = (args.length == 3) ? args[2] : null;
+ }
+ int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1;
+ int workers = cmdline.hasOption('w') ? Integer.parseInt(cmdline.getOptionValue('w')) : -1;
+
try {
- Configuration conf = HBaseConfiguration.create();
BackupClient client = BackupRestoreFactory.getBackupClient(conf);
- client.create(BackupType.valueOf(args[0].toUpperCase()), toList(tables), args[1], snapshot);
+ client.create(BackupType.valueOf(args[0].toUpperCase()), toList(tables), args[1],
+ snapshot, workers, bandwidth);
} catch (RuntimeException e) {
System.out.println("ERROR: " + e.getMessage());
System.exit(-1);
}
}
+ private String getTablesForSet(String name, Configuration conf)
+ throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ String[] tables = table.backupSetDescribe(name);
+ if (tables == null) return null;
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < tables.length; i++) {
+ sb.append(tables[i]);
+ if (i < tables.length - 1) {
+ sb.append(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+ }
+ }
+ return sb.toString();
+ }
+
private List toList(String tables) {
String[] splits = tables.split(",");
List list = new ArrayList();
- for(String s: splits){
+ for (String s : splits) {
list.add(s);
}
return list;
@@ -157,9 +254,293 @@ public final class BackupCommands {
if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) {
System.out.println(CREATE_CMD_USAGE);
- } // other commands will be supported in future jira
+ } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(type)) {
+ System.out.println(DESCRIBE_CMD_USAGE);
+ } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(type)) {
+ System.out.println(HISTORY_CMD_USAGE);
+ } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(type)) {
+ System.out.println(PROGRESS_CMD_USAGE);
+ } else if (BackupCommand.DELETE.name().equalsIgnoreCase(type)) {
+ System.out.println(DELETE_CMD_USAGE);
+ }
+ } else if (BackupCommand.CANCEL.name().equalsIgnoreCase(type)) {
+ System.out.println(CANCEL_CMD_USAGE);
+ } else if (BackupCommand.SET.name().equalsIgnoreCase(type)) {
+ } else {
+ System.out.println("Unknown command : " + type);
+ System.out.println(USAGE);
+ }
System.exit(0);
}
}
+ private static class DescribeCommand implements Command {
+ CommandLine cmdline;
+
+ DescribeCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(DESCRIBE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length != 1) {
+ System.out.println("ERROR: wrong number of arguments");
+ System.out.println(DESCRIBE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String backupId = args[0];
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.describeBackupImage(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class ProgressCommand implements Command {
+ CommandLine cmdline;
+
+ ProgressCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("No backup id was specified, "
+ + "will retrieve the most recent (ongoing) sessions");
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length > 1) {
+ System.out.println("ERROR: wrong number of arguments: " + args.length);
+ System.out.println(PROGRESS_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String backupId = args == null || args.length == 0 ? null : args[0];
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.showProgress(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class DeleteCommand implements Command {
+ CommandLine cmdline;
+
+ DeleteCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("No backup id(s) were specified");
+ System.out.println(DELETE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.deleteBackups(args);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class CancelCommand implements Command {
+ CommandLine cmdline;
+
+ CancelCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("No backup id was specified, will use the most recent one");
+ }
+ String[] args = cmdline.getArgs();
+ String backupId = args == null || args.length == 0 ? null : args[0];
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.cancelBackup(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class HistoryCommand implements Command {
+ CommandLine cmdline;
+
+ HistoryCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+
+ int n =
+ cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 0 ? 10
+ : parseHistoryLength();
+ try {
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.showHistory(n);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+
+ private int parseHistoryLength() {
+ String value = cmdline.getOptionValue("n");
+ if (value == null) throw new RuntimeException("Invalid command line: option -n requires a value");
+ return Integer.parseInt(value);
+ }
+ }
+
+ private static class BackupSetCommand implements Command {
+ private final static String SET_ADD_CMD = "add";
+ private final static String SET_REMOVE_CMD = "remove";
+ private final static String SET_DELETE_CMD = "delete";
+ private final static String SET_DESCRIBE_CMD = "describe";
+ private final static String SET_LIST_CMD = "list";
+
+ CommandLine cmdline;
+
+ BackupSetCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+
+ // Command-line must have at least one element
+ if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 0) {
+ throw new IOException("Invalid command line: expected a 'set' sub-command (add/remove/list/describe/delete)");
+ }
+ String[] args = cmdline.getArgs();
+ String cmdStr = args[0];
+ BackupCommand cmd = getCommand(cmdStr);
+
+ try {
+
+ switch (cmd) {
+ case SET_ADD:
+ processSetAdd(args);
+ break;
+ case SET_REMOVE:
+ processSetRemove(args);
+ break;
+ case SET_DELETE:
+ processSetDelete(args);
+ break;
+ case SET_DESCRIBE:
+ processSetDescribe(args);
+ break;
+ case SET_LIST:
+ processSetList(args);
+ break;
+ default:
+ break;
+
+ }
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+
+ private void processSetList(String[] args) throws IOException {
+ // List all backup set names
+ // does not expect any args
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.backupSetList();
+ }
+
+ private void processSetDescribe(String[] args) throws IOException {
+ if (args == null || args.length != 2) {
+ throw new RuntimeException("Wrong args");
+ }
+ String setName = args[1];
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.backupSetDescribe(setName);
+ }
+
+ private void processSetDelete(String[] args) throws IOException {
+ if (args == null || args.length != 2) {
+ throw new RuntimeException("Wrong args");
+ }
+ String setName = args[1];
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.backupSetDelete(setName);
+ }
+
+ private void processSetRemove(String[] args) throws IOException {
+ if (args == null || args.length != 3) {
+ throw new RuntimeException("Wrong args");
+ }
+ String setName = args[1];
+ String[] tables = args[2].split(",");
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.backupSetRemove(setName, tables);
+ }
+
+ private void processSetAdd(String[] args) throws IOException {
+ if (args == null || args.length != 3) {
+ throw new RuntimeException("Wrong args");
+ }
+ String setName = args[1];
+ String[] tables = args[2].split(",");
+ Configuration conf = HBaseConfiguration.create();
+ BackupClient client = BackupRestoreFactory.getBackupClient(conf);
+ client.backupSetAdd(setName, tables);
+ }
+
+ private BackupCommand getCommand(String cmdStr) throws IOException {
+ if (cmdStr.equals(SET_ADD_CMD)) {
+ return BackupCommand.SET_ADD;
+ } else if (cmdStr.equals(SET_REMOVE_CMD)) {
+ return BackupCommand.SET_REMOVE;
+ } else if (cmdStr.equals(SET_DELETE_CMD)) {
+ return BackupCommand.SET_DELETE;
+ } else if (cmdStr.equals(SET_DESCRIBE_CMD)) {
+ return BackupCommand.SET_DESCRIBE;
+ } else if (cmdStr.equals(SET_LIST_CMD)) {
+ return BackupCommand.SET_LIST;
+ } else {
+ throw new IOException("Unknown command for 'set' :" + cmdStr);
+ }
+ }
+
+ }
+
}
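A sketch of how a parsed command line is expected to reach these commands, mirroring BackupDriver; the option setup and argument values are placeholders:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.PosixParser;
    import org.apache.hadoop.hbase.backup.impl.BackupCommands;
    import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;

    public class BackupCommandSketch {
      public static void main(String[] args) throws Exception {
        Options opts = new Options();
        opts.addOption("n", true, "History length");
        // e.g. "hbase backup describe backup_1454012731657" ends up roughly as:
        CommandLine cmdline = new PosixParser().parse(opts, new String[] { "backup_1454012731657" });
        BackupCommands.createCommand(BackupCommand.DESCRIBE, cmdline).execute();
      }
    }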
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
index 547e984..1458db1 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -38,6 +40,8 @@ import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
+import com.google.protobuf.ByteString;
+
/**
* An object to encapsulate the information for each backup request
*/
@@ -45,42 +49,6 @@ import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus
@InterfaceStability.Evolving
public class BackupContext implements Serializable {
- public Map getBackupStatusMap() {
- return backupStatusMap;
- }
-
- public void setBackupStatusMap(Map backupStatusMap) {
- this.backupStatusMap = backupStatusMap;
- }
-
- public HashMap> getTableSetTimestampMap() {
- return tableSetTimestampMap;
- }
-
- public void setTableSetTimestampMap(HashMap> tableSetTimestampMap) {
- this.tableSetTimestampMap = tableSetTimestampMap;
- }
-
- public String getHlogTargetDir() {
- return hlogTargetDir;
- }
-
- public void setType(BackupType type) {
- this.type = type;
- }
-
- public void setTargetRootDir(String targetRootDir) {
- this.targetRootDir = targetRootDir;
- }
-
- public void setTotalBytesCopied(long totalBytesCopied) {
- this.totalBytesCopied = totalBytesCopied;
- }
-
- public void setCancelled(boolean cancelled) {
- this.state = BackupState.CANCELLED;;
- }
-
private static final long serialVersionUID = 2401435114454300992L;
// backup id: a timestamp when we request the backup
@@ -124,8 +92,16 @@ public class BackupContext implements Serializable {
transient private HashMap> tableSetTimestampMap;
// backup progress in %% (0-100)
-
private int progress;
+
+ // distributed job id
+ private byte[] jobId;
+
+ // Number of parallel workers. -1 - system defined
+ private int workers = -1;
+
+ // Bandwidth per worker in MB per sec. -1 - unlimited
+ private int bandwidth = -1;
public BackupContext() {
}
@@ -153,6 +129,66 @@ public class BackupContext implements Serializable {
}
+ public byte[] getJobId() {
+ return jobId;
+ }
+
+ public void setJobId(byte[] jobId) {
+ this.jobId = jobId;
+ }
+
+ public int getWorkers() {
+ return workers;
+ }
+
+ public void setWorkers(int workers) {
+ this.workers = workers;
+ }
+
+ public int getBandwidth() {
+ return bandwidth;
+ }
+
+ public void setBandwidth(int bandwidth) {
+ this.bandwidth = bandwidth;
+ }
+
+ public Map getBackupStatusMap() {
+ return backupStatusMap;
+ }
+
+ public void setBackupStatusMap(Map backupStatusMap) {
+ this.backupStatusMap = backupStatusMap;
+ }
+
+ public HashMap> getTableSetTimestampMap() {
+ return tableSetTimestampMap;
+ }
+
+ public void setTableSetTimestampMap(HashMap> tableSetTimestampMap) {
+ this.tableSetTimestampMap = tableSetTimestampMap;
+ }
+
+ public String getHlogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public void setType(BackupType type) {
+ this.type = type;
+ }
+
+ public void setTargetRootDir(String targetRootDir) {
+ this.targetRootDir = targetRootDir;
+ }
+
+ public void setTotalBytesCopied(long totalBytesCopied) {
+ this.totalBytesCopied = totalBytesCopied;
+ }
+
+ public void setCancelled(boolean cancelled) {
+ this.state = BackupState.CANCELLED;
+ }
+
/**
* Set progress string
* @param msg progress message
@@ -367,6 +403,11 @@ public class BackupContext implements Serializable {
builder.setTargetRootDir(getTargetRootDir());
builder.setTotalBytesCopied(getTotalBytesCopied());
builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
+ builder.setWorkersNumber(workers);
+ builder.setBandwidth(bandwidth);
+ if(jobId != null) {
+ builder.setJobId(ByteString.copyFrom(jobId));
+ }
byte[] data = builder.build().toByteArray();
return data;
@@ -410,7 +451,12 @@ public class BackupContext implements Serializable {
context.setStartTs(proto.getStartTs());
context.setTargetRootDir(proto.getTargetRootDir());
context.setTotalBytesCopied(proto.getTotalBytesCopied());
- context.setType(BackupType.valueOf(proto.getType().name()));
+ context.setType(BackupType.valueOf(proto.getType().name()));
+ context.setWorkers(proto.getWorkersNumber());
+ context.setBandwidth(proto.getBandwidth());
+ if(proto.hasJobId()){
+ context.setJobId(proto.getJobId().toByteArray());
+ }
return context;
}
@@ -423,4 +469,36 @@ public class BackupContext implements Serializable {
return map;
}
+ public String getShortDescription() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("ID : " + backupId).append("\n");
+ sb.append("Tables : " + getTableListAsString()).append("\n");
+ sb.append("State : " + getState()).append("\n");
+ Date date = null;
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(getStartTs());
+ date = cal.getTime();
+ sb.append("Start time : " + date).append("\n");
+ if (state == BackupState.FAILED) {
+ sb.append("Failed message : " + getFailedMsg()).append("\n");
+ } else if (state == BackupState.RUNNING) {
+ sb.append("Phase : " + getPhase()).append("\n");
+ sb.append("Progress : " + getProgress()).append("\n");
+ } else if (state == BackupState.COMPLETE) {
+ cal = Calendar.getInstance();
+ cal.setTimeInMillis(getEndTs());
+ date = cal.getTime();
+ sb.append("End time : " + date).append("\n");
+ }
+ return sb.toString();
+ }
+
+ public String getStatusAndProgressAsString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("id: ").append(getBackupId()).append(" state: ").append(getState())
+ .append(" progress: ").append(getProgress());
+ return sb.toString();
+ }
+
+
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
index 1a0d5ed..c26f4c0 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
@@ -27,11 +27,28 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
+//TODO re-factor move up
public interface BackupCopyService extends Configurable {
static enum Type {
FULL, INCREMENTAL
}
+ /**
+ * Copy backup data
+ * @param backupHandler - handler
+ * @param conf - configuration
+ * @param copyType - copy type
+ * @param options - list of options
+ * @return result (0 - success)
+ * @throws IOException
+ */
public int copy(BackupHandler backupHandler, Configuration conf, BackupCopyService.Type copyType,
String[] options) throws IOException;
+
+ /**
+ * TODO refactor
+ * @param jobHandler - copy job handler
+ * @throws IOException
+ */
+ public void cancelCopyJob(byte[] jobHandler) throws IOException;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
index 0206846..18d9f72 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
@@ -478,11 +478,26 @@ public class BackupHandler implements Callable {
// TODO this below
// backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
int res = 0;
- String[] args = new String[4];
+ int argsLen = 4;
+ if (backupContext.getWorkers() > 0) argsLen += 2;
+ if (backupContext.getBandwidth() > 0) argsLen += 2;
+ String[] args = new String[argsLen];
args[0] = "-snapshot";
args[1] = backupContext.getSnapshotName(table);
args[2] = "-copy-to";
args[3] = backupContext.getBackupStatus(table).getTargetDir();
+ if (backupContext.getWorkers() > 0 && backupContext.getBandwidth() > 0) {
+ args[4] = "-mappers";
+ args[5] = Integer.toString(backupContext.getWorkers());
+ args[6] = "-bandwidth";
+ args[7] = Integer.toString(backupContext.getBandwidth());
+ } else if (backupContext.getWorkers() > 0) {
+ args[4] = "-mappers";
+ args[5] = Integer.toString(backupContext.getWorkers());
+ } else if (backupContext.getBandwidth() > 0) {
+ args[4] = "-bandwidth";
+ args[5] = Integer.toString(backupContext.getBandwidth());
+ }
LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
res = copyService.copy(this, conf, BackupCopyService.Type.FULL, args);
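With, say, workers=4 and bandwidth=100 the branch above assembles the argument vector handed to the copy service in roughly the following shape; the snapshot name and target directory below are hypothetical:

    public class SnapshotCopyArgsSketch {
      public static void main(String[] a) {
        int workers = 4, bandwidth = 100;                         // assumed settings
        String snapshotName = "snapshot_backup_t1";               // hypothetical snapshot name
        String targetDir = "hdfs://namenode:8020/backup/t1";      // hypothetical target dir
        String[] args = { "-snapshot", snapshotName, "-copy-to", targetDir,
            "-mappers", Integer.toString(workers),                // from backupContext.getWorkers()
            "-bandwidth", Integer.toString(bandwidth) };          // from backupContext.getBandwidth()
        System.out.println(String.join(" ", args));
      }
    }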
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 578bada..e938335 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -185,11 +185,13 @@ public class BackupManager {
* @param tablelist table list
* @param targetRootDir root dir
* @param snapshot snapshot name
+ * @param workers - number of parallel workers
+ * @param bandwidth - maximum bandwidth per worker
* @return BackupContext context
* @throws BackupException exception
*/
protected BackupContext createBackupContext(String backupId, BackupType type, String tablelist,
- String targetRootDir, String snapshot) throws BackupException {
+ String targetRootDir, String snapshot, int workers, int bandwidth) throws BackupException {
if (targetRootDir == null) {
throw new BackupException("Wrong backup request parameter: target backup root directory");
@@ -224,9 +226,12 @@ public class BackupManager {
}
// there are one or more tables in the table list
- return new BackupContext(backupId, type,
+ BackupContext context = new BackupContext(backupId, type,
tablelist.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND), targetRootDir,
snapshot);
+ context.setWorkers(workers);
+ context.setBandwidth(bandwidth);
+ return context;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
index d0ce059..7233bfa 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
@@ -37,7 +37,8 @@ public final class BackupRestoreConstants {
public static final String BACKUPID_PREFIX = "backup_";
public static enum BackupCommand {
- CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP,
+ CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET,
+ SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST
}
private BackupRestoreConstants() {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 11df2fa..a7bce76 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -725,4 +725,189 @@ public final class BackupSystemTable {
}
}
}
+
+ /**
+ * BACKUP SETS
+ */
+
+ /**
+ * Get backup set list
+ * @return backup set list
+ * @throws IOException
+ */
+ public List backupSetList() throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(" backup set list");
+ }
+ List list = new ArrayList();
+ Table table = null;
+ ResultScanner scanner = null;
+ try {
+ table = connection.getTable(tableName);
+ Scan scan = BackupSystemTableHelper.createScanForBackupSetList();
+ scan.setMaxVersions(1);
+ scanner = table.getScanner(scan);
+ Result res = null;
+ while ((res = scanner.next()) != null) {
+ res.advance();
+ list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current()));
+ }
+ return list;
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
+
+ /**
+ * Get backup set description (list of tables)
+ * @param name backup set name
+ * @return list of tables in a backup set
+ * @throws IOException
+ */
+ public String[] backupSetDescribe(String name) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(" backup set describe: "+name);
+ }
+ Table table = null;
+ try {
+ table = connection.getTable(tableName);
+ Get get = BackupSystemTableHelper.createGetForBackupSet(name);
+ Result res = table.get(get);
+ if(res.isEmpty()) return null;
+ res.advance();
+ String[] tables =
+ BackupSystemTableHelper.cellValueToBackupSet(res.current());
+ return tables;
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
+
+ /**
+ * Add backup set (list of tables)
+ * @param name set name
+ * @param newTables list of tables to add
+ * @throws IOException
+ */
+ public void backupSetAdd(String name, String[] newTables) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(" backup set add: "+name);
+ }
+ Table table = null;
+ String[] union = null;
+ try {
+ table = connection.getTable(tableName);
+ Get get = BackupSystemTableHelper.createGetForBackupSet(name);
+ Result res = table.get(get);
+ if(res.isEmpty()) {
+ union = newTables;
+ } else {
+ res.advance();
+ String[] tables =
+ BackupSystemTableHelper.cellValueToBackupSet(res.current());
+ union = merge(tables, newTables);
+ }
+ Put put = BackupSystemTableHelper.createPutForBackupSet(name, union);
+ table.put(put);
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
+
+ private String[] merge(String[] tables, String[] newTables) {
+ List<String> list = new ArrayList<String>();
+ // Add all from tables
+ for(String t: tables){
+ list.add(t);
+ }
+ for(String nt: newTables){
+ if(list.contains(nt)) continue;
+ list.add(nt);
+ }
+ String[] arr = new String[list.size()];
+ list.toArray(arr);
+ return arr;
+ }
+
+ /**
+ * Remove tables from backup set (list of tables)
+ * @param name set name
+ * @param toRemove list of tables to remove
+ * @throws IOException
+ */
+ public void backupSetRemove(String name, String[] toRemove) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(" backup set describe: "+name);
+ }
+ Table table = null;
+ String[] disjoint = null;
+ try {
+ table = connection.getTable(tableName);
+ Get get = BackupSystemTableHelper.createGetForBackupSet(name);
+ Result res = table.get(get);
+ if(res.isEmpty()) {
+ return;
+ } else {
+ res.advance();
+ String[] tables =
+ BackupSystemTableHelper.cellValueToBackupSet(res.current());
+ disjoint = disjoin(tables, toRemove);
+ }
+ if (disjoint.length > 0) {
+ Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint);
+ table.put(put);
+ } else {
+ // No tables left in the set: delete the set itself
+ backupSetDelete(name);
+ }
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
+
+
+ private String[] disjoin(String[] tables, String[] toRemove) {
+ List<String> list = new ArrayList<String>();
+ // Add all from tables
+ for(String t: tables){
+ list.add(t);
+ }
+ for(String nt: toRemove){
+ if(list.contains(nt)){
+ list.remove(nt);
+ }
+ }
+ String[] arr = new String[list.size()];
+ list.toArray(arr);
+ return arr;
+ }
+
+ /**
+ * Delete backup set
+ * @param name set's name
+ * @throws IOException
+ */
+ public void backupSetDelete(String name) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(" backup set delete: "+name);
+ }
+ Table table = null;
+ try {
+ table = connection.getTable(tableName);
+ Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name);
+ table.delete(del);
+ } finally {
+ if (table != null) {
+ table.close();
+ }
+ }
+ }
}
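
Taken together, the methods above give BackupSystemTable a small named-set API. A minimal usage sketch, assuming the same BackupSystemTable.getTable(conf) entry point the tests below rely on:

  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;

  public class BackupSetSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      BackupSystemTable sysTable = BackupSystemTable.getTable(conf);

      sysTable.backupSetAdd("nightly", new String[] { "table1", "table2" }); // create or extend a set
      String[] members = sysTable.backupSetDescribe("nightly");              // -> {"table1", "table2"}
      sysTable.backupSetRemove("nightly", new String[] { "table2" });        // drop a member
      List<String> sets = sysTable.backupSetList();                          // all set names
      sysTable.backupSetDelete("nightly");                                   // remove the whole set
      System.out.println(members.length + " members, " + sets.size() + " sets");
    }
  }

Note that backupSetDescribe() returns null for a set that does not exist, which is exactly what testBackupSetDelete below asserts.
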
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
index e1e78f1..79ebd43 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java
@@ -59,6 +59,7 @@ public final class BackupSystemTableHelper {
private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm.";
private final static String RS_LOG_TS_PREFIX = "rslogts.";
private final static String WALS_PREFIX = "wals.";
+ private final static String SET_KEY_PREFIX = "set.";
private final static byte[] col1 = "col1".getBytes();
private final static byte[] col2 = "col2".getBytes();
@@ -324,4 +325,93 @@ public final class BackupSystemTableHelper {
return get;
}
+
+ /**
+ * Creates Scan operation to load backup set list
+ * @return scan operation
+ */
+ static Scan createScanForBackupSetList() {
+ Scan scan = new Scan();
+ byte[] startRow = SET_KEY_PREFIX.getBytes();
+ byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
+ stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
+ scan.setStartRow(startRow);
+ scan.setStopRow(stopRow);
+ scan.addFamily(BackupSystemTable.familyName);
+ return scan;
+ }
+
+ /**
+ * Creates Get operation to load backup set content
+ * @return get operation
+ */
+ static Get createGetForBackupSet(String name) {
+ byte[] row = (SET_KEY_PREFIX + name).getBytes();
+ Get get = new Get(row);
+ get.addFamily(BackupSystemTable.familyName);
+ return get;
+ }
+
+ /**
+ * Creates Delete operation to delete backup set content
+ * @return delete operation
+ */
+ static Delete createDeleteForBackupSet(String name) {
+ byte[] row = (SET_KEY_PREFIX + name).getBytes();
+ Delete del = new Delete(row);
+ del.addFamily(BackupSystemTable.familyName);
+ return del;
+ }
+
+
+ /**
+ * Creates Put operation to update backup set content
+ * @return put operation
+ */
+ static Put createPutForBackupSet(String name, String[] tables) {
+ byte[] row = (SET_KEY_PREFIX + name).getBytes();
+ Put put = new Put(row);
+ byte[] value = convertToByteArray(tables);
+ put.addColumn(BackupSystemTable.familyName, col1, value);
+ return put;
+ }
+
+ private static byte[] convertToByteArray(String[] tables) {
+ StringBuffer sb = new StringBuffer();
+ for(int i=0; i < tables.length; i++){
+ sb.append(tables[i]);
+ if(i < tables.length -1){
+ sb.append(",");
+ }
+ }
+ return sb.toString().getBytes();
+ }
+
+
+ /**
+ * Converts cell to backup set list.
+ * @param current - cell
+ * @return backup set
+ * @throws IOException
+ */
+ static String[] cellValueToBackupSet(Cell current) throws IOException {
+ byte[] data = CellUtil.cloneValue(current);
+ if( data != null && data.length > 0){
+ return new String(data).split(",");
+ } else{
+ return new String[0];
+ }
+ }
+
+ /**
+ * Converts cell key to backup set name.
+ * @param current - cell
+ * @return backup set name
+ * @throws IOException
+ */
+ static String cellKeyToBackupSetName(Cell current) throws IOException {
+ byte[] data = CellUtil.cloneRow(current);
+ return new String(data).substring(SET_KEY_PREFIX.length());
+ }
+
}
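
The helper keys every set row with the "set." prefix; createScanForBackupSetList() bounds its scan by copying that prefix and incrementing the last byte to form an exclusive stop row. A self-contained sketch of that row-key convention (plain Java, no HBase dependencies):

  import java.util.Arrays;

  public class SetKeyDemo {
    public static void main(String[] args) {
      String SET_KEY_PREFIX = "set.";                        // same prefix as above
      byte[] row = (SET_KEY_PREFIX + "nightly").getBytes();  // row key for set "nightly"

      byte[] startRow = SET_KEY_PREFIX.getBytes();
      byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
      stopRow[stopRow.length - 1] += 1;                      // "set." -> "set/" (exclusive upper bound)

      System.out.println(new String(row));                                      // set.nightly
      System.out.println(new String(startRow) + " .. " + new String(stopRow));  // set. .. set/
      // cellKeyToBackupSetName() reverses the mapping by stripping the prefix:
      System.out.println(new String(row).substring(SET_KEY_PREFIX.length()));   // nightly
    }
  }
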
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
index d6cb3f4..ab28288 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java
@@ -24,6 +24,7 @@ import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
@@ -397,9 +399,36 @@ public final class BackupUtil {
new Long(o.getBackupToken().substring(o.getBackupToken().lastIndexOf("_") + 1));
return thisTS.compareTo(otherTS);
}
+
+ public String toString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("BackupId =").append(backupToken).append("\n");
+ sb.append("Type =").append(type).append("\n");
+ sb.append("Start time =").append(new Date(Long.parseLong(startTime))).append("\n");
+ sb.append("End time =").append(new Date(Long.parseLong(endTime))).append("\n");
+ sb.append("Snapshot =").append(fromExistingSnapshot).append("\n");
+ sb.append("Root path =").append(backupRootPath).append("\n");
+ sb.append("Type =").append(type).append("\n");
+ sb.append("Table list =").append(tableList).append("\n");
+ sb.append("Bytes copied =").append(type).append("\n");
+ sb.append("Ancestors =").append(BackupUtil.merge(ancestors)).append("\n\n");
+ return sb.toString();
+ }
}
+ public static String merge(List<String> list) {
+ if (list == null || list.size() == 0) return "";
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < list.size(); i++) {
+ sb.append(list.get(i));
+ if (i < list.size() - 1) {
+ sb.append(",");
+ }
+ }
+ return sb.toString();
+ }
+
/**
* Sort history list by start time in descending order.
* @param historyList history list
@@ -549,4 +578,76 @@ public final class BackupUtil {
}
return tableNames;
}
+
+ public static void cleanupBackupData(BackupContext context, Configuration conf)
+ throws IOException
+ {
+ cleanupHLogDir(context, conf);
+ cleanupTargetDir(context, conf);
+ }
+
+ /**
+ * Clean up directories generated by DistCp when copying hlogs.
+ * @throws IOException
+ */
+ private static void cleanupHLogDir(BackupContext backupContext, Configuration conf)
+ throws IOException {
+
+ String logDir = backupContext.getHLogTargetDir();
+ if (logDir == null) {
+ LOG.warn("No log directory specified for " + backupContext.getBackupId());
+ return;
+ }
+
+ Path rootPath = new Path(logDir).getParent();
+ FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+ FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+ if (files == null) {
+ return;
+ }
+ for (FileStatus file : files) {
+ LOG.debug("Delete log files: " + file.getPath().getName());
+ FSUtils.delete(fs, file.getPath(), true);
+ }
+ }
+
+ /**
+ * Clean up the data at target directory
+ */
+ private static void cleanupTargetDir(BackupContext backupContext, Configuration conf) {
+ try {
+ // clean up the data at target directory
+ LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId());
+ String targetDir = backupContext.getTargetRootDir();
+ if (targetDir == null) {
+ LOG.warn("No target directory specified for " + backupContext.getBackupId());
+ return;
+ }
+
+ FileSystem outputFs =
+ FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
+
+ for (String table : backupContext.getTables()) {
+ Path targetDirPath =
+ new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
+ backupContext.getBackupId(), table));
+ if (outputFs.delete(targetDirPath, true)) {
+ LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
+ } else {
+ LOG.info("No data has been found in " + targetDirPath.toString() + ".");
+ }
+
+ Path tableDir = targetDirPath.getParent();
+ FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
+ if (backups == null || backups.length == 0) {
+ outputFs.delete(tableDir, true);
+ LOG.debug(tableDir.toString() + " is empty, remove it.");
+ }
+ }
+
+ } catch (IOException e1) {
+ LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at "
+ + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+ }
+ }
}
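
The new static merge() is simply a comma join over the ancestor list used by BackupCompleteData.toString(); a small sketch of its behavior:

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hbase.backup.impl.BackupUtil;

  public class MergeDemo {
    public static void main(String[] args) {
      List<String> ancestors = Arrays.asList("backup_001", "backup_002", "backup_003");
      // Joins elements with commas; returns "" for a null or empty list.
      System.out.println(BackupUtil.merge(ancestors)); // backup_001,backup_002,backup_003
    }
  }
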
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
index ab1fb25..7c46f57 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.backup.mapreduce;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
@@ -35,7 +37,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
+import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptions;
@@ -289,4 +293,26 @@ public class MapReduceBackupCopyService implements BackupCopyService {
}
}
+ @Override
+ public void cancelCopyJob(byte[] jobId) throws IOException {
+ JobID id = new JobID();
+ id.readFields(new DataInputStream(new ByteArrayInputStream(jobId)));
+ Cluster cluster = new Cluster(getConf());
+ try {
+ Job job = cluster.getJob(id);
+ if (job == null) {
+ LOG.error("No job found for " + id);
+ // job already gone; nothing to cancel
+ return;
+ }
+ if (job.isComplete() || job.isRetired()) {
+ return;
+ }
+
+ job.killJob();
+ LOG.debug("Killed job " + id);
+ } catch (InterruptedException e) {
+ throw new IOException(e);
+ }
+ }
+
}
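
cancelCopyJob() rebuilds a JobID from raw bytes via readFields(). The producing side is outside this file, but since JobID is a Writable, the bytes could plausibly be produced as below; this is a sketch, not the patch's actual producer.

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.mapreduce.Job;

  public class JobIdBytes {
    // Serialize a running job's id into the byte[] form cancelCopyJob() expects.
    public static byte[] toBytes(Job job) throws IOException {
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      DataOutputStream dos = new DataOutputStream(baos);
      job.getJobID().write(dos); // JobID is a Writable; mirrored by JobID.readFields() above
      dos.flush();
      return baos.toByteArray();
    }
  }
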
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index c75734c..064438d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -33,11 +33,9 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
-import org.apache.hadoop.hbase.backup.impl.BackupClientImpl;
import org.apache.hadoop.hbase.backup.impl.BackupContext;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl;
import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager;
import org.apache.hadoop.hbase.client.Connection;
@@ -115,9 +113,6 @@ public class TestBackupBase {
LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
-
- //BackupClientImpl.setConf(conf1);
- //RestoreClientImpl.setConf(conf1);
createTables();
}
@@ -187,7 +182,7 @@ public class TestBackupBase {
}
private BackupContext getBackupContext(String backupId) throws IOException {
- Configuration conf = conf1;//BackupClientImpl.getConf();
+ Configuration conf = conf1;
BackupSystemTable table = BackupSystemTable.getTable(conf);
BackupContext status = table.readBackupStatus(backupId);
return status;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
new file mode 100644
index 0000000..b2fe3e2
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupDelete extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupDelete.class);
+
+ /**
+ * Verify that full backup is created on a single table with data correctly. Verify that backup
+ * delete works as expected.
+ * @throws Exception
+ */
+ @Test
+ public void testBackupDelete() throws Exception {
+ LOG.info("test backup delete on a single table with data");
+ List tableList = toList(table1.getNameAsString());
+ String backupId = getBackupClient().create(BackupType.FULL, tableList,
+ BACKUP_ROOT_DIR, null);
+ assertTrue(checkSucceeded(backupId));
+ LOG.info("backup complete");
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(baos));
+
+ String[] backupIds = new String[] { backupId };
+ getBackupClient().deleteBackups(backupIds);
+
+ LOG.info("delete_backup");
+ String output = baos.toString();
+ LOG.info(baos.toString());
+ assertTrue(output.indexOf("Delete backup for backupID=" + backupId + " completed.") >= 0);
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
new file mode 100644
index 0000000..1409628
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.backup.impl.BackupContext;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupDescribe extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupDescribe.class);
+
+ /**
+ * Verify that full backup is created on a single table with data correctly. Verify that describe
+ * works as expected
+ * @throws Exception
+ */
+ @Test
+ public void testBackupDescribe() throws Exception {
+
+ LOG.info("test backup describe on a single table with data");
+
+ List tableList = toList(table1.getNameAsString());
+ String backupId = getBackupClient().create(BackupType.FULL, tableList,
+ BACKUP_ROOT_DIR, null);
+
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(baos));
+
+ getBackupClient().describeBackupImage(backupId);
+ String response = baos.toString();
+ assertTrue(response.indexOf(backupId) > 0);
+ assertTrue(response.indexOf("COMPLETE") > 0);
+
+ BackupSystemTable table = BackupSystemTable.getTable(conf1);
+ BackupContext status = table.readBackupStatus(backupId);
+ String desc = status.getShortDescription();
+ assertTrue(response.indexOf(desc) >= 0);
+
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
new file mode 100644
index 0000000..0bf276d
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupShowHistory extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupShowHistory.class);
+
+ /**
+ * Verify that full backup is created on a single table with data correctly. Verify that history
+ * works as expected
+ * @throws Exception
+ */
+ @Test
+ public void testBackupHistory() throws Exception {
+
+ LOG.info("test backup history on a single table with data");
+ List tableList = toList(table1.getNameAsString());
+ String backupId = getBackupClient().create(BackupType.FULL, tableList,
+ BACKUP_ROOT_DIR, null);
+ assertTrue(checkSucceeded(backupId));
+ LOG.info("backup complete");
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(baos));
+
+ getBackupClient().showHistory(10);
+
+ LOG.info("show_history");
+ String output = baos.toString();
+ LOG.info(baos.toString());
+ assertTrue(output.indexOf(backupId) > 0);
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
new file mode 100644
index 0000000..ffeb94f
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupStatusProgress extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupStatusProgress.class);
+
+ /**
+ * Verify that backup status and progress can be queried for a full backup on a single table.
+ * @throws Exception
+ */
+ @Test
+ public void testBackupStatusProgress() throws Exception {
+
+ LOG.info("test backup status/progress on a single table with data");
+ List tableList = toList(table1.getNameAsString());
+ String backupId = getBackupClient().create(BackupType.FULL, tableList,
+ BACKUP_ROOT_DIR, null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(baos));
+
+ getBackupClient().describeBackupImage(backupId);
+ String response = baos.toString();
+ assertTrue(response.indexOf(backupId) > 0);
+ assertTrue(response.indexOf("COMPLETE") > 0);
+
+ baos = new ByteArrayOutputStream();
+ System.setOut(new PrintStream(baos));
+
+ getBackupClient().showProgress(backupId);
+ response = baos.toString();
+ LOG.info(response);
+ assertTrue(response.indexOf(backupId) > 0);
+ assertTrue(response.indexOf("COMPLETE") > 0);
+
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
index 7e5e966..0f8c38e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -133,6 +133,40 @@ public class TestBackupSystemTable {
}
@Test
+ public void testBackupDelete() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ int n = 10;
+ List<BackupContext> list = createBackupContextList(n);
+
+ // Load data
+ for (BackupContext bc : list) {
+ // Make sure we set right status
+ bc.setState(BackupState.COMPLETE);
+ table.updateBackupStatus(bc);
+ }
+
+ // Verify exists
+ for (BackupContext bc : list) {
+ assertNotNull(table.readBackupStatus(bc.getBackupId()));
+ }
+
+ // Delete all
+ for (BackupContext bc : list) {
+ table.deleteBackupStatus(bc.getBackupId());
+ }
+
+ // Verify do not exists
+ for (BackupContext bc : list) {
+ assertNull(table.readBackupStatus(bc.getBackupId()));
+ }
+
+ cleanBackupTable();
+
+ }
+
+
+
+ @Test
public void testRegionServerLastLogRollResults() throws IOException {
BackupSystemTable table = BackupSystemTable.getTable(conf);
@@ -295,6 +329,138 @@ public class TestBackupSystemTable {
cleanBackupTable();
}
+
+ /**
+ * Backup set tests
+ */
+
+ @Test
+ public void testBackupSetAddNotExists() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames != null);
+ assertTrue(tnames.length == tables.length);
+ for (int i = 0; i < tnames.length; i++) {
+ assertTrue(tnames[i].equals(tables[i]));
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testBackupSetAddExists() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ String[] addTables = new String[] { "table4", "table5", "table6" };
+ table.backupSetAdd(setName, addTables);
+
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames != null);
+ assertTrue(tnames.length == tables.length + addTables.length);
+ for (int i = 0; i < tnames.length; i++) {
+ assertTrue(tnames[i].equals("table" + (i + 1)));
+ }
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testBackupSetAddExistsIntersects() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ String[] addTables = new String[] { "table3", "table4", "table5", "table6" };
+ table.backupSetAdd(setName, addTables);
+
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames != null);
+ assertTrue(tnames.length == tables.length + addTables.length - 1);
+ for (int i = 0; i < tnames.length; i++) {
+ assertTrue(tnames[i].equals("table" + (i + 1)));
+ }
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testBackupSetRemoveSomeNotExists() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3", "table4" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ String[] removeTables = new String[] { "table4", "table5", "table6" };
+ table.backupSetRemove(setName, removeTables);
+
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames != null);
+ assertTrue(tnames.length == tables.length - 1);
+ for (int i = 0; i < tnames.length; i++) {
+ assertTrue(tnames[i].equals("table" + (i + 1)));
+ }
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testBackupSetRemove() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3", "table4" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ String[] removeTables = new String[] { "table4", "table3" };
+ table.backupSetRemove(setName, removeTables);
+
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames != null);
+ assertTrue(tnames.length == tables.length - 2);
+ for (int i = 0; i < tnames.length; i++) {
+ assertTrue(tnames[i].equals("table" + (i + 1)));
+ }
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testBackupSetDelete() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3", "table4" };
+ String setName = "name";
+ table.backupSetAdd(setName, tables);
+ table.backupSetDelete(setName);
+
+ String[] tnames = table.backupSetDescribe(setName);
+ assertTrue(tnames == null);
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testBackupSetList() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] tables = new String[] { "table1", "table2", "table3", "table4" };
+ String setName1 = "name1";
+ String setName2 = "name2";
+ table.backupSetAdd(setName1, tables);
+ table.backupSetAdd(setName2, tables);
+
+ List<String> list = table.backupSetList();
+
+ assertTrue(list.size() == 2);
+ assertTrue(list.get(0).equals(setName1));
+ assertTrue(list.get(1).equals(setName2));
+
+ cleanBackupTable();
+ }
+
private boolean compare(BackupContext ctx, BackupCompleteData data) {
return ctx.getBackupId().equals(data.getBackupToken())
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
new file mode 100644
index 0000000..72e7905
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackupSet extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullBackupSet.class);
+
+
+ /**
+ * Verify that full backup is created on a single table with data correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSetExist() throws Exception {
+
+ LOG.info("TFBSE test full backup, backup set exists");
+
+ //Create set
+ BackupSystemTable table = BackupSystemTable.getTable(conf1);
+ String name = "name";
+ table.backupSetAdd(name, new String[]{ table1.getNameAsString()});
+
+ String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, "-set", name };
+ // Run backup
+ BackupDriver.setConf(conf1);
+ BackupDriver.main(args);
+
+ ArrayList<BackupCompleteData> backups = table.getBackupHistory();
+ assertTrue(backups.size() == 1);
+ String backupId = backups.get(0).getBackupToken();
+ assertTrue(checkSucceeded(backupId));
+ LOG.info("TFBSE backup complete");
+
+ }
+
+ @Test
+ public void testFullBackupSetDoesNotExist() throws Exception {
+
+ LOG.info("TFBSE test full backup, backup set does not exist");
+ String name = "name1";
+ String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, "-set", name };
+ // Run backup
+ try{
+ BackupDriver.setConf(conf1);
+ BackupDriver.main(args);
+ assertTrue(false);
+ } catch(Exception e){
+ LOG.info("TFBSE Expected exception ", e);
+ }
+
+ }
+
+}