diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index f19da537ff6fa726069d9a70935d7346bed0a399..3cc846cc85a1ac36bc41b7fb914c43dc0bd31c6c 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -217,6 +217,7 @@ public class HBaseAdmin implements Admin { // want to wait a long time. private final int retryLongerMultiplier; private final int syncWaitTimeout; + private final long backupWaitTimeout; private boolean aborted; private int operationTimeout; @@ -243,7 +244,8 @@ public class HBaseAdmin implements Admin { HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.syncWaitTimeout = this.conf.getInt( "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min - + this.backupWaitTimeout = this.conf.getInt( + "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); this.ng = this.connection.getNonceGenerator(); @@ -1591,8 +1593,8 @@ public class HBaseAdmin implements Admin { public String backupTables(final BackupRequest userRequest) throws IOException { return get( backupTablesAsync(userRequest), - syncWaitTimeout, - TimeUnit.MILLISECONDS); + backupWaitTimeout, + TimeUnit.SECONDS); } public static class TableBackupFuture extends TableFuture { diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java index a1a1a789826779f53c609cd0c2b3c2ab2a5be67d..4699c8177d868739c0fa6df106869fae783be46c 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java @@ -4412,85 +4412,55 @@ public final class BackupProtos { */ long getCompleteTs(); - // required int64 total_bytes = 7; + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; /** - * required int64 total_bytes = 7; - */ - boolean hasTotalBytes(); - /** - * required int64 total_bytes = 7; - */ - long getTotalBytes(); - - // optional int64 log_bytes = 8; - /** - * optional int64 log_bytes = 8; - */ - boolean hasLogBytes(); - /** - * optional int64 log_bytes = 8; - */ - long getLogBytes(); - - // repeated .hbase.pb.TableServerTimestamp tst_map = 9; - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ java.util.List getTstMapList(); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ int getTstMapCount(); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ java.util.List getTstMapOrBuilderList(); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( int index); - // repeated .hbase.pb.BackupImage dependent_backup_image = 10; + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; /** - * repeated 
.hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ java.util.List getDependentBackupImageList(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ int getDependentBackupImageCount(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ java.util.List getDependentBackupImageOrBuilderList(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( int index); - - // required bool compacted = 11; - /** - * required bool compacted = 11; - */ - boolean hasCompacted(); - /** - * required bool compacted = 11; - */ - boolean getCompacted(); } /** * Protobuf type {@code hbase.pb.BackupManifest} @@ -4582,37 +4552,22 @@ public final class BackupProtos { completeTs_ = input.readUInt64(); break; } - case 56: { - bitField0_ |= 0x00000020; - totalBytes_ = input.readInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000040; - logBytes_ = input.readInt64(); - break; - } - case 74: { - if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { tstMap_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000100; + mutable_bitField0_ |= 0x00000040; } tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry)); break; } - case 82: { - if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { dependentBackupImage_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000200; + mutable_bitField0_ |= 0x00000080; } dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); break; } - case 88: { - bitField0_ |= 0x00000080; - compacted_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4624,10 +4579,10 @@ public final class BackupProtos { if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { tableList_ = java.util.Collections.unmodifiableList(tableList_); } - if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { tstMap_ = java.util.Collections.unmodifiableList(tstMap_); } - if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); } this.unknownFields = unknownFields.build(); @@ -4832,126 +4787,78 @@ public final class BackupProtos { return completeTs_; } - // required int64 total_bytes = 7; - public static final int TOTAL_BYTES_FIELD_NUMBER = 7; - private long totalBytes_; - /** - * required int64 total_bytes = 7; - */ - public boolean hasTotalBytes() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * required int64 total_bytes = 7; - */ - public long getTotalBytes() { - return 
totalBytes_; - } - - // optional int64 log_bytes = 8; - public static final int LOG_BYTES_FIELD_NUMBER = 8; - private long logBytes_; - /** - * optional int64 log_bytes = 8; - */ - public boolean hasLogBytes() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional int64 log_bytes = 8; - */ - public long getLogBytes() { - return logBytes_; - } - - // repeated .hbase.pb.TableServerTimestamp tst_map = 9; - public static final int TST_MAP_FIELD_NUMBER = 9; + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + public static final int TST_MAP_FIELD_NUMBER = 7; private java.util.List tstMap_; /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public java.util.List getTstMapList() { return tstMap_; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public java.util.List getTstMapOrBuilderList() { return tstMap_; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public int getTstMapCount() { return tstMap_.size(); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { return tstMap_.get(index); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( int index) { return tstMap_.get(index); } - // repeated .hbase.pb.BackupImage dependent_backup_image = 10; - public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 10; + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 8; private java.util.List dependentBackupImage_; /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public java.util.List getDependentBackupImageList() { return dependentBackupImage_; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public java.util.List getDependentBackupImageOrBuilderList() { return dependentBackupImage_; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public int getDependentBackupImageCount() { return dependentBackupImage_.size(); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { return dependentBackupImage_.get(index); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( int index) { return dependentBackupImage_.get(index); } - // required bool compacted = 11; - public static final int COMPACTED_FIELD_NUMBER = 11; - private boolean compacted_; - /** - * required bool compacted = 11; - */ - public boolean hasCompacted() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * required bool 
compacted = 11; - */ - public boolean getCompacted() { - return compacted_; - } - private void initFields() { version_ = ""; backupId_ = ""; @@ -4959,11 +4866,8 @@ public final class BackupProtos { tableList_ = java.util.Collections.emptyList(); startTs_ = 0L; completeTs_ = 0L; - totalBytes_ = 0L; - logBytes_ = 0L; tstMap_ = java.util.Collections.emptyList(); dependentBackupImage_ = java.util.Collections.emptyList(); - compacted_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4990,14 +4894,6 @@ public final class BackupProtos { memoizedIsInitialized = 0; return false; } - if (!hasTotalBytes()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCompacted()) { - memoizedIsInitialized = 0; - return false; - } for (int i = 0; i < getTableListCount(); i++) { if (!getTableList(i).isInitialized()) { memoizedIsInitialized = 0; @@ -5041,20 +4937,11 @@ public final class BackupProtos { if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt64(6, completeTs_); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeInt64(7, totalBytes_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeInt64(8, logBytes_); - } for (int i = 0; i < tstMap_.size(); i++) { - output.writeMessage(9, tstMap_.get(i)); + output.writeMessage(7, tstMap_.get(i)); } for (int i = 0; i < dependentBackupImage_.size(); i++) { - output.writeMessage(10, dependentBackupImage_.get(i)); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeBool(11, compacted_); + output.writeMessage(8, dependentBackupImage_.get(i)); } getUnknownFields().writeTo(output); } @@ -5089,25 +4976,13 @@ public final class BackupProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, completeTs_); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(7, totalBytes_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(8, logBytes_); - } for (int i = 0; i < tstMap_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, tstMap_.get(i)); + .computeMessageSize(7, tstMap_.get(i)); } for (int i = 0; i < dependentBackupImage_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(10, dependentBackupImage_.get(i)); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(11, compacted_); + .computeMessageSize(8, dependentBackupImage_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5159,25 +5034,10 @@ public final class BackupProtos { result = result && (getCompleteTs() == other.getCompleteTs()); } - result = result && (hasTotalBytes() == other.hasTotalBytes()); - if (hasTotalBytes()) { - result = result && (getTotalBytes() - == other.getTotalBytes()); - } - result = result && (hasLogBytes() == other.hasLogBytes()); - if (hasLogBytes()) { - result = result && (getLogBytes() - == other.getLogBytes()); - } result = result && getTstMapList() .equals(other.getTstMapList()); result = result && getDependentBackupImageList() .equals(other.getDependentBackupImageList()); - result = result && (hasCompacted() == other.hasCompacted()); - if (hasCompacted()) { - result = result && (getCompacted() - == other.getCompacted()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5215,14 +5075,6 @@ public final 
class BackupProtos { hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCompleteTs()); } - if (hasTotalBytes()) { - hash = (37 * hash) + TOTAL_BYTES_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTotalBytes()); - } - if (hasLogBytes()) { - hash = (37 * hash) + LOG_BYTES_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLogBytes()); - } if (getTstMapCount() > 0) { hash = (37 * hash) + TST_MAP_FIELD_NUMBER; hash = (53 * hash) + getTstMapList().hashCode(); @@ -5231,10 +5083,6 @@ public final class BackupProtos { hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER; hash = (53 * hash) + getDependentBackupImageList().hashCode(); } - if (hasCompacted()) { - hash = (37 * hash) + COMPACTED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getCompacted()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5363,24 +5211,18 @@ public final class BackupProtos { bitField0_ = (bitField0_ & ~0x00000010); completeTs_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); - totalBytes_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - logBytes_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); if (tstMapBuilder_ == null) { tstMap_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000040); } else { tstMapBuilder_.clear(); } if (dependentBackupImageBuilder_ == null) { dependentBackupImage_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000080); } else { dependentBackupImageBuilder_.clear(); } - compacted_ = false; - bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -5438,36 +5280,24 @@ public final class BackupProtos { to_bitField0_ |= 0x00000010; } result.completeTs_ = completeTs_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000020; - } - result.totalBytes_ = totalBytes_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.logBytes_ = logBytes_; if (tstMapBuilder_ == null) { - if (((bitField0_ & 0x00000100) == 0x00000100)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { tstMap_ = java.util.Collections.unmodifiableList(tstMap_); - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000040); } result.tstMap_ = tstMap_; } else { result.tstMap_ = tstMapBuilder_.build(); } if (dependentBackupImageBuilder_ == null) { - if (((bitField0_ & 0x00000200) == 0x00000200)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000080); } result.dependentBackupImage_ = dependentBackupImage_; } else { result.dependentBackupImage_ = dependentBackupImageBuilder_.build(); } - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000080; - } - result.compacted_ = compacted_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5529,17 +5359,11 @@ public final class BackupProtos { if (other.hasCompleteTs()) { setCompleteTs(other.getCompleteTs()); } - if (other.hasTotalBytes()) { - setTotalBytes(other.getTotalBytes()); - } - if (other.hasLogBytes()) { - setLogBytes(other.getLogBytes()); - } if (tstMapBuilder_ == null) { if (!other.tstMap_.isEmpty()) { if (tstMap_.isEmpty()) { tstMap_ = other.tstMap_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000040); } else { ensureTstMapIsMutable(); 
tstMap_.addAll(other.tstMap_); @@ -5552,7 +5376,7 @@ public final class BackupProtos { tstMapBuilder_.dispose(); tstMapBuilder_ = null; tstMap_ = other.tstMap_; - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000040); tstMapBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getTstMapFieldBuilder() : null; @@ -5565,7 +5389,7 @@ public final class BackupProtos { if (!other.dependentBackupImage_.isEmpty()) { if (dependentBackupImage_.isEmpty()) { dependentBackupImage_ = other.dependentBackupImage_; - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000080); } else { ensureDependentBackupImageIsMutable(); dependentBackupImage_.addAll(other.dependentBackupImage_); @@ -5578,7 +5402,7 @@ public final class BackupProtos { dependentBackupImageBuilder_.dispose(); dependentBackupImageBuilder_ = null; dependentBackupImage_ = other.dependentBackupImage_; - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000080); dependentBackupImageBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getDependentBackupImageFieldBuilder() : null; @@ -5587,9 +5411,6 @@ public final class BackupProtos { } } } - if (other.hasCompacted()) { - setCompacted(other.getCompacted()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5615,14 +5436,6 @@ public final class BackupProtos { return false; } - if (!hasTotalBytes()) { - - return false; - } - if (!hasCompacted()) { - - return false; - } for (int i = 0; i < getTableListCount(); i++) { if (!getTableList(i).isInitialized()) { @@ -6153,79 +5966,13 @@ public final class BackupProtos { return this; } - // required int64 total_bytes = 7; - private long totalBytes_ ; - /** - * required int64 total_bytes = 7; - */ - public boolean hasTotalBytes() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * required int64 total_bytes = 7; - */ - public long getTotalBytes() { - return totalBytes_; - } - /** - * required int64 total_bytes = 7; - */ - public Builder setTotalBytes(long value) { - bitField0_ |= 0x00000040; - totalBytes_ = value; - onChanged(); - return this; - } - /** - * required int64 total_bytes = 7; - */ - public Builder clearTotalBytes() { - bitField0_ = (bitField0_ & ~0x00000040); - totalBytes_ = 0L; - onChanged(); - return this; - } - - // optional int64 log_bytes = 8; - private long logBytes_ ; - /** - * optional int64 log_bytes = 8; - */ - public boolean hasLogBytes() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional int64 log_bytes = 8; - */ - public long getLogBytes() { - return logBytes_; - } - /** - * optional int64 log_bytes = 8; - */ - public Builder setLogBytes(long value) { - bitField0_ |= 0x00000080; - logBytes_ = value; - onChanged(); - return this; - } - /** - * optional int64 log_bytes = 8; - */ - public Builder clearLogBytes() { - bitField0_ = (bitField0_ & ~0x00000080); - logBytes_ = 0L; - onChanged(); - return this; - } - - // repeated .hbase.pb.TableServerTimestamp tst_map = 9; + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; private java.util.List tstMap_ = java.util.Collections.emptyList(); private void ensureTstMapIsMutable() { - if (!((bitField0_ & 0x00000100) == 0x00000100)) { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { tstMap_ = new java.util.ArrayList(tstMap_); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000040; } } @@ -6233,7 +5980,7 @@ public final class BackupProtos { 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_; /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public java.util.List getTstMapList() { if (tstMapBuilder_ == null) { @@ -6243,7 +5990,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public int getTstMapCount() { if (tstMapBuilder_ == null) { @@ -6253,7 +6000,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { if (tstMapBuilder_ == null) { @@ -6263,7 +6010,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder setTstMap( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { @@ -6280,7 +6027,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder setTstMap( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { @@ -6294,7 +6041,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { if (tstMapBuilder_ == null) { @@ -6310,7 +6057,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder addTstMap( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { @@ -6327,7 +6074,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder addTstMap( org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { @@ -6341,7 +6088,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder addTstMap( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { @@ -6355,7 +6102,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder addAllTstMap( java.lang.Iterable values) { @@ -6369,12 +6116,12 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder clearTstMap() { if (tstMapBuilder_ == null) { tstMap_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000100); + 
bitField0_ = (bitField0_ & ~0x00000040); onChanged(); } else { tstMapBuilder_.clear(); @@ -6382,7 +6129,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public Builder removeTstMap(int index) { if (tstMapBuilder_ == null) { @@ -6395,14 +6142,14 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder( int index) { return getTstMapFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( int index) { @@ -6412,7 +6159,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public java.util.List getTstMapOrBuilderList() { @@ -6423,14 +6170,14 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() { return getTstMapFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder( int index) { @@ -6438,7 +6185,7 @@ public final class BackupProtos { index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; */ public java.util.List getTstMapBuilderList() { @@ -6451,7 +6198,7 @@ public final class BackupProtos { tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>( tstMap_, - ((bitField0_ & 0x00000100) == 0x00000100), + ((bitField0_ & 0x00000040) == 0x00000040), getParentForChildren(), isClean()); tstMap_ = null; @@ -6459,13 +6206,13 @@ public final class BackupProtos { return tstMapBuilder_; } - // repeated .hbase.pb.BackupImage dependent_backup_image = 10; + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; private java.util.List dependentBackupImage_ = java.util.Collections.emptyList(); private void ensureDependentBackupImageIsMutable() { - if (!((bitField0_ & 0x00000200) == 0x00000200)) { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { dependentBackupImage_ = new java.util.ArrayList(dependentBackupImage_); - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000080; } } @@ -6473,7 +6220,7 @@ public final class BackupProtos { org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_; /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public java.util.List getDependentBackupImageList() { if (dependentBackupImageBuilder_ == null) { @@ -6483,7 +6230,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public int getDependentBackupImageCount() { if (dependentBackupImageBuilder_ == null) { @@ -6493,7 +6240,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { if (dependentBackupImageBuilder_ == null) { @@ -6503,7 +6250,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder setDependentBackupImage( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { @@ -6520,7 +6267,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder setDependentBackupImage( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { @@ -6534,7 +6281,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { if (dependentBackupImageBuilder_ == null) { @@ -6550,7 +6297,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder addDependentBackupImage( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { @@ -6567,7 +6314,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder addDependentBackupImage( org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { @@ -6581,7 +6328,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder addDependentBackupImage( int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { @@ -6595,7 +6342,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder addAllDependentBackupImage( java.lang.Iterable values) { @@ -6609,12 +6356,12 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder 
clearDependentBackupImage() { if (dependentBackupImageBuilder_ == null) { dependentBackupImage_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000080); onChanged(); } else { dependentBackupImageBuilder_.clear(); @@ -6622,7 +6369,7 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public Builder removeDependentBackupImage(int index) { if (dependentBackupImageBuilder_ == null) { @@ -6635,14 +6382,14 @@ public final class BackupProtos { return this; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder( int index) { return getDependentBackupImageFieldBuilder().getBuilder(index); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( int index) { @@ -6652,7 +6399,7 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public java.util.List getDependentBackupImageOrBuilderList() { @@ -6663,14 +6410,14 @@ public final class BackupProtos { } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() { return getDependentBackupImageFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder( int index) { @@ -6678,7 +6425,7 @@ public final class BackupProtos { index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ public java.util.List getDependentBackupImageBuilderList() { @@ -6691,7 +6438,7 @@ public final class BackupProtos { dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( dependentBackupImage_, - ((bitField0_ & 0x00000200) == 0x00000200), + ((bitField0_ & 0x00000080) == 0x00000080), getParentForChildren(), isClean()); dependentBackupImage_ = null; @@ -6699,39 +6446,6 @@ public final class BackupProtos { return dependentBackupImageBuilder_; } - // required bool compacted = 11; - private boolean compacted_ ; - /** - * required bool compacted = 11; - */ - public boolean hasCompacted() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * required bool compacted = 11; - */ - public boolean getCompacted() { - return compacted_; - } - /** - * required bool compacted 
= 11; - */ - public Builder setCompacted(boolean value) { - bitField0_ |= 0x00000400; - compacted_ = value; - onChanged(); - return this; - } - /** - * required bool compacted = 11; - */ - public Builder clearCompacted() { - bitField0_ = (bitField0_ & ~0x00000400); - compacted_ = false; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest) } @@ -7642,7 +7356,7 @@ public final class BackupProtos { // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus) } - public interface BackupContextOrBuilder + public interface BackupInfoOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string backup_id = 1; @@ -7685,25 +7399,25 @@ public final class BackupProtos { com.google.protobuf.ByteString getTargetRootDirBytes(); - // optional .hbase.pb.BackupContext.BackupState state = 4; + // optional .hbase.pb.BackupInfo.BackupState state = 4; /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ boolean hasState(); /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState(); - // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ boolean hasPhase(); /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase(); // optional string failed_message = 6; /** @@ -7765,60 +7479,70 @@ public final class BackupProtos { */ long getEndTs(); - // optional int64 total_bytes_copied = 10; + // optional uint32 progress = 10; /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - boolean hasTotalBytesCopied(); + boolean hasProgress(); /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - long getTotalBytesCopied(); + int getProgress(); - // optional string hlog_target_dir = 11; + // optional string job_id = 11; /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - boolean hasHlogTargetDir(); + boolean hasJobId(); /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - java.lang.String getHlogTargetDir(); + java.lang.String getJobId(); /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ com.google.protobuf.ByteString - getHlogTargetDirBytes(); + getJobIdBytes(); - // optional uint32 progress = 12; + // required uint32 workers_number = 12; /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - boolean hasProgress(); + boolean hasWorkersNumber(); /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - int getProgress(); + int getWorkersNumber(); + + // required uint64 bandwidth = 13; + /** + * required uint64 bandwidth = 13; + */ + boolean hasBandwidth(); + /** + * required uint64 bandwidth = 13; + */ + long getBandwidth(); } /** - * Protobuf type {@code hbase.pb.BackupContext} + * Protobuf type 
{@code hbase.pb.BackupInfo} */ - public static final class BackupContext extends + public static final class BackupInfo extends com.google.protobuf.GeneratedMessage - implements BackupContextOrBuilder { - // Use BackupContext.newBuilder() to construct. - private BackupContext(com.google.protobuf.GeneratedMessage.Builder builder) { + implements BackupInfoOrBuilder { + // Use BackupInfo.newBuilder() to construct. + private BackupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BackupContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private BackupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BackupContext defaultInstance; - public static BackupContext getDefaultInstance() { + private static final BackupInfo defaultInstance; + public static BackupInfo getDefaultInstance() { return defaultInstance; } - public BackupContext getDefaultInstanceForType() { + public BackupInfo getDefaultInstanceForType() { return defaultInstance; } @@ -7828,7 +7552,7 @@ public final class BackupProtos { getUnknownFields() { return this.unknownFields; } - private BackupContext( + private BackupInfo( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -7874,7 +7598,7 @@ public final class BackupProtos { } case 32: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.valueOf(rawValue); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(4, rawValue); } else { @@ -7885,7 +7609,7 @@ public final class BackupProtos { } case 40: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.valueOf(rawValue); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(5, rawValue); } else { @@ -7919,17 +7643,22 @@ public final class BackupProtos { } case 80: { bitField0_ |= 0x00000100; - totalBytesCopied_ = input.readInt64(); + progress_ = input.readUInt32(); break; } case 90: { bitField0_ |= 0x00000200; - hlogTargetDir_ = input.readBytes(); + jobId_ = input.readBytes(); break; } case 96: { bitField0_ |= 0x00000400; - progress_ = input.readUInt32(); + workersNumber_ = input.readUInt32(); + break; + } + case 104: { + bitField0_ |= 0x00000800; + bandwidth_ = input.readUInt64(); break; } } @@ -7949,33 +7678,33 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupContext parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupInfo parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupContext(input, extensionRegistry); + return new BackupInfo(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } /** - * Protobuf enum {@code hbase.pb.BackupContext.BackupState} + * Protobuf enum {@code hbase.pb.BackupInfo.BackupState} */ public enum BackupState implements com.google.protobuf.ProtocolMessageEnum { @@ -8058,7 +7787,7 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(0); + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(0); } private static final BackupState[] VALUES = values(); @@ -8080,11 +7809,11 @@ public final class BackupProtos { this.value = value; } - // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupState) + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupState) } /** - * Protobuf enum {@code hbase.pb.BackupContext.BackupPhase} + * Protobuf enum {@code hbase.pb.BackupInfo.BackupPhase} */ public enum BackupPhase implements com.google.protobuf.ProtocolMessageEnum { @@ -8176,7 +7905,7 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(1); + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(1); } private static final BackupPhase[] VALUES = values(); @@ -8198,7 +7927,7 @@ public final class BackupProtos { this.value = value; } - // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupPhase) + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupPhase) } private int bitField0_; @@ -8304,35 +8033,35 @@ public final class BackupProtos { } } - // optional .hbase.pb.BackupContext.BackupState state = 4; + // optional .hbase.pb.BackupInfo.BackupState state = 4; public static final int STATE_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_; /** - * optional 
.hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ public boolean hasState() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { return state_; } - // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; public static final int PHASE_FIELD_NUMBER = 5; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_; /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ public boolean hasPhase() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { return phase_; } @@ -8447,36 +8176,36 @@ public final class BackupProtos { return endTs_; } - // optional int64 total_bytes_copied = 10; - public static final int TOTAL_BYTES_COPIED_FIELD_NUMBER = 10; - private long totalBytesCopied_; + // optional uint32 progress = 10; + public static final int PROGRESS_FIELD_NUMBER = 10; + private int progress_; /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public boolean hasTotalBytesCopied() { + public boolean hasProgress() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public long getTotalBytesCopied() { - return totalBytesCopied_; + public int getProgress() { + return progress_; } - // optional string hlog_target_dir = 11; - public static final int HLOG_TARGET_DIR_FIELD_NUMBER = 11; - private java.lang.Object hlogTargetDir_; + // optional string job_id = 11; + public static final int JOB_ID_FIELD_NUMBER = 11; + private java.lang.Object jobId_; /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public boolean hasHlogTargetDir() { + public boolean hasJobId() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public java.lang.String getHlogTargetDir() { - java.lang.Object ref = hlogTargetDir_; + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -8484,57 +8213,74 @@ public final class BackupProtos { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - hlogTargetDir_ = s; + jobId_ = s; } return s; } } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ public com.google.protobuf.ByteString - getHlogTargetDirBytes() { - java.lang.Object ref = hlogTargetDir_; + getJobIdBytes() { + java.lang.Object ref = jobId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = 
com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - hlogTargetDir_ = b; + jobId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional uint32 progress = 12; - public static final int PROGRESS_FIELD_NUMBER = 12; - private int progress_; + // required uint32 workers_number = 12; + public static final int WORKERS_NUMBER_FIELD_NUMBER = 12; + private int workersNumber_; /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public boolean hasProgress() { + public boolean hasWorkersNumber() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public int getProgress() { - return progress_; + public int getWorkersNumber() { + return workersNumber_; + } + + // required uint64 bandwidth = 13; + public static final int BANDWIDTH_FIELD_NUMBER = 13; + private long bandwidth_; + /** + * required uint64 bandwidth = 13; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * required uint64 bandwidth = 13; + */ + public long getBandwidth() { + return bandwidth_; } private void initFields() { backupId_ = ""; type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; targetRootDir_ = ""; - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; failedMessage_ = ""; tableBackupStatus_ = java.util.Collections.emptyList(); startTs_ = 0L; endTs_ = 0L; - totalBytesCopied_ = 0L; - hlogTargetDir_ = ""; progress_ = 0; + jobId_ = ""; + workersNumber_ = 0; + bandwidth_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -8553,6 +8299,14 @@ public final class BackupProtos { memoizedIsInitialized = 0; return false; } + if (!hasWorkersNumber()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBandwidth()) { + memoizedIsInitialized = 0; + return false; + } for (int i = 0; i < getTableBackupStatusCount(); i++) { if (!getTableBackupStatus(i).isInitialized()) { memoizedIsInitialized = 0; @@ -8594,13 +8348,16 @@ public final class BackupProtos { output.writeUInt64(9, endTs_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeInt64(10, totalBytesCopied_); + output.writeUInt32(10, progress_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBytes(11, getHlogTargetDirBytes()); + output.writeBytes(11, getJobIdBytes()); } if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeUInt32(12, progress_); + output.writeUInt32(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeUInt64(13, bandwidth_); } getUnknownFields().writeTo(output); } @@ -8649,15 +8406,19 @@ public final class BackupProtos { } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(10, totalBytesCopied_); + .computeUInt32Size(10, progress_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getHlogTargetDirBytes()); + .computeBytesSize(11, getJobIdBytes()); } if (((bitField0_ & 0x00000400) == 0x00000400)) { size += 
com.google.protobuf.CodedOutputStream - .computeUInt32Size(12, progress_); + .computeUInt32Size(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(13, bandwidth_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -8676,10 +8437,10 @@ public final class BackupProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) obj; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) obj; boolean result = true; result = result && (hasBackupId() == other.hasBackupId()); @@ -8724,21 +8485,26 @@ public final class BackupProtos { result = result && (getEndTs() == other.getEndTs()); } - result = result && (hasTotalBytesCopied() == other.hasTotalBytesCopied()); - if (hasTotalBytesCopied()) { - result = result && (getTotalBytesCopied() - == other.getTotalBytesCopied()); - } - result = result && (hasHlogTargetDir() == other.hasHlogTargetDir()); - if (hasHlogTargetDir()) { - result = result && getHlogTargetDir() - .equals(other.getHlogTargetDir()); - } result = result && (hasProgress() == other.hasProgress()); if (hasProgress()) { result = result && (getProgress() == other.getProgress()); } + result = result && (hasJobId() == other.hasJobId()); + if (hasJobId()) { + result = result && getJobId() + .equals(other.getJobId()); + } + result = result && (hasWorkersNumber() == other.hasWorkersNumber()); + if (hasWorkersNumber()) { + result = result && (getWorkersNumber() + == other.getWorkersNumber()); + } + result = result && (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -8788,70 +8554,74 @@ public final class BackupProtos { hash = (37 * hash) + END_TS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getEndTs()); } - if (hasTotalBytesCopied()) { - hash = (37 * hash) + TOTAL_BYTES_COPIED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTotalBytesCopied()); - } - if (hasHlogTargetDir()) { - hash = (37 * hash) + HLOG_TARGET_DIR_FIELD_NUMBER; - hash = (53 * hash) + getHlogTargetDir().hashCode(); - } if (hasProgress()) { hash = (37 * hash) + PROGRESS_FIELD_NUMBER; hash = (53 * hash) + getProgress(); } + if (hasJobId()) { + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + } + if (hasWorkersNumber()) { + hash = (37 * hash) + WORKERS_NUMBER_FIELD_NUMBER; + hash = (53 * hash) + getWorkersNumber(); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -8860,7 +8630,7 @@ public final class BackupProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo prototype) { return newBuilder().mergeFrom(prototype); } public Builder 
toBuilder() { return newBuilder(this); } @@ -8872,24 +8642,24 @@ public final class BackupProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupContext} + * Protobuf type {@code hbase.pb.BackupInfo} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -8916,9 +8686,9 @@ public final class BackupProtos { bitField0_ = (bitField0_ & ~0x00000002); targetRootDir_ = ""; bitField0_ = (bitField0_ & ~0x00000004); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; bitField0_ = (bitField0_ & ~0x00000008); - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; bitField0_ = (bitField0_ & ~0x00000010); failedMessage_ = ""; bitField0_ = (bitField0_ & ~0x00000020); @@ -8932,12 +8702,14 @@ public final class BackupProtos { bitField0_ = (bitField0_ & ~0x00000080); endTs_ = 0L; bitField0_ = (bitField0_ & ~0x00000100); - totalBytesCopied_ = 0L; + progress_ = 0; bitField0_ = (bitField0_ & ~0x00000200); - hlogTargetDir_ = ""; + jobId_ = ""; bitField0_ = (bitField0_ & ~0x00000400); - progress_ = 0; + workersNumber_ = 0; bitField0_ = (bitField0_ & ~0x00000800); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -8947,23 +8719,23 @@ public final class BackupProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -9010,31 +8782,35 @@ public final class BackupProtos { if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000100; } - result.totalBytesCopied_ = totalBytesCopied_; + result.progress_ = progress_; if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000200; } - result.hlogTargetDir_ = hlogTargetDir_; + result.jobId_ = jobId_; if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000400; } - result.progress_ = progress_; + result.workersNumber_ = workersNumber_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; + } + result.bandwidth_ = bandwidth_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) return this; if (other.hasBackupId()) { bitField0_ |= 0x00000001; backupId_ = other.backupId_; @@ -9091,16 +8867,19 @@ public final class BackupProtos { if (other.hasEndTs()) { setEndTs(other.getEndTs()); } - if (other.hasTotalBytesCopied()) { - setTotalBytesCopied(other.getTotalBytesCopied()); + if (other.hasProgress()) { + setProgress(other.getProgress()); } - if (other.hasHlogTargetDir()) { + if (other.hasJobId()) { bitField0_ |= 0x00000400; - hlogTargetDir_ = other.hlogTargetDir_; + jobId_ = other.jobId_; onChanged(); } - if (other.hasProgress()) { - setProgress(other.getProgress()); + if 
(other.hasWorkersNumber()) { + setWorkersNumber(other.getWorkersNumber()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -9119,6 +8898,14 @@ public final class BackupProtos { return false; } + if (!hasWorkersNumber()) { + + return false; + } + if (!hasBandwidth()) { + + return false; + } for (int i = 0; i < getTableBackupStatusCount(); i++) { if (!getTableBackupStatus(i).isInitialized()) { @@ -9132,11 +8919,11 @@ public final class BackupProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -9331,24 +9118,24 @@ public final class BackupProtos { return this; } - // optional .hbase.pb.BackupContext.BackupState state = 4; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + // optional .hbase.pb.BackupInfo.BackupState state = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ public boolean hasState() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { return state_; } /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value) { if (value == null) { throw new NullPointerException(); } @@ -9358,33 +9145,33 @@ public final class BackupProtos { return this; } /** - * optional .hbase.pb.BackupContext.BackupState state = 4; + * optional .hbase.pb.BackupInfo.BackupState state = 4; */ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000008); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; onChanged(); return this; } - // optional .hbase.pb.BackupContext.BackupPhase phase = 5; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ public boolean hasPhase() { return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { return phase_; } /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ - public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value) { + public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value) { if (value == null) { throw new NullPointerException(); } @@ -9394,11 +9181,11 @@ public final class BackupProtos { return this; } /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ public Builder clearPhase() { bitField0_ = (bitField0_ & ~0x00000010); - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; onChanged(); return this; } @@ -9783,173 +9570,206 @@ public final class BackupProtos { return this; } - // optional int64 total_bytes_copied = 10; - private long totalBytesCopied_ ; + // optional uint32 progress = 10; + private int progress_ ; /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public boolean hasTotalBytesCopied() { + public boolean hasProgress() { return ((bitField0_ & 0x00000200) == 0x00000200); } /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public long getTotalBytesCopied() { - return totalBytesCopied_; + public int getProgress() { + return progress_; } /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public Builder setTotalBytesCopied(long value) { + public Builder setProgress(int value) { bitField0_ |= 0x00000200; - totalBytesCopied_ = value; + progress_ = value; onChanged(); return this; } /** - * optional int64 total_bytes_copied = 10; + * optional uint32 progress = 10; */ - public Builder clearTotalBytesCopied() { + public Builder clearProgress() { bitField0_ = (bitField0_ & ~0x00000200); - totalBytesCopied_ = 0L; + progress_ = 0; onChanged(); return this; } - // optional string hlog_target_dir = 11; - private java.lang.Object hlogTargetDir_ = ""; + // optional string job_id = 11; + private java.lang.Object jobId_ = ""; /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public boolean hasHlogTargetDir() { + public boolean hasJobId() { return ((bitField0_ & 0x00000400) == 0x00000400); } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public java.lang.String getHlogTargetDir() { - java.lang.Object ref = hlogTargetDir_; + public java.lang.String 
getJobId() { + java.lang.Object ref = jobId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - hlogTargetDir_ = s; + jobId_ = s; return s; } else { return (java.lang.String) ref; } } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ public com.google.protobuf.ByteString - getHlogTargetDirBytes() { - java.lang.Object ref = hlogTargetDir_; + getJobIdBytes() { + java.lang.Object ref = jobId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - hlogTargetDir_ = b; + jobId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public Builder setHlogTargetDir( + public Builder setJobId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000400; - hlogTargetDir_ = value; + jobId_ = value; onChanged(); return this; } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public Builder clearHlogTargetDir() { + public Builder clearJobId() { bitField0_ = (bitField0_ & ~0x00000400); - hlogTargetDir_ = getDefaultInstance().getHlogTargetDir(); + jobId_ = getDefaultInstance().getJobId(); onChanged(); return this; } /** - * optional string hlog_target_dir = 11; + * optional string job_id = 11; */ - public Builder setHlogTargetDirBytes( + public Builder setJobIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000400; - hlogTargetDir_ = value; + jobId_ = value; onChanged(); return this; } - // optional uint32 progress = 12; - private int progress_ ; + // required uint32 workers_number = 12; + private int workersNumber_ ; /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public boolean hasProgress() { + public boolean hasWorkersNumber() { return ((bitField0_ & 0x00000800) == 0x00000800); } /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public int getProgress() { - return progress_; + public int getWorkersNumber() { + return workersNumber_; } /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public Builder setProgress(int value) { + public Builder setWorkersNumber(int value) { bitField0_ |= 0x00000800; - progress_ = value; + workersNumber_ = value; onChanged(); return this; } /** - * optional uint32 progress = 12; + * required uint32 workers_number = 12; */ - public Builder clearProgress() { + public Builder clearWorkersNumber() { bitField0_ = (bitField0_ & ~0x00000800); - progress_ = 0; + workersNumber_ = 0; + onChanged(); + return this; + } + + // required uint64 bandwidth = 13; + private long bandwidth_ ; + /** + * required uint64 bandwidth = 13; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * required uint64 bandwidth = 13; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * required uint64 bandwidth = 13; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00001000; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * required uint64 bandwidth = 13; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00001000); + bandwidth_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext) + // 
@@protoc_insertion_point(builder_scope:hbase.pb.BackupInfo) } static { - defaultInstance = new BackupContext(true); + defaultInstance = new BackupInfo(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext) + // @@protoc_insertion_point(class_scope:hbase.pb.BackupInfo) } public interface BackupProcContextOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.BackupContext ctx = 1; + // required .hbase.pb.BackupInfo ctx = 1; /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ boolean hasCtx(); /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx(); /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder(); // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; /** @@ -10028,11 +9848,11 @@ public final class BackupProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder subBuilder = null; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = ctx_.toBuilder(); } - ctx_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.PARSER, extensionRegistry); + ctx_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(ctx_); ctx_ = subBuilder.buildPartial(); @@ -10091,25 +9911,25 @@ public final class BackupProtos { } private int bitField0_; - // required .hbase.pb.BackupContext ctx = 1; + // required .hbase.pb.BackupInfo ctx = 1; public static final int CTX_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext ctx_; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_; /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ public boolean hasCtx() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { return ctx_; } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { return ctx_; } @@ -10150,7 +9970,7 @@ public final class BackupProtos { } private void initFields() { - ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); serverTimestamp_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; @@ -10365,7 +10185,7 @@ public final class BackupProtos { public Builder 
clear() { super.clear(); if (ctxBuilder_ == null) { - ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); } else { ctxBuilder_.clear(); } @@ -10507,20 +10327,20 @@ public final class BackupProtos { } private int bitField0_; - // required .hbase.pb.BackupContext ctx = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + // required .hbase.pb.BackupInfo ctx = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder> ctxBuilder_; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> ctxBuilder_; /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ public boolean hasCtx() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { if (ctxBuilder_ == null) { return ctx_; } else { @@ -10528,9 +10348,9 @@ public final class BackupProtos { } } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public Builder setCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext value) { + public Builder setCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { if (ctxBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -10544,10 +10364,10 @@ public final class BackupProtos { return this; } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ public Builder setCtx( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder builderForValue) { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder builderForValue) { if (ctxBuilder_ == null) { ctx_ = builderForValue.build(); onChanged(); @@ -10558,14 +10378,14 @@ public final class BackupProtos { return this; } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public Builder mergeCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext value) { + public Builder mergeCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { if (ctxBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && - ctx_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) { + ctx_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) { ctx_ = - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder(ctx_).mergeFrom(value).buildPartial(); + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder(ctx_).mergeFrom(value).buildPartial(); } else { ctx_ = value; } @@ -10577,11 +10397,11 @@ public final class BackupProtos { return this; } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ public Builder clearCtx() { if (ctxBuilder_ == null) { - ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); onChanged(); } else { ctxBuilder_.clear(); @@ -10590,17 +10410,17 @@ public final class BackupProtos { return this; } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder getCtxBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder getCtxBuilder() { bitField0_ |= 0x00000001; onChanged(); return getCtxFieldBuilder().getBuilder(); } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { if (ctxBuilder_ != null) { return ctxBuilder_.getMessageOrBuilder(); } else { @@ -10608,14 +10428,14 @@ public final class BackupProtos { } } /** - * required .hbase.pb.BackupContext ctx = 1; + * required .hbase.pb.BackupInfo ctx = 1; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> getCtxFieldBuilder() { if (ctxBuilder_ == null) { ctxBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder>( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder>( ctx_, getParentForChildren(), isClean()); @@ -10906,10 +10726,10 @@ public final class BackupProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_BackupContext_descriptor; + internal_static_hbase_pb_BackupInfo_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_BackupContext_fieldAccessorTable; + internal_static_hbase_pb_BackupInfo_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_BackupProcContext_descriptor; private static @@ -10936,44 +10756,42 @@ public final class BackupProtos { "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab", 
"leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" + ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" + - "\031.hbase.pb.ServerTimestamp\"\313\002\n\016BackupMan" + + "\031.hbase.pb.ServerTimestamp\"\220\002\n\016BackupMan" + "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" + "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" + "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" + - "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n" + - "\013total_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n" + - "\007tst_map\030\t \003(\0132\036.hbase.pb.TableServerTim" + - "estamp\0225\n\026dependent_backup_image\030\n \003(\0132\025", - ".hbase.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010" + - "\"]\n\021TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.h" + - "base.pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n" + - "\010snapshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbac" + - "kup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Ba" + - "ckupType\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005sta" + - "te\030\004 \001(\0162#.hbase.pb.BackupContext.Backup" + - "State\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupCo" + - "ntext.BackupPhase\022\026\n\016failed_message\030\006 \001(" + - "\t\0228\n\023table_backup_status\030\007 \003(\0132\033.hbase.p", - "b.TableBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n" + - "\006end_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(" + - "\003\022\027\n\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014" + - " \001(\r\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNN" + - "ING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCE" + - "LLED\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SN" + - "APSHOT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNA" + - "PSHOTCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STO" + - "RE_MANIFEST\020\005\"n\n\021BackupProcContext\022$\n\003ct" + - "x\030\001 \002(\0132\027.hbase.pb.BackupContext\0223\n\020serv", - "er_timestamp\030\002 \003(\0132\031.hbase.pb.ServerTime" + - "stamp*k\n\024FullTableBackupState\022\026\n\022PRE_SNA" + - "PSHOT_TABLE\020\001\022\023\n\017SNAPSHOT_TABLES\020\002\022\021\n\rSN" + - "APSHOT_COPY\020\003\022\023\n\017BACKUP_COMPLETE\020\004*f\n\033In" + - "crementalTableBackupState\022\027\n\023PREPARE_INC" + - "REMENTAL\020\001\022\024\n\020INCREMENTAL_COPY\020\002\022\030\n\024INCR" + - "_BACKUP_COMPLETE\020\003*\'\n\nBackupType\022\010\n\004FULL" + - "\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apache.hadoop" + - ".hbase.protobuf.generatedB\014BackupProtosH" + - "\001\210\001\001\240\001\001" + "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022/\n" + + "\007tst_map\030\007 \003(\0132\036.hbase.pb.TableServerTim" + + "estamp\0225\n\026dependent_backup_image\030\010 \003(\0132\025" + + ".hbase.pb.BackupImage\"]\n\021TableBackupStat", + "us\022\"\n\005table\030\001 \002(\0132\023.hbase.pb.TableName\022\022" + + "\n\ntarget_dir\030\002 \002(\t\022\020\n\010snapshot\030\003 \001(\t\"\320\004\n" + + "\nBackupInfo\022\021\n\tbackup_id\030\001 \002(\t\022\"\n\004type\030\002" + + " \002(\0162\024.hbase.pb.BackupType\022\027\n\017target_roo" + + "t_dir\030\003 \002(\t\022/\n\005state\030\004 \001(\0162 
.hbase.pb.Ba" + + "ckupInfo.BackupState\022/\n\005phase\030\005 \001(\0162 .hb" + + "ase.pb.BackupInfo.BackupPhase\022\026\n\016failed_" + + "message\030\006 \001(\t\0228\n\023table_backup_status\030\007 \003" + + "(\0132\033.hbase.pb.TableBackupStatus\022\020\n\010start" + + "_ts\030\010 \001(\004\022\016\n\006end_ts\030\t \001(\004\022\020\n\010progress\030\n ", + "\001(\r\022\016\n\006job_id\030\013 \001(\t\022\026\n\016workers_number\030\014 " + + "\002(\r\022\021\n\tbandwidth\030\r \002(\004\"P\n\013BackupState\022\013\n" + + "\007WAITING\020\000\022\013\n\007RUNNING\020\001\022\014\n\010COMPLETE\020\002\022\n\n" + + "\006FAILED\020\003\022\r\n\tCANCELLED\020\004\"}\n\013BackupPhase\022" + + "\013\n\007REQUEST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n\023PREPARE_IN" + + "CREMENTAL\020\002\022\020\n\014SNAPSHOTCOPY\020\003\022\024\n\020INCREME" + + "NTAL_COPY\020\004\022\022\n\016STORE_MANIFEST\020\005\"k\n\021Backu" + + "pProcContext\022!\n\003ctx\030\001 \002(\0132\024.hbase.pb.Bac" + + "kupInfo\0223\n\020server_timestamp\030\002 \003(\0132\031.hbas" + + "e.pb.ServerTimestamp*k\n\024FullTableBackupS", + "tate\022\026\n\022PRE_SNAPSHOT_TABLE\020\001\022\023\n\017SNAPSHOT" + + "_TABLES\020\002\022\021\n\rSNAPSHOT_COPY\020\003\022\023\n\017BACKUP_C" + + "OMPLETE\020\004*f\n\033IncrementalTableBackupState" + + "\022\027\n\023PREPARE_INCREMENTAL\020\001\022\024\n\020INCREMENTAL" + + "_COPY\020\002\022\030\n\024INCR_BACKUP_COMPLETE\020\003*\'\n\nBac" + + "kupType\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\014BackupProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -11009,19 +10827,19 @@ public final class BackupProtos { internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupManifest_descriptor, - new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", }); + new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TstMap", "DependentBackupImage", }); internal_static_hbase_pb_TableBackupStatus_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableBackupStatus_descriptor, new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); - internal_static_hbase_pb_BackupContext_descriptor = + internal_static_hbase_pb_BackupInfo_descriptor = getDescriptor().getMessageTypes().get(6); - internal_static_hbase_pb_BackupContext_fieldAccessorTable = new + internal_static_hbase_pb_BackupInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_BackupContext_descriptor, - new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", }); + internal_static_hbase_pb_BackupInfo_descriptor, + new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "Progress", "JobId", "WorkersNumber", "Bandwidth", }); 
internal_static_hbase_pb_BackupProcContext_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_hbase_pb_BackupProcContext_fieldAccessorTable = new diff --git hbase-protocol/src/main/protobuf/Backup.proto hbase-protocol/src/main/protobuf/Backup.proto index c17ad065e4457f66a9197fdba8717334802659a5..7d1ec4b221651c4657fc11d13555173d9f910177 100644 --- hbase-protocol/src/main/protobuf/Backup.proto +++ hbase-protocol/src/main/protobuf/Backup.proto @@ -77,11 +77,8 @@ message BackupManifest { repeated TableName table_list = 4; required uint64 start_ts = 5; required uint64 complete_ts = 6; - required int64 total_bytes = 7; - optional int64 log_bytes = 8; - repeated TableServerTimestamp tst_map = 9; - repeated BackupImage dependent_backup_image = 10; - required bool compacted = 11; + repeated TableServerTimestamp tst_map = 7; + repeated BackupImage dependent_backup_image = 8; } message TableBackupStatus { @@ -90,7 +87,7 @@ message TableBackupStatus { optional string snapshot = 3; } -message BackupContext { +message BackupInfo { required string backup_id = 1; required BackupType type = 2; required string target_root_dir = 3; @@ -100,9 +97,10 @@ message BackupContext { repeated TableBackupStatus table_backup_status = 7; optional uint64 start_ts = 8; optional uint64 end_ts = 9; - optional int64 total_bytes_copied = 10; - optional string hlog_target_dir = 11; - optional uint32 progress = 12; + optional uint32 progress = 10; + optional string job_id = 11; + required uint32 workers_number = 12; + required uint64 bandwidth = 13; enum BackupState { WAITING = 0; @@ -123,7 +121,7 @@ message BackupContext { } message BackupProcContext { - required BackupContext ctx = 1; + required BackupInfo ctx = 1; repeated ServerTimestamp server_timestamp = 2; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java new file mode 100644 index 0000000000000000000000000000000000000000..7b0b4545ece88d91b0157094b7e61f7e4ace0533 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+
+public interface BackupClient extends Configurable {
+
+  /**
+   * Describe backup image command
+   * @param backupId - backup id
+   * @return backup info
+   * @throws IOException
+   */
+  public BackupInfo getBackupInfo(String backupId) throws IOException;
+
+  /**
+   * Show backup progress command
+   * @param backupId - backup id (may be null)
+   * @return backup progress (0-100%), -1 if no active sessions
+   *         or session not found
+   * @throws IOException
+   */
+  public int getProgress(String backupId) throws IOException;
+
+  /**
+   * Delete backup image command
+   * @param backupIds - backup ids
+   * @return total number of deleted sessions
+   * @throws IOException
+   */
+  public int deleteBackups(String[] backupIds) throws IOException;
+
+//  /**
+//  TODO: Phase 3
+//   * Cancel current active backup command
+//   * @param backupId - backup id
+//   * @throws IOException
+//   */
+//  public void cancelBackup(String backupId) throws IOException;
+
+  /**
+   * Show backup history command
+   * @param n - last n backup sessions
+   * @return list of backup sessions
+   * @throws IOException
+   */
+  public List<BackupInfo> getHistory(int n) throws IOException;
+
+  /**
+   * Backup sets list command - list all backup sets. A backup set is
+   * a named group of tables.
+   * @return list of backup sets
+   * @throws IOException
+   */
+  public List<BackupSet> listBackupSets() throws IOException;
+
+  /**
+   * Backup set describe command. Shows the list of tables in
+   * this particular backup set.
+   * @param name set name
+   * @return backup set description or null
+   * @throws IOException
+   */
+  public BackupSet getBackupSet(String name) throws IOException;
+
+  /**
+   * Delete backup set command
+   * @param name - backup set name
+   * @return true if successful, false otherwise
+   * @throws IOException
+   */
+  public boolean deleteBackupSet(String name) throws IOException;
+
+  /**
+   * Add tables to backup set command
+   * @param name - name of backup set.
+   * @param tablesOrNamespaces - list of tables or namespaces to be added to this set.
+   * @throws IOException
+   */
+  public void addToBackupSet(String name, String[] tablesOrNamespaces) throws IOException;
+
+  /**
+   * Remove tables from backup set
+   * @param name - name of backup set.
+   * @param tablesOrNamespaces - list of tables or namespaces to be removed from this set.
+ * @throws IOException + */ + public void removeFromBackupSet(String name, String[] tablesOrNamepsaces) throws IOException; + + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index 015c80b5f2fbd367e9b644e6a3c309a079b6ec50..182c4022afc50c049dec2e76b0134ce1971fa57e 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -20,9 +20,6 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.cli.PosixParser; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -39,13 +36,22 @@ import org.apache.log4j.Logger; public class BackupDriver extends AbstractHBaseTool { private static final Log LOG = LogFactory.getLog(BackupDriver.class); - private Options opt; private CommandLine cmd; - + + public BackupDriver() throws IOException + { + init(); + } + protected void init() throws IOException { // define supported options - opt = new Options(); - opt.addOption("debug", false, "Enable debug loggings"); + addOptNoArg("debug", "Enable debug loggings"); + addOptNoArg("all", "All tables"); + addOptWithArg("t", "Table name"); + addOptWithArg("b", "Bandwidth (MB/s)"); + addOptWithArg("w", "Number of workers"); + addOptWithArg("n", "History length"); + addOptWithArg("set", "Backup set name"); // disable irrelevant loggers to avoid it mess up command output LogUtils.disableUselessLoggers(LOG); @@ -64,19 +70,22 @@ public class BackupDriver extends AbstractHBaseTool { System.arraycopy(args, 1, remainArgs, 0, args.length - 1); } } - CommandLine cmdline = null; - try { - cmdline = new PosixParser().parse(opt, remainArgs); - } catch (ParseException e) { - LOG.error("Could not parse command", e); - return -1; - } BackupCommand type = BackupCommand.HELP; if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) { type = BackupCommand.CREATE; } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) { type = BackupCommand.HELP; + } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.DELETE; + } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.DESCRIBE; + } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.HISTORY; + } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.PROGRESS; + } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.SET; } else { System.out.println("Unsupported command for backup: " + cmd); return -1; @@ -84,14 +93,18 @@ public class BackupDriver extends AbstractHBaseTool { // enable debug logging Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); - if (cmdline.hasOption("debug")) { + if (this.cmd.hasOption("debug")) { backupClientLogger.setLevel(Level.DEBUG); } else { backupClientLogger.setLevel(Level.INFO); } // TODO: get rid of Command altogether? 
- BackupCommands.createCommand(getConf(), type, cmdline).execute(); + BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd); + if( type == BackupCommand.CREATE && conf != null) { + ((BackupCommands.CreateCommand) command).setConf(conf); + } + command.execute(); return 0; } @@ -106,14 +119,14 @@ public class BackupDriver extends AbstractHBaseTool { @Override protected int doWork() throws Exception { - init(); return parseAndRun(cmd.getArgs()); } public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); - int ret = ToolRunner.run(conf, new BackupDriver(), args); - System.exit(ret); + int ret = ToolRunner.run(conf, new BackupDriver(), args); + System.exit(ret); } + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java new file mode 100644 index 0000000000000000000000000000000000000000..1ed95f4a8a4862e4f27c1b9b2e419e2a510b3dcf --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; + + +/** + * An object to encapsulate the information for each backup request + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupInfo implements Comparable { + private static final Log LOG = LogFactory.getLog(BackupInfo.class); + // backup status flag + public static enum BackupState { + WAITING, RUNNING, COMPLETE, FAILED, CANCELLED; + } + // backup phase + public static enum BackupPhase { + SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; + } + + // backup id: a timestamp when we request the backup + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // target root directory for storing the backup files + private String targetRootDir; + + // overall backup state + private BackupState state; + + // overall backup phase + private BackupPhase phase; + + // overall backup failure message + private String failedMsg; + + // backup status map for all tables + private Map backupStatusMap; + + // actual start timestamp of the backup process + private long startTs; + + // actual end timestamp of the backup process, could be fail or complete + private long endTs; + + // the total bytes of incremental logs copied + private long totalBytesCopied; + + // for incremental backup, the location of the backed-up hlogs + private String hlogTargetDir = null; + + // incremental backup file list + transient private List incrBackupFileList; + + // new region server log timestamps for table set after distributed log roll + // key - table name, value - map of RegionServer hostname -> last log rolled timestamp + transient private HashMap> tableSetTimestampMap; + + // backup progress in %% (0-100) + private int progress; + + // distributed job id + private String jobId; + + // Number of parallel workers. -1 - system defined + private int workers = -1; + + // Bandwidth per worker in MB per sec. 
-1 - unlimited + private long bandwidth = -1; + + public BackupInfo() { + } + + public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { + backupStatusMap = new HashMap(); + + this.backupId = backupId; + this.type = type; + this.targetRootDir = targetRootDir; + if(LOG.isDebugEnabled()){ + LOG.debug("CreateBackupContext: " + tables.length+" "+tables[0] ); + } + this.addTables(tables); + + if (type == BackupType.INCREMENTAL) { + setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId)); + } + + this.startTs = 0; + this.endTs = 0; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public int getWorkers() { + return workers; + } + + public void setWorkers(int workers) { + this.workers = workers; + } + + public long getBandwidth() { + return bandwidth; + } + + public void setBandwidth(long bandwidth) { + this.bandwidth = bandwidth; + } + + public void setBackupStatusMap(Map backupStatusMap) { + this.backupStatusMap = backupStatusMap; + } + + public HashMap> getTableSetTimestampMap() { + return tableSetTimestampMap; + } + + public void setTableSetTimestampMap(HashMap> tableSetTimestampMap) { + this.tableSetTimestampMap = tableSetTimestampMap; + } + + public String getHlogTargetDir() { + return hlogTargetDir; + } + + public void setType(BackupType type) { + this.type = type; + } + + public void setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + } + + public void setTotalBytesCopied(long totalBytesCopied) { + this.totalBytesCopied = totalBytesCopied; + } + + public void setCancelled(boolean cancelled) { + this.state = BackupState.CANCELLED;; + } + + /** + * Set progress (0-100%) + * @param msg progress value + */ + + public void setProgress(int p) { + this.progress = p; + } + + /** + * Get current progress + */ + public int getProgress() { + return progress; + } + + + /** + * Has been marked as cancelled or not. 
+ * @return True if marked as cancelled + */ + public boolean isCancelled() { + return this.state == BackupState.CANCELLED; + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupStatus getBackupStatus(TableName table) { + return this.backupStatusMap.get(table); + } + + public String getFailedMsg() { + return failedMsg; + } + + public void setFailedMsg(String failedMsg) { + this.failedMsg = failedMsg; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getEndTs() { + return endTs; + } + + public void setEndTs(long endTs) { + this.endTs = endTs; + } + + public long getTotalBytesCopied() { + return totalBytesCopied; + } + + public BackupState getState() { + return state; + } + + public void setState(BackupState flag) { + this.state = flag; + } + + public BackupPhase getPhase() { + return phase; + } + + public void setPhase(BackupPhase phase) { + this.phase = phase; + } + + public BackupType getType() { + return type; + } + + public void setSnapshotName(TableName table, String snapshotName) { + this.backupStatusMap.get(table).setSnapshotName(snapshotName); + } + + public String getSnapshotName(TableName table) { + return this.backupStatusMap.get(table).getSnapshotName(); + } + + public List getSnapshotNames() { + List snapshotNames = new ArrayList(); + for (BackupStatus backupStatus : this.backupStatusMap.values()) { + snapshotNames.add(backupStatus.getSnapshotName()); + } + return snapshotNames; + } + + public Set getTables() { + return this.backupStatusMap.keySet(); + } + + public List getTableNames() { + return new ArrayList(backupStatusMap.keySet()); + } + + public void addTables(TableName[] tables) { + for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + + public String getTargetRootDir() { + return targetRootDir; + } + + public void setHlogTargetDir(String hlogTagetDir) { + this.hlogTargetDir = hlogTagetDir; + } + + public String getHLogTargetDir() { + return hlogTargetDir; + } + + public List getIncrBackupFileList() { + return incrBackupFileList; + } + + public void setIncrBackupFileList(List incrBackupFileList) { + this.incrBackupFileList = incrBackupFileList; + } + + /** + * Set the new region server log timestamps after distributed log roll + * @param newTableSetTimestampMap table timestamp map + */ + public void setIncrTimestampMap(HashMap> newTableSetTimestampMap) { + this.tableSetTimestampMap = newTableSetTimestampMap; + } + + /** + * Get new region server log timestamps after distributed log roll + * @return new region server log timestamps + */ + public HashMap> getIncrTimestampMap() { + return this.tableSetTimestampMap; + } + + public TableName getTableBySnapshot(String snapshotName) { + for (Entry entry : this.backupStatusMap.entrySet()) { + if (snapshotName.equals(entry.getValue().getSnapshotName())) { + return entry.getKey(); + } + } + return null; + } + + public BackupProtos.BackupInfo toProtosBackupInfo() { + BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder(); + builder.setBackupId(getBackupId()); + setBackupStatusMap(builder); + builder.setEndTs(getEndTs()); + if (getFailedMsg() != null) { + builder.setFailedMessage(getFailedMsg()); + } + if (getState() != null) { + 
builder.setState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name())); + } + if (getPhase() != null) { + builder.setPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name())); + } + + builder.setProgress(getProgress()); + builder.setStartTs(getStartTs()); + builder.setTargetRootDir(getTargetRootDir()); + builder.setType(BackupProtos.BackupType.valueOf(getType().name())); + builder.setWorkersNumber(workers); + builder.setBandwidth(bandwidth); + if (jobId != null) { + builder.setJobId(jobId); + } + return builder.build(); + } + + public byte[] toByteArray() throws IOException { + return toProtosBackupInfo().toByteArray(); + } + + private void setBackupStatusMap(Builder builder) { + for (Entry entry: backupStatusMap.entrySet()) { + builder.addTableBackupStatus(entry.getValue().toProto()); + } + } + + public static BackupInfo fromByteArray(byte[] data) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseFrom(data)); + } + + public static BackupInfo fromStream(final InputStream stream) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); + } + + public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { + BackupInfo context = new BackupInfo(); + context.setBackupId(proto.getBackupId()); + context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); + context.setEndTs(proto.getEndTs()); + if (proto.hasFailedMessage()) { + context.setFailedMsg(proto.getFailedMessage()); + } + if (proto.hasState()) { + context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); + } + + context.setHlogTargetDir(HBackupFileSystem.getLogBackupDir(proto.getTargetRootDir(), + proto.getBackupId())); + + if (proto.hasPhase()) { + context.setPhase(BackupPhase.valueOf(proto.getPhase().name())); + } + if (proto.hasProgress()) { + context.setProgress(proto.getProgress()); + } + context.setStartTs(proto.getStartTs()); + context.setTargetRootDir(proto.getTargetRootDir()); + context.setType(BackupType.valueOf(proto.getType().name())); + context.setWorkers(proto.getWorkersNumber()); + context.setBandwidth(proto.getBandwidth()); + if (proto.hasJobId()) { + context.setJobId(proto.getJobId()); + } + return context; + } + + private static Map toMap(List list) { + HashMap map = new HashMap<>(); + for (TableBackupStatus tbs : list){ + map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); + } + return map; + } + + public String getShortDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("ID : " + backupId).append("\n"); + sb.append("Tables : " + getTableListAsString()).append("\n"); + sb.append("State : " + getState()).append("\n"); + Date date = null; + Calendar cal = Calendar.getInstance(); + cal.setTimeInMillis(getStartTs()); + date = cal.getTime(); + sb.append("Start time : " + date).append("\n"); + if (state == BackupState.FAILED) { + sb.append("Failed message : " + getFailedMsg()).append("\n"); + } else if (state == BackupState.RUNNING) { + sb.append("Phase : " + getPhase()).append("\n"); + } else if (state == BackupState.COMPLETE) { + cal = Calendar.getInstance(); + cal.setTimeInMillis(getEndTs()); + date = cal.getTime(); + sb.append("End time : " + date).append("\n"); + } + sb.append("Progress : " + getProgress()).append("\n"); + return sb.toString(); + } + + public String getStatusAndProgressAsString() { + StringBuilder sb = new StringBuilder(); + sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) + .append(" progress: ").append(getProgress()); + 
return sb.toString(); + } + + public String getTableListAsString() { + return StringUtils.join(backupStatusMap.keySet(), ";"); + } + + @Override + public int compareTo(BackupInfo o) { + Long thisTS = + new Long(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); + Long otherTS = + new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java index e0c6483073a5ba6f85c54be52355e527d607e806..30882bdb51be8c22800fa8128ca27cff0bb85b67 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.backup; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.impl.BackupClientImpl; import org.apache.hadoop.hbase.backup.impl.BackupCopyService; import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService; import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl; @@ -33,6 +34,7 @@ public final class BackupRestoreFactory { public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class"; public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class"; + public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class"; public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class"; private BackupRestoreFactory(){ @@ -48,7 +50,9 @@ public final class BackupRestoreFactory { Class cls = conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class, IncrementalRestoreService.class); - return ReflectionUtils.newInstance(cls, conf); + IncrementalRestoreService service = ReflectionUtils.newInstance(cls, conf); + service.setConf(conf); + return service; } /** @@ -60,7 +64,22 @@ public final class BackupRestoreFactory { Class cls = conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class, BackupCopyService.class); - return ReflectionUtils.newInstance(cls, conf); + BackupCopyService service = ReflectionUtils.newInstance(cls, conf);; + service.setConf(conf); + return service; + } + /** + * Gets backup client implementation + * @param conf - configuration + * @return backup client + */ + public static BackupClient getBackupClient(Configuration conf) { + Class cls = + conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class, + BackupClient.class); + BackupClient client = ReflectionUtils.newInstance(cls, conf); + client.setConf(conf); + return client; } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java new file mode 100644 index 0000000000000000000000000000000000000000..1564b04bacc4f36b9c68378bafdf72f1c44e9f62 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
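As an illustrative aside on the BackupRestoreFactory change above: each backup/restore component is now resolved from a configuration key and gets its Configuration injected via setConf(). Below is a minimal sketch (not part of the patch) of overriding the default client; the wrapper class name and org.example.MyBackupClient are hypothetical, the key and factory method come from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupClient;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;

public class BackupClientOverrideSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Point the factory at a custom implementation. The named class is hypothetical;
    // it must implement BackupClient, have a no-arg constructor and be on the classpath.
    conf.set(BackupRestoreFactory.HBASE_BACKUP_CLIENT_IMPL_CLASS, "org.example.MyBackupClient");
    // The factory instantiates the class reflectively and then calls setConf(conf) on it.
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);
    System.out.println("Resolved backup client: " + client.getClass().getName());
  }
}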
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.io.Serializable; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * Backup status and related information encapsulated for a table. + * At this moment only TargetDir and SnapshotName is encapsulated here. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupStatus implements Serializable { + + private static final long serialVersionUID = -5968397963548535982L; + + // table name for backup + private TableName table; + + // target directory of the backup image for this table + private String targetDir; + + // snapshot name for offline/online snapshot + private String snapshotName = null; + + public BackupStatus() { + + } + + public BackupStatus(TableName table, String targetRootDir, String backupId) { + this.table = table; + this.targetDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + } + + public String getSnapshotName() { + return snapshotName; + } + + public void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + public String getTargetDir() { + return targetDir; + } + + public TableName getTable() { + return table; + } + + public void setTable(TableName table) { + this.table = table; + } + + public void setTargetDir(String targetDir) { + this.targetDir = targetDir; + } + + public static BackupStatus convert(BackupProtos.TableBackupStatus proto) + { + BackupStatus bs = new BackupStatus(); + bs.setTable(ProtobufUtil.toTableName(proto.getTable())); + bs.setTargetDir(proto.getTargetDir()); + if(proto.hasSnapshot()){ + bs.setSnapshotName(proto.getSnapshot()); + } + return bs; + } + + public BackupProtos.TableBackupStatus toProto() { + BackupProtos.TableBackupStatus.Builder builder = + BackupProtos.TableBackupStatus.newBuilder(); + if(snapshotName != null) { + builder.setSnapshot(snapshotName); + } + builder.setTable(ProtobufUtil.toProtoTableName(table)); + builder.setTargetDir(targetDir); + return builder.build(); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index 4e881253c2b0b58a83703b7dd39e67be7a7ca866..e9c607b473be6fe6288fe27d49fc6b8e6888b6c4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -51,7 +51,7 @@ public class HBackupFileSystem { /** * Given the backup root dir, backup id and the table name, return the backup image location, * which is also where the backup manifest file is. 
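For illustration only, a minimal sketch of round-tripping the new BackupStatus class above through its protobuf form; the table name, root directory, backup id and snapshot name are made-up example values and the wrapper class exists only for this sketch.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupStatus;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

public class BackupStatusRoundTripSketch {
  public static void main(String[] args) {
    // Example values only; the constructor derives the per-table target dir
    // from the root dir and backup id via HBackupFileSystem.getTableBackupDir().
    BackupStatus status = new BackupStatus(TableName.valueOf("default", "t1_dn"),
        "hdfs://backup.hbase.org:9000/user/biadmin/backup1", "backup_1396650096738");
    status.setSnapshotName("snapshot_t1_dn"); // optional field in the proto message

    BackupProtos.TableBackupStatus proto = status.toProto();
    BackupStatus copy = BackupStatus.convert(proto);

    // Table, target dir and snapshot name survive the round trip.
    System.out.println(copy.getTable() + " -> " + copy.getTargetDir()
        + " (snapshot " + copy.getSnapshotName() + ")");
  }
}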
return value look like: - * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738" + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" * @param backupRootDir backup root directory * @param backupId backup id * @param table table name @@ -59,22 +59,22 @@ public class HBackupFileSystem { */ public static String getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { - return backupRootDir + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR - + tableName.getQualifierAsString() + Path.SEPARATOR + backupId; + return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + + tableName.getNamespaceAsString() + Path.SEPARATOR + + tableName.getQualifierAsString() + Path.SEPARATOR ; } /** * Given the backup root dir, backup id and the table name, return the backup image location, * which is also where the backup manifest file is. return value look like: - * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738" + * "hdfs://backup.hbase.org:9000/user/biadmin/backup_1396650096738/backup1/default/t1_dn/" * @param backupRootPath backup root path * @param tableName table name * @param backupId backup Id * @return backupPath for the particular table */ - public static Path getTableBackupPath(Path backupRootPath, TableName tableName, String backupId) { - return new Path(backupRootPath, tableName.getNamespaceAsString() + Path.SEPARATOR - + tableName.getQualifierAsString() + Path.SEPARATOR + backupId); + public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) { + return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName)); } /** @@ -85,8 +85,8 @@ public class HBackupFileSystem { * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" */ public static String getLogBackupDir(String backupRootDir, String backupId) { - return backupRootDir + Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME + Path.SEPARATOR - + backupId; + return backupRootDir + Path.SEPARATOR + backupId+ Path.SEPARATOR + + HConstants.HREGION_LOGDIR_NAME; } public static Path getLogBackupPath(String backupRootDir, String backupId) { @@ -95,7 +95,7 @@ public class HBackupFileSystem { private static Path getManifestPath(TableName tableName, Configuration conf, Path backupRootPath, String backupId) throws IOException { - Path manifestPath = new Path(getTableBackupPath(backupRootPath, tableName, backupId), + Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId), BackupManifest.MANIFEST_FILE_NAME); FileSystem fs = backupRootPath.getFileSystem(conf); if (!fs.exists(manifestPath)) { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index 541882a3132de418fb0a722092a69c139720271b..6739b5ca7ed82641ae8939a7ebfa58a835488080 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -20,13 +20,9 @@ package org.apache.hadoop.hbase.backup; import java.io.IOException; import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.cli.PosixParser; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupUtil; @@ -38,8 +34,7 @@ import org.apache.log4j.Logger; public class RestoreDriver extends AbstractHBaseTool { - private static final Log LOG = LogFactory.getLog(BackupDriver.class); - private Options opt; + private static final Log LOG = LogFactory.getLog(RestoreDriver.class); private CommandLine cmd; private static final String OPTION_OVERWRITE = "overwrite"; @@ -68,27 +63,25 @@ public class RestoreDriver extends AbstractHBaseTool { + " or using \"hbase backup describe\" command. Without this option, " + "only\n" + " this backup image is restored\n"; + + protected RestoreDriver() throws IOException + { + init(); + } + protected void init() throws IOException { // define supported options - opt = new Options(); - opt.addOption(OPTION_OVERWRITE, false, + addOptNoArg(OPTION_OVERWRITE, "Overwrite the data if any of the restore target tables exists"); - opt.addOption(OPTION_CHECK, false, "Check restore sequence and dependencies"); - opt.addOption(OPTION_AUTOMATIC, false, "Restore all dependencies"); - opt.addOption("debug", false, "Enable debug logging"); + addOptNoArg(OPTION_CHECK, "Check restore sequence and dependencies"); + addOptNoArg(OPTION_AUTOMATIC, "Restore all dependencies"); + addOptNoArg("debug", "Enable debug logging"); // disable irrelevant loggers to avoid it mess up command output LogUtils.disableUselessLoggers(LOG); } private int parseAndRun(String[] args) { - CommandLine cmd = null; - try { - cmd = new PosixParser().parse(opt, args); - } catch (ParseException e) { - LOG.error("Could not parse command", e); - return -1; - } // enable debug logging Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); @@ -119,34 +112,34 @@ public class RestoreDriver extends AbstractHBaseTool { // parse main restore command options String[] remainArgs = cmd.getArgs(); - if (remainArgs.length < 3) { - System.out.println("ERROR: missing arguments"); + if (remainArgs.length < 4) { System.out.println(USAGE); return -1; } - String backupRootDir = remainArgs[0]; - String backupId = remainArgs[1]; - String tables = remainArgs[2]; - - String tableMapping = (remainArgs.length > 3) ? remainArgs[3] : null; + String backupRootDir = remainArgs[1]; + String backupId = remainArgs[2]; + String tables = remainArgs[3]; + + String tableMapping = (remainArgs.length > 4) ? 
remainArgs[4] : null; TableName[] sTableArray = BackupUtil.parseTableNames(tables); TableName[] tTableArray = BackupUtil.parseTableNames(tableMapping); if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) { - System.err.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); + System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); System.out.println(USAGE); - return -1; + return -2; } - try { - RestoreClient client = BackupRestoreFactory.getRestoreClient(conf); + + RestoreClient client = BackupRestoreFactory.getRestoreClient(getConf()); + try{ client.restore(backupRootDir, backupId, check, autoRestore, sTableArray, tTableArray, isOverwrite); - } catch (IOException e) { - System.err.println("ERROR: " + e.getMessage()); - return -1; + } catch (Exception e){ + e.printStackTrace(); + return -3; } return 0; } @@ -162,13 +155,12 @@ public class RestoreDriver extends AbstractHBaseTool { @Override protected int doWork() throws Exception { - init(); return parseAndRun(cmd.getArgs()); } public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); - int ret = ToolRunner.run(conf, new BackupDriver(), args); + int ret = ToolRunner.run(conf, new RestoreDriver(), args); System.exit(ret); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java new file mode 100644 index 0000000000000000000000000000000000000000..7b2db3df26db846b50452fabafa9cefe2df3e701 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java @@ -0,0 +1,231 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
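To show how the client implemented just below is meant to be driven, a hedged usage sketch; the history length is arbitrary and the wrapper class is not part of the patch.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupClient;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;

public class BackupClientUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);

    // Print the most recent (up to) 10 backup sessions recorded in hbase:backup.
    List<BackupInfo> history = client.getHistory(10);
    for (BackupInfo info : history) {
      System.out.println(info.getShortDescription());
    }

    // A null backup id asks for the progress of the most recent RUNNING session;
    // -1 means no matching session was found.
    int progress = client.getProgress(null);
    System.out.println("progress=" + progress + "%");
  }
}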
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClient; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +/** + * Backup HBase tables locally or on a remote cluster Serve as client entry point for the following + * features: - Full Backup provide local and remote back/restore for a list of tables - Incremental + * backup to build on top of full backup as daily/weekly backup - Convert incremental backup WAL + * files into hfiles - Merge several backup images into one(like merge weekly into monthly) - Add + * and remove table to and from Backup image - Cancel a backup process - Full backup based on + * existing snapshot - Describe information of a backup image + */ + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class BackupClientImpl implements BackupClient{ + private static final Log LOG = LogFactory.getLog(BackupClientImpl.class); + private Configuration conf; + + public BackupClientImpl() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + + @Override + public BackupInfo getBackupInfo(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + backupInfo = table.readBackupInfo(backupId); + return backupInfo; + } + } + + @Override + public int getProgress(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + if (backupId == null) { + ArrayList recentSessions = + table.getBackupContexts(BackupState.RUNNING); + if (recentSessions.isEmpty()) { + LOG.warn("No ongonig sessions found."); + return -1; + } + // else show status for all ongoing sessions + // must be one maximum + return recentSessions.get(0).getProgress(); + } else { + + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + return backupInfo.getProgress(); + } else { + LOG.warn("No information found for backupID=" + backupId); + return -1; + } + } + } + } + + @Override + public int deleteBackups(String[] backupIds) throws IOException { + BackupInfo backupInfo = null; + String backupId = null; + int totalDeleted = 0; + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + for (int i = 0; i < backupIds.length; i++) { + backupId = backupIds[i]; + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + BackupUtil.cleanupBackupData(backupInfo, conf); + table.deleteBackupInfo(backupInfo.getBackupId()); + System.out.println("Delete backup for backupID=" + backupId + " completed."); + totalDeleted++; + } else { + System.out.println("Delete backup failed: no information found for backupID=" 
+ backupId); + } + } + } + return totalDeleted; + } + +//TODO: Cancel backup? + +// @Override +// public void cancelBackup(String backupId) throws IOException { +// // Kill distributed job if active +// // Backup MUST not be in COMPLETE state +// try (final BackupSystemTable table = new BackupSystemTable(conf)) { +// BackupContext backupContext = table.readBackupStatus(backupId); +// String errMessage = null; +// if (backupContext != null && backupContext.getState() != BackupState.COMPLETE) { +// BackupUtil.cleanupBackupData(backupContext, conf); +// table.deleteBackupStatus(backupContext.getBackupId()); +// byte[] jobId = backupContext.getJobId(); +// if(jobId != null) { +// BackupCopyService service = BackupRestoreFactory.getBackupCopyService(conf); +// service.cancelCopyJob(jobId); +// } else{ +// errMessage = "Distributed Job ID is null for backup "+backupId + +// " in "+ backupContext.getState() + " state."; +// } +// } else if( backupContext == null){ +// errMessage = "No information found for backupID=" + backupId; +// } else { +// errMessage = "Can not cancel "+ backupId + " in " + backupContext.getState()+" state"; +// } +// +// if( errMessage != null) { +// throw new IOException(errMessage); +// } +// } +// // then clean backup image +// deleteBackups(new String[] { backupId }); +// } + + @Override + public List getHistory(int n) throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List history = table.getBackupHistory(); + if( history.size() <= n) return history; + List list = new ArrayList(); + for(int i=0; i < n; i++){ + list.add(history.get(i)); + } + return list; + } + } + + + @Override + public List listBackupSets() throws IOException{ + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.listBackupSets(); + List bslist = new ArrayList(); + for (String s : list) { + List tables = table.describeBackupSet(s); + bslist.add( new BackupSet(s, tables)); + } + return bslist; + } + } + + + @Override + public BackupSet getBackupSet(String name) throws IOException{ + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.describeBackupSet(name); + return new BackupSet(name, list); + } + } + + @Override + public boolean deleteBackupSet(String name) throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + if(table.describeBackupSet(name) == null) { + return false; + } + table.deleteBackupSet(name); + return true; + } + } + + @Override + public void addToBackupSet(String name, String[] tablesOrNamespaces) throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + table.addToBackupSet(name, tablesOrNamespaces); + System.out.println("Added tables to '" + name + "'"); + } + } + + @Override + public void removeFromBackupSet(String name, String[] tablesOrNamepsaces) throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + table.removeFromBackupSet(name, tablesOrNamepsaces); + System.out.println("Removed tables from '" + name + "'"); + } + } + + @Override + public Configuration 
getConf() { + return conf; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 1789cdf1f2d109d2282cd7deeae78ce5c530e602..bd03605ecbdb3f302466427b9406ed4eb26ea892 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -19,12 +19,21 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; +import java.util.List; + import org.apache.commons.cli.CommandLine; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClient; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.backup.util.BackupSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; @@ -41,18 +50,54 @@ import com.google.common.collect.Lists; public final class BackupCommands { private static final String USAGE = "Usage: hbase backup COMMAND\n" - + "where COMMAND is one of:\n" + " create create a new backup image\n" + + "where COMMAND is one of:\n" + + " create create a new backup image\n" + + " cancel cancel an ongoing backup\n" + + " delete delete an existing backup image\n" + + " describe show the detailed information of a backup image\n" + + " history show history of all successful backups\n" + + " progress show the progress of the latest backup request\n" + + " set backup set management\n" + "Enter \'help COMMAND\' to see help message for each command\n"; private static final String CREATE_CMD_USAGE = - "Usage: hbase backup create [tables] [-convert] " - + "\n" + " type \"full\" to create a full backup image;\n" + "Usage: hbase backup create [tables] [-s name] [-convert] " + + "[-silent] [-w workers][-b bandwith]\n" + " type \"full\" to create a full backup image;\n" + " \"incremental\" to create an incremental backup image\n" - + " backup_root_path The full root path to store the backup image,\n" - + " the prefix can be hdfs, webhdfs, gpfs, etc\n" + " Options:\n" - + " tables If no tables (\"\") are specified, all tables are backed up. " + + " backup_root_path The full root path to store the backup image,\n" + + " the prefix can be hdfs, webhdfs or gpfs\n" + " Options:\n" + + " tables If no tables (\"\") are specified, all tables are backed up. 
" + "Otherwise it is a\n" + " comma separated list of tables.\n" - + " -convert For an incremental backup, convert WAL files to HFiles\n"; + + " -s name Use the specified snapshot for full backup\n" + + " -convert For an incremental backup, convert WAL files to HFiles\n" + + " -w number of parallel workers.\n" + + " -b bandwith per one worker (in MB sec)" ; + + private static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress \n" + + " backupId backup image id;\n"; + + private static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup decsribe \n" + + " backupId backup image id\n"; + + private static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [-n N]\n" + + " -n N show up to N last backup sessions, default - 10;\n"; + + private static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n" + + " backupId backup image id;\n"; + + private static final String CANCEL_CMD_USAGE = "Usage: hbase backup progress \n" + + " backupId backup image id;\n"; + + private static final String SET_CMD_USAGE = "Usage: hbase set COMMAND [name] [tables]\n" + + " name Backup set name\n" + + " tables If no tables (\"\") are specified, all tables will belong to the set. " + + "Otherwise it is a\n" + " comma separated list of tables.\n" + + "where COMMAND is one of:\n" + + " add add tables to a set, crete set if needed\n" + + " remove remove tables from set\n" + + " list list all sets\n" + + " describe describes set\n" + + " delete delete backup set\n"; public static abstract class Command extends Configured { Command(Configuration conf) { @@ -68,25 +113,44 @@ public final class BackupCommands { public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) { Command cmd = null; switch (type) { - case CREATE: - cmd = new CreateCommand(conf, cmdline); - break; - case HELP: - default: - cmd = new HelpCommand(conf, cmdline); - break; + case CREATE: + cmd = new CreateCommand(conf, cmdline); + break; + case DESCRIBE: + cmd = new DescribeCommand(conf, cmdline); + break; + case PROGRESS: + cmd = new ProgressCommand(conf, cmdline); + break; + case DELETE: + cmd = new DeleteCommand(conf, cmdline); + break; + case CANCEL: + cmd = new CancelCommand(conf, cmdline); + break; + case HISTORY: + cmd = new HistoryCommand(conf, cmdline); + break; + case SET: + cmd = new BackupSetCommand(conf, cmdline); + break; + case HELP: + default: + cmd = new HelpCommand(conf, cmdline); + break; } return cmd; } - private static class CreateCommand extends Command { + + public static class CreateCommand extends Command { CommandLine cmdline; CreateCommand(Configuration conf, CommandLine cmdline) { super(conf); this.cmdline = cmdline; } - + @Override public void execute() throws IOException { if (cmdline == null || cmdline.getArgs() == null) { @@ -95,31 +159,53 @@ public final class BackupCommands { System.exit(-1); } String[] args = cmdline.getArgs(); - if (args.length < 2 || args.length > 3) { + if (args.length < 3 || args.length > 4) { System.out.println("ERROR: wrong number of arguments"); System.out.println(CREATE_CMD_USAGE); System.exit(-1); } - if (!BackupType.FULL.toString().equalsIgnoreCase(args[0]) - && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[0])) { + if (!BackupType.FULL.toString().equalsIgnoreCase(args[1]) + && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) { System.out.println("ERROR: invalid backup type"); System.out.println(CREATE_CMD_USAGE); System.exit(-1); } - String tables = (args.length == 3) ? 
args[2] : null; + String tables = null; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + + // Check backup set + if (cmdline.hasOption("set")) { + String setName = cmdline.getOptionValue("set"); + tables = getTablesForSet(setName, conf); + + if (tables == null) throw new IOException("Backup set '" + setName + + "' is either empty or does not exist"); + } else { + tables = (args.length == 4) ? args[3] : null; + } + int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1; + int workers = cmdline.hasOption('w') ? Integer.parseInt(cmdline.getOptionValue('w')) : -1; try (Connection conn = ConnectionFactory.createConnection(getConf()); Admin admin = conn.getAdmin();) { BackupRequest request = new BackupRequest(); - request.setBackupType(BackupType.valueOf(args[0].toUpperCase())) - .setTableList(Lists.newArrayList(BackupUtil.parseTableNames(tables))) - .setTargetRootDir(args[1]); + request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) + .setTableList(tables != null?Lists.newArrayList(BackupUtil.parseTableNames(tables)): null) + .setTargetRootDir(args[2]).setWorkers(workers).setBandwidth(bandwidth); admin.backupTables(request); } catch (IOException e) { - System.err.println("ERROR: " + e.getMessage()); - System.exit(-1); + throw e; + } + } + private String getTablesForSet(String name, Configuration conf) + throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List tables = table.describeBackupSet(name); + if (tables == null) return null; + return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); } } } @@ -155,9 +241,321 @@ public final class BackupCommands { if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) { System.out.println(CREATE_CMD_USAGE); - } // other commands will be supported in future jira + } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(type)) { + System.out.println(DESCRIBE_CMD_USAGE); + } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(type)) { + System.out.println(HISTORY_CMD_USAGE); + } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(type)) { + System.out.println(PROGRESS_CMD_USAGE); + } else if (BackupCommand.DELETE.name().equalsIgnoreCase(type)) { + System.out.println(DELETE_CMD_USAGE); + } + if (BackupCommand.CANCEL.name().equalsIgnoreCase(type)) { + System.out.println(CANCEL_CMD_USAGE); + } + if (BackupCommand.SET.name().equalsIgnoreCase(type)) { + System.out.println(SET_CMD_USAGE); + } else { + System.out.println("Unknown command : " + type); + System.out.println(USAGE); + } System.exit(0); } } + private static class DescribeCommand extends Command { + CommandLine cmdline; + + DescribeCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null) { + System.out.println("ERROR: missing arguments"); + System.out.println(DESCRIBE_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + if (args.length != 2) { + System.out.println("ERROR: wrong number of arguments"); + System.out.println(DESCRIBE_CMD_USAGE); + System.exit(-1); + } + + String backupId = args[1]; + try { + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + BackupInfo info = client.getBackupInfo(backupId); + System.out.println(info.getShortDescription()); + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + } + + private static class ProgressCommand extends Command { + CommandLine cmdline; + + ProgressCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null || + cmdline.getArgs().length != 2) { + System.out.println("No backup id was specified, " + + "will retrieve the most recent (ongoing) sessions"); + } + String[] args = cmdline.getArgs(); + if (args.length > 2) { + System.out.println("ERROR: wrong number of arguments: " + args.length); + System.out.println(PROGRESS_CMD_USAGE); + System.exit(-1); + } + + String backupId = args == null ? null : args[1]; + try { + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + int progress = client.getProgress(backupId); + if(progress < 0){ + System.out.println("No info was found for backup id: "+backupId); + } else{ + System.out.println(backupId+" progress=" + progress+"%"); + } + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + } + + private static class DeleteCommand extends Command { + + CommandLine cmdline; + DeleteCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null || + cmdline.getArgs().length < 2) { + System.out.println("No backup id(s) was specified"); + System.out.println(PROGRESS_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + + String[] backupIds = new String[args.length-1]; + System.arraycopy(args, 1, backupIds, 0, backupIds.length); + try { + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + client.deleteBackups(args); + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + } + +// TODO Cancel command + + private static class CancelCommand extends Command { + CommandLine cmdline; + + CancelCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || + cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.out.println("No backup id(s) was specified, will use the most recent one"); + } + String[] args = cmdline.getArgs(); + String backupId = args == null || args.length == 0 ? null : args[1]; + try { + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); +//TODO +// client.cancelBackup(backupId); + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + } + + private static class HistoryCommand extends Command { + CommandLine cmdline; + private final static int DEFAULT_HISTORY_LENGTH = 10; + + HistoryCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + + int n = parseHistoryLength(); + try { + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + List history = client.getHistory(n); + for(BackupInfo info: history){ + System.out.println(info.getShortDescription()); + } + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + + private int parseHistoryLength() { + String value = cmdline.getOptionValue("n"); + if (value == null) return DEFAULT_HISTORY_LENGTH; + return Integer.parseInt(value); + } + } + + private static class BackupSetCommand extends Command { + private final static String SET_ADD_CMD = "add"; + private final static String SET_REMOVE_CMD = "remove"; + private final static String SET_DELETE_CMD = "delete"; + private final static String SET_DESCRIBE_CMD = "describe"; + private final static String SET_LIST_CMD = "list"; + + CommandLine cmdline; + + BackupSetCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + + // Command-line must have at least one element + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + throw new IOException("command line format"); + } + String[] args = cmdline.getArgs(); + String cmdStr = args[1]; + BackupCommand cmd = getCommand(cmdStr); + + try { + + switch (cmd) { + case SET_ADD: + processSetAdd(args); + break; + case SET_REMOVE: + processSetRemove(args); + break; + case SET_DELETE: + processSetDelete(args); + break; + case SET_DESCRIBE: + processSetDescribe(args); + break; + case SET_LIST: + processSetList(args); + break; + default: + break; + + } + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + + private void processSetList(String[] args) throws IOException { + // List all backup set names + // does not expect any args + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + client.listBackupSets(); + } + + private void processSetDescribe(String[] args) throws IOException { + if (args == null || args.length != 3) { + throw new RuntimeException("Wrong number of args: "+args.length); + } + String setName = args[2]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + BackupSet set = client.getBackupSet(setName); + System.out.println(set); + } + + private void processSetDelete(String[] args) throws IOException { + if (args == null || args.length != 3) { + throw new RuntimeException("Wrong number of args"); + } + String setName = args[2]; + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + boolean result = client.deleteBackupSet(setName); + if(result){ + System.out.println("Delete set "+setName+" OK."); + } else{ + System.out.println("Set "+setName+" does not exists"); + } + } + + private void processSetRemove(String[] args) throws IOException { + if (args == null || args.length != 4) { + throw new RuntimeException("Wrong args"); + } + String setName = args[2]; + String[] tables = args[3].split(","); + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + client.removeFromBackupSet(setName, tables); + } + + private void processSetAdd(String[] args) throws IOException { + if (args == null || args.length != 4) { + throw new RuntimeException("Wrong args"); + } + String setName = args[2]; + String[] tables = args[3].split(","); + Configuration conf = getConf() != null? getConf():HBaseConfiguration.create(); + BackupClient client = BackupRestoreFactory.getBackupClient(conf); + client.addToBackupSet(setName, tables); + } + + private BackupCommand getCommand(String cmdStr) throws IOException { + if (cmdStr.equals(SET_ADD_CMD)) { + return BackupCommand.SET_ADD; + } else if (cmdStr.equals(SET_REMOVE_CMD)) { + return BackupCommand.SET_REMOVE; + } else if (cmdStr.equals(SET_DELETE_CMD)) { + return BackupCommand.SET_DELETE; + } else if (cmdStr.equals(SET_DESCRIBE_CMD)) { + return BackupCommand.SET_DESCRIBE; + } else if (cmdStr.equals(SET_LIST_CMD)) { + return BackupCommand.SET_LIST; + } else { + throw new IOException("Unknown command for 'set' :" + cmdStr); + } + } + + } + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java deleted file mode 100644 index 06e66dcfc75fec147010417a888d12e903bc5f0c..0000000000000000000000000000000000000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java +++ /dev/null @@ -1,402 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
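The set subcommands above map directly onto the backup-set API of the client; a rough sketch of that flow follows. The set name and table names are invented examples, the exact table-name format accepted by addToBackupSet is an assumption, and the wrapper class is not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupClient;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.util.BackupSet;

public class BackupSetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);

    // "nightly" and the table names are examples; sets are persisted in hbase:backup.
    client.addToBackupSet("nightly", new String[] { "default:t1_dn", "default:t2_dn" });

    BackupSet set = client.getBackupSet("nightly");      // name plus resolved table list
    System.out.println(set);

    client.removeFromBackupSet("nightly", new String[] { "default:t2_dn" });
    boolean existed = client.deleteBackupSet("nightly"); // false if the set was unknown
    System.out.println("deleted=" + existed);
  }
}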
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; - -/** - * An object to encapsulate the information for each backup request - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupContext { - - public Map getBackupStatusMap() { - return backupStatusMap; - } - - public void setBackupStatusMap(Map backupStatusMap) { - this.backupStatusMap = backupStatusMap; - } - - public HashMap> getTableSetTimestampMap() { - return tableSetTimestampMap; - } - - public void setTableSetTimestampMap( - HashMap> tableSetTimestampMap) { - this.tableSetTimestampMap = tableSetTimestampMap; - } - - public String getHlogTargetDir() { - return hlogTargetDir; - } - - public void setType(BackupType type) { - this.type = type; - } - - public void setTargetRootDir(String targetRootDir) { - this.targetRootDir = targetRootDir; - } - - public void setTotalBytesCopied(long totalBytesCopied) { - this.totalBytesCopied = totalBytesCopied; - } - - // backup status flag - public static enum BackupState { - RUNNING, COMPLETE, FAILED, CANCELLED; - } - - public void setCancelled(boolean cancelled) { - this.state = BackupState.CANCELLED;; - } - - // backup phase - // for overall backup (for table list, some table may go online, while some may go offline) - protected static enum BackupPhase { - SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; - } - - // backup id: a timestamp when we request the backup - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // target root directory for storing the backup files - private String targetRootDir; - - // overall backup state - private BackupState state; - - // overall backup phase - private BackupPhase phase; - - // overall backup failure message - private String failedMsg; - - // backup status map for all tables - private Map backupStatusMap; - - // actual start timestamp of the backup process - private long startTs; - - // actual end timestamp of the backup process, could be fail or complete - private long endTs; - - // the total bytes of incremental logs copied - private long totalBytesCopied; - - // for incremental backup, the location of the backed-up hlogs - private String hlogTargetDir = null; - - // incremental backup file list - transient private List incrBackupFileList; - - // new region server log timestamps for table set after distributed log roll - // key - table name, value - map of RegionServer hostname -> last log rolled timestamp - transient private HashMap> tableSetTimestampMap; - - // backup progress in %% (0-100) - - private int progress; - - public BackupContext() { - } - - public BackupContext(String backupId, BackupType type, TableName[] tables, String targetRootDir) { - backupStatusMap = new HashMap(); - - 
this.backupId = backupId; - this.type = type; - this.targetRootDir = targetRootDir; - - this.addTables(tables); - - if (type == BackupType.INCREMENTAL) { - setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId)); - } - - this.startTs = 0; - this.endTs = 0; - } - - /** - * Set progress string - * @param msg progress message - */ - - public void setProgress(int p) { - this.progress = p; - } - - /** - * Get current progress - */ - public int getProgress() { - return progress; - } - - - /** - * Has been marked as cancelled or not. - * @return True if marked as cancelled - */ - public boolean isCancelled() { - return this.state == BackupState.CANCELLED; - } - - public String getBackupId() { - return backupId; - } - - public void setBackupId(String backupId) { - this.backupId = backupId; - } - - public BackupStatus getBackupStatus(TableName table) { - return this.backupStatusMap.get(table); - } - - public String getFailedMsg() { - return failedMsg; - } - - public void setFailedMsg(String failedMsg) { - this.failedMsg = failedMsg; - } - - public long getStartTs() { - return startTs; - } - - public void setStartTs(long startTs) { - this.startTs = startTs; - } - - public long getEndTs() { - return endTs; - } - - public void setEndTs(long endTs) { - this.endTs = endTs; - } - - public long getTotalBytesCopied() { - return totalBytesCopied; - } - - public BackupState getState() { - return state; - } - - public void setState(BackupState flag) { - this.state = flag; - } - - public BackupPhase getPhase() { - return phase; - } - - public void setPhase(BackupPhase phase) { - this.phase = phase; - } - - public BackupType getType() { - return type; - } - - public void setSnapshotName(TableName table, String snapshotName) { - this.backupStatusMap.get(table).setSnapshotName(snapshotName); - } - - public String getSnapshotName(TableName table) { - return this.backupStatusMap.get(table).getSnapshotName(); - } - - public List getSnapshotNames() { - List snapshotNames = new ArrayList(); - for (BackupStatus backupStatus : this.backupStatusMap.values()) { - snapshotNames.add(backupStatus.getSnapshotName()); - } - return snapshotNames; - } - - public Set getTables() { - return this.backupStatusMap.keySet(); - } - - public List getTableNames() { - return new ArrayList(backupStatusMap.keySet()); - } - - public void addTables(TableName[] tables) { - for (TableName table : tables) { - BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); - this.backupStatusMap.put(table, backupStatus); - } - } - - public String getTargetRootDir() { - return targetRootDir; - } - - public void setHlogTargetDir(String hlogTagetDir) { - this.hlogTargetDir = hlogTagetDir; - } - - public String getHLogTargetDir() { - return hlogTargetDir; - } - - public List getIncrBackupFileList() { - return incrBackupFileList; - } - - public List setIncrBackupFileList(List incrBackupFileList) { - this.incrBackupFileList = incrBackupFileList; - return this.incrBackupFileList; - } - - /** - * Set the new region server log timestamps after distributed log roll - * @param newTableSetTimestampMap table timestamp map - */ - public void setIncrTimestampMap(HashMap> newTableSetTimestampMap) { - this.tableSetTimestampMap = newTableSetTimestampMap; - } - - /** - * Get new region server log timestamps after distributed log roll - * @return new region server log timestamps - */ - public HashMap> getIncrTimestampMap() { - return this.tableSetTimestampMap; - } - - public TableName getTableBySnapshot(String 
snapshotName) { - for (Entry entry : this.backupStatusMap.entrySet()) { - if (snapshotName.equals(entry.getValue().getSnapshotName())) { - return entry.getKey(); - } - } - return null; - } - - BackupProtos.BackupContext toBackupContext() { - BackupProtos.BackupContext.Builder builder = - BackupProtos.BackupContext.newBuilder(); - builder.setBackupId(getBackupId()); - setBackupStatusMap(builder); - builder.setEndTs(getEndTs()); - if(getFailedMsg() != null){ - builder.setFailedMessage(getFailedMsg()); - } - if(getState() != null){ - builder.setState(BackupProtos.BackupContext.BackupState.valueOf(getState().name())); - } - if(getPhase() != null){ - builder.setPhase(BackupProtos.BackupContext.BackupPhase.valueOf(getPhase().name())); - } - if(getHLogTargetDir() != null){ - builder.setHlogTargetDir(getHLogTargetDir()); - } - - builder.setProgress(getProgress()); - builder.setStartTs(getStartTs()); - builder.setTargetRootDir(getTargetRootDir()); - builder.setTotalBytesCopied(getTotalBytesCopied()); - builder.setType(BackupProtos.BackupType.valueOf(getType().name())); - return builder.build(); - } - - public byte[] toByteArray() throws IOException { - return toBackupContext().toByteArray(); - } - - private void setBackupStatusMap(Builder builder) { - for (Entry entry: backupStatusMap.entrySet()) { - builder.addTableBackupStatus(entry.getValue().toProto()); - } - } - - public static BackupContext fromByteArray(byte[] data) throws IOException { - return fromProto(BackupProtos.BackupContext.parseFrom(data)); - } - - public static BackupContext fromStream(final InputStream stream) throws IOException { - return fromProto(BackupProtos.BackupContext.parseDelimitedFrom(stream)); - } - - static BackupContext fromProto(BackupProtos.BackupContext proto) { - BackupContext context = new BackupContext(); - context.setBackupId(proto.getBackupId()); - context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); - context.setEndTs(proto.getEndTs()); - if(proto.hasFailedMessage()) { - context.setFailedMsg(proto.getFailedMessage()); - } - if(proto.hasState()) { - context.setState(BackupContext.BackupState.valueOf(proto.getState().name())); - } - if(proto.hasHlogTargetDir()) { - context.setHlogTargetDir(proto.getHlogTargetDir()); - } - if(proto.hasPhase()) { - context.setPhase(BackupPhase.valueOf(proto.getPhase().name())); - } - if(proto.hasProgress()) { - context.setProgress(proto.getProgress()); - } - context.setStartTs(proto.getStartTs()); - context.setTargetRootDir(proto.getTargetRootDir()); - context.setTotalBytesCopied(proto.getTotalBytesCopied()); - context.setType(BackupType.valueOf(proto.getType().name())); - return context; - } - - private static Map toMap(List list) { - HashMap map = new HashMap<>(); - for (TableBackupStatus tbs : list){ - map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); - } - return map; - } - -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java index 1e8da630ecd1cf5019831401d171c50aee21a067..a738b5dbb50cae21c71ec12e7149b45851032413 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.BackupInfo; import 
org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -32,6 +33,24 @@ public interface BackupCopyService extends Configurable { FULL, INCREMENTAL } - public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf, + /** + * Copy backup data + * @param backupContext - context + * @param backupManager - manager + * @param conf - configuration + * @param copyType - copy type + * @param options - list of options + * @return result (0 - success) + * @throws IOException + */ + public int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf, BackupCopyService.Type copyType, String[] options) throws IOException; + + + /** + * Cancel copy job + * @param jobHandler - copy job handler + * @throws IOException + */ + public void cancelCopyJob(String jobHandler) throws IOException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java index af70cc8dbb49577cadfff4eeed636ddec8c23663..ca204b433e0c1026ad2032c225785b726074706d 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.backup.impl; import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -29,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving public class BackupException extends HBaseIOException { - private BackupContext description; + private BackupInfo description; /** * Some exception happened for a backup and don't even know the backup that it was about @@ -52,7 +53,7 @@ public class BackupException extends HBaseIOException { * @param msg reason why the backup failed * @param desc description of the backup that is being failed */ - public BackupException(String msg, BackupContext desc) { + public BackupException(String msg, BackupInfo desc) { super(msg); this.description = desc; } @@ -63,7 +64,7 @@ public class BackupException extends HBaseIOException { * @param cause root cause of the failure * @param desc description of the backup that is being failed */ - public BackupException(String msg, Throwable cause, BackupContext desc) { + public BackupException(String msg, Throwable cause, BackupInfo desc) { super(msg, cause); this.description = desc; } @@ -78,7 +79,7 @@ public class BackupException extends HBaseIOException { super(message, e); } - public BackupContext getBackupContext() { + public BackupInfo getBackupContext() { return this.description; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index b4d47d3e28ae8aad6704a5e41e347ba4774af367..ba09c8d66762349857d7e8643e29b77e492ede59 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.backup.impl; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import java.io.Closeable; import java.io.IOException; 
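As a hedged illustration of the extended copy-service contract above (copy plus cancelCopyJob), the fragment below; the job handle string is a placeholder, since in the real flow it would identify the copy job started for the session being cancelled, and the wrapper class is not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.impl.BackupCopyService;

public class CancelCopyJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolves to the MapReduce-based copy service unless hbase.backup.copy.class overrides it.
    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);

    // Placeholder handle: a real caller would pass the id of the copy job
    // that was launched for the backup session it wants to cancel.
    copyService.cancelCopyJob("job_20160101_0001");
  }
}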
import java.util.ArrayList; @@ -27,10 +25,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -42,18 +37,23 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; +import org.apache.hadoop.hbase.backup.master.BackupController; import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * Handles backup requests on server-side, creates backup context records in hbase:backup * to keep track backup. The timestamps kept in hbase:backup table will be used for future @@ -65,7 +65,7 @@ public class BackupManager implements Closeable { private static final Log LOG = LogFactory.getLog(BackupManager.class); private Configuration conf = null; - private BackupContext backupContext = null; + private BackupInfo backupContext = null; private ExecutorService pool = null; @@ -86,12 +86,21 @@ public class BackupManager implements Closeable { HConstants.BACKUP_ENABLE_KEY + " setting."); } this.conf = conf; - this.conn = ConnectionFactory.createConnection(conf); // TODO: get Connection from elsewhere? 
+ this.conn = ConnectionFactory.createConnection(conf); this.systemTable = new BackupSystemTable(conn); + Runtime.getRuntime().addShutdownHook(new ExitHandler()); + } /** + * Return backup context + */ + protected BackupInfo getBackupContext() + { + return backupContext; + } + /** * This method modifies the master's configuration in order to inject backup-related features * @param conf configuration */ @@ -99,16 +108,61 @@ public class BackupManager implements Closeable { if (!isBackupEnabled(conf)) { return; } + // Add WAL archive cleaner plug-in String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); String cleanerClass = BackupLogCleaner.class.getCanonicalName(); if (!plugins.contains(cleanerClass)) { conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); + } + + String classes = conf.get("hbase.procedure.master.classes"); + String masterProcedureClass = LogRollMasterProcedureManager.class.getName(); + if(classes == null){ + conf.set("hbase.procedure.master.classes", masterProcedureClass); + } else if(!classes.contains(masterProcedureClass)){ + conf.set("hbase.procedure.master.classes", classes +","+masterProcedureClass); + } + + // Set Master Observer - Backup Controller + classes = conf.get("hbase.coprocessor.master.classes"); + String observerClass = BackupController.class.getName(); + if(classes == null){ + conf.set("hbase.coprocessor.master.classes", observerClass); + } else if(!classes.contains(observerClass)){ + conf.set("hbase.coprocessor.master.classes", classes +","+observerClass); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Added log cleaner: " + cleanerClass); + LOG.debug("Added master procedure manager: "+masterProcedureClass); + LOG.debug("Added master observer: "+observerClass); } - if (LOG.isTraceEnabled()) { - LOG.trace("Added log cleaner: " + cleanerClass); - } + } + /** + * This method modifies the RS configuration in order to inject backup-related features + * @param conf configuration + */ + public static void decorateRSConfiguration(Configuration conf) { + if (!isBackupEnabled(conf)) { + return; + } + + String classes = conf.get("hbase.procedure.regionserver.classes"); + String regionProcedureClass = LogRollRegionServerProcedureManager.class.getName(); + if(classes == null){ + conf.set("hbase.procedure.regionserver.classes", regionProcedureClass); + } else if(!classes.contains(regionProcedureClass)){ + conf.set("hbase.procedure.regionserver.classes", classes +","+regionProcedureClass); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Added region procedure manager: "+regionProcedureClass); + } + + } + + private static boolean isBackupEnabled(Configuration conf) { return conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT); } @@ -172,9 +226,9 @@ public class BackupManager implements Closeable { this.pool.shutdownNow(); } if (systemTable != null) { - try{ + try { systemTable.close(); - } catch(Exception e){ + } catch (Exception e) { LOG.error(e); } } @@ -197,9 +251,9 @@ public class BackupManager implements Closeable { * @return BackupContext context * @throws BackupException exception */ - protected BackupContext createBackupContext(String backupId, BackupType type, - List tableList, String targetRootDir) throws BackupException { - + public BackupInfo createBackupContext(String backupId, BackupType type, + List tableList, String targetRootDir, int workers, long bandwidth) + throws BackupException { if (targetRootDir == null) { throw new BackupException("Wrong backup request parameter: target 
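decorateMasterConfiguration and decorateRSConfiguration above follow one pattern: append a class name to a comma-separated plugin key only when it is not already present. A standalone sketch of that pattern follows; the helper and class names are illustrative, not part of the patch, and the contains() check is a substring match, same as in the patch.

import org.apache.hadoop.conf.Configuration;

public final class ConfListSketch {
  private ConfListSketch() {}

  /** Append className to the comma-separated list stored under key, if absent. */
  static void appendIfAbsent(Configuration conf, String key, String className) {
    String current = conf.get(key);
    if (current == null) {
      conf.set(key, className);
    } else if (!current.contains(className)) {
      conf.set(key, current + "," + className);
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    appendIfAbsent(conf, "hbase.procedure.regionserver.classes",
        "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
    // Calling it twice is a no-op, which is what the decorate* methods rely on.
    appendIfAbsent(conf, "hbase.procedure.regionserver.classes",
        "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
    System.out.println(conf.get("hbase.procedure.regionserver.classes"));
  }
}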
backup root directory"); } @@ -228,8 +282,12 @@ public class BackupManager implements Closeable { } // there are one or more tables in the table list - return new BackupContext(backupId, type, tableList.toArray(new TableName[tableList.size()]), + backupContext = new BackupInfo(backupId, type, + tableList.toArray(new TableName[tableList.size()]), targetRootDir); + backupContext.setBandwidth(bandwidth); + backupContext.setWorkers(workers); + return backupContext; } /** @@ -241,7 +299,7 @@ public class BackupManager implements Closeable { */ private String getOngoingBackupId() throws IOException { - ArrayList sessions = systemTable.getBackupContexts(BackupState.RUNNING); + ArrayList sessions = systemTable.getBackupContexts(BackupState.RUNNING); if (sessions.size() == 0) { return null; } @@ -270,7 +328,7 @@ public class BackupManager implements Closeable { ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); } - public void setBackupContext(BackupContext backupContext) { + public void setBackupContext(BackupInfo backupContext) { this.backupContext = backupContext; } @@ -281,9 +339,10 @@ public class BackupManager implements Closeable { * @throws IOException exception * @throws BackupException exception */ - protected ArrayList getAncestors(BackupContext backupCtx) throws IOException, + public ArrayList getAncestors(BackupInfo backupCtx) throws IOException, BackupException { - LOG.debug("Getting the direct ancestors of the current backup ..."); + LOG.debug("Getting the direct ancestors of the current backup "+ + backupCtx.getBackupId()); ArrayList ancestors = new ArrayList(); @@ -295,15 +354,15 @@ public class BackupManager implements Closeable { // get all backup history list in descending order - ArrayList allHistoryList = getBackupHistory(); - for (BackupCompleteData backup : allHistoryList) { + ArrayList allHistoryList = getBackupHistory(true); + for (BackupInfo backup : allHistoryList) { BackupImage image = - new BackupImage(backup.getBackupToken(), BackupType.valueOf(backup.getType()), - backup.getBackupRootPath(), - backup.getTableList(), Long.parseLong(backup.getStartTime()), Long.parseLong(backup - .getEndTime())); + new BackupImage(backup.getBackupId(), backup.getType(), + backup.getTargetRootDir(), + backup.getTableNames(), backup.getStartTs(), backup + .getEndTs()); // add the full backup image as an ancestor until the last incremental backup - if (backup.getType().equals(BackupType.FULL.toString())) { + if (backup.getType().equals(BackupType.FULL)) { // check the backup image coverage, if previous image could be covered by the newer ones, // then no need to add if (!BackupManifest.canCoverImage(ancestors, image)) { @@ -324,12 +383,11 @@ public class BackupManager implements Closeable { } } else { Path logBackupPath = - HBackupFileSystem.getLogBackupPath(backup.getBackupRootPath(), - backup.getBackupToken()); + HBackupFileSystem.getLogBackupPath(backup.getTargetRootDir(), + backup.getBackupId()); LOG.debug("Current backup has an incremental backup ancestor, " + "touching its image manifest in " + logBackupPath.toString() + " to construct the dependency."); - BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); @@ -352,7 +410,7 @@ public class BackupManager implements Closeable { * @throws BackupException exception * @throws IOException exception */ - protected ArrayList getAncestors(BackupContext backupContext, TableName table) + public ArrayList 
getAncestors(BackupInfo backupContext, TableName table) throws BackupException, IOException { ArrayList ancestors = getAncestors(backupContext); ArrayList tableAncestors = new ArrayList(); @@ -376,8 +434,8 @@ public class BackupManager implements Closeable { * @param context context * @throws IOException exception */ - public void updateBackupStatus(BackupContext context) throws IOException { - systemTable.updateBackupStatus(context); + public void updateBackupInfo(BackupInfo context) throws IOException { + systemTable.updateBackupInfo(context); } /** @@ -388,7 +446,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public String readBackupStartCode() throws IOException { - return systemTable.readBackupStartCode(); + return systemTable.readBackupStartCode(backupContext.getTargetRootDir()); } /** @@ -397,7 +455,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public void writeBackupStartCode(Long startCode) throws IOException { - systemTable.writeBackupStartCode(startCode); + systemTable.writeBackupStartCode(startCode, backupContext.getTargetRootDir()); } /** @@ -406,7 +464,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public HashMap readRegionServerLastLogRollResult() throws IOException { - return systemTable.readRegionServerLastLogRollResult(); + return systemTable.readRegionServerLastLogRollResult(backupContext.getTargetRootDir()); } /** @@ -414,10 +472,13 @@ public class BackupManager implements Closeable { * @return history info of BackupCompleteData * @throws IOException exception */ - public ArrayList getBackupHistory() throws IOException { + public ArrayList getBackupHistory() throws IOException { return systemTable.getBackupHistory(); } + public ArrayList getBackupHistory(boolean completed) throws IOException { + return systemTable.getBackupHistory(completed); + } /** * Write the current timestamps for each regionserver to hbase:backup after a successful full or * incremental backup. Each table may have a different set of log timestamps. The saved timestamp @@ -427,7 +488,8 @@ public class BackupManager implements Closeable { */ public void writeRegionServerLogTimestamp(Set tables, HashMap newTimestamps) throws IOException { - systemTable.writeRegionServerLogTimestamp(tables, newTimestamps); + systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, + backupContext.getTargetRootDir()); } /** @@ -438,7 +500,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public HashMap> readLogTimestampMap() throws IOException { - return systemTable.readLogTimestampMap(); + return systemTable.readLogTimestampMap(backupContext.getTargetRootDir()); } /** @@ -447,7 +509,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public Set getIncrementalBackupTableSet() throws IOException { - return BackupSystemTableHelper.getIncrementalBackupTableSet(getConnection()); + return systemTable.getIncrementalBackupTableSet(backupContext.getTargetRootDir()); } /** @@ -456,7 +518,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public void addIncrementalBackupTableSet(Set tables) throws IOException { - systemTable.addIncrementalBackupTableSet(tables); + systemTable.addIncrementalBackupTableSet(tables, backupContext.getTargetRootDir()); } /** @@ -465,7 +527,8 @@ public class BackupManager implements Closeable { * safely purged. 
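Note how the BackupManager accessors above now pass backupContext.getTargetRootDir() down to BackupSystemTable, so start codes, log-timestamp maps and the incremental table set are keyed per backup destination. A usage sketch follows, under the assumption that a BackupManager instance is already available; the id, table names and destination values are made up for illustration and this fragment is not compilable outside the patch.

// Sketch only: 'manager' is an initialized BackupManager from this patch.
List<TableName> tables = Arrays.asList(TableName.valueOf("ns1:orders"));
BackupInfo context = manager.createBackupContext("backup_1462300000000", BackupType.FULL,
    tables, "hdfs://backup-cluster/backups", /* workers */ 3, /* bandwidth */ 100L);
manager.setBackupContext(context);

// Every call below is implicitly scoped to hdfs://backup-cluster/backups.
String lastStartCode = manager.readBackupStartCode();
manager.writeBackupStartCode(System.currentTimeMillis());
Set<TableName> incrTables = manager.getIncrementalBackupTableSet();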
*/ public void recordWALFiles(List files) throws IOException { - systemTable.addWALFiles(files, backupContext.getBackupId()); + systemTable.addWALFiles(files, + backupContext.getBackupId(), backupContext.getTargetRootDir()); } /** @@ -473,8 +536,8 @@ public class BackupManager implements Closeable { * @return WAL files iterator from hbase:backup * @throws IOException */ - public Iterator getWALFilesFromBackupSystem() throws IOException { - return systemTable.getWALFilesIterator(); + public Iterator getWALFilesFromBackupSystem() throws IOException { + return systemTable.getWALFilesIterator(backupContext.getTargetRootDir()); } public Connection getConnection() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java index 6264fc5e0be75f4f91e222cdd32a1b4bdf375c84..05f8eb50637ae89aa6942be936623b284199c522 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -102,11 +103,18 @@ public class BackupManifest { for(HBaseProtos.TableName tn : tableListList) { tableList.add(ProtobufUtil.toTableName(tn)); } + + List ancestorList = im.getAncestorsList(); + BackupType type = im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL: BackupType.INCREMENTAL; - return new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); + BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); + for(BackupProtos.BackupImage img: ancestorList) { + image.addAncestor(fromProto(img)); + } + return image; } BackupProtos.BackupImage toProto() { @@ -204,7 +212,7 @@ public class BackupManifest { public boolean hasTable(TableName table) { for (TableName t : tableList) { - if (t.getNameAsString().equals(table)) { + if (t.equals(table)) { return true; } } @@ -251,50 +259,41 @@ public class BackupManifest { // actual complete timestamp of the backup process private long completeTs; - // total bytes for table backup image - private long totalBytes; - - // total bytes for the backed-up logs for incremental backup - private long logBytes; - // the region server timestamp for tables: // > private Map> incrTimeRanges; // dependency of this backup, including all the dependent images to do PIT recovery private Map dependency; - - // the indicator of the image compaction - private boolean isCompacted = false; + /** * Construct manifest for a ongoing backup. 
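recordWALFiles and getWALFilesFromBackupSystem above now carry the backup root, and the iterator yields WALItem objects instead of bare file names. A sketch of how a caller (for example, a WAL-cleaning check) might consume it; it leans on the patch's own classes and assumes same-package access to BackupSystemTable.WALItem, and the candidate path is made up.

// Sketch: collect the WAL files already recorded for this backup destination.
Set<String> backedUpWals = new HashSet<>();
Iterator<BackupSystemTable.WALItem> it = manager.getWALFilesFromBackupSystem();
while (it.hasNext()) {
  BackupSystemTable.WALItem item = it.next();
  backedUpWals.add(item.getWalFile());   // each item also carries the backup id and root
}
String candidateWal = "hdfs://cluster/hbase/oldWALs/host,16020,1462300000000.1462300100000"; // made up
boolean coveredByBackup = backedUpWals.contains(candidateWal);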
* @param backupCtx The ongoing backup context */ - public BackupManifest(BackupContext backupCtx) { + public BackupManifest(BackupInfo backupCtx) { this.backupId = backupCtx.getBackupId(); this.type = backupCtx.getType(); this.rootDir = backupCtx.getTargetRootDir(); if (this.type == BackupType.INCREMENTAL) { this.logBackupDir = backupCtx.getHLogTargetDir(); - this.logBytes = backupCtx.getTotalBytesCopied(); } this.startTs = backupCtx.getStartTs(); this.completeTs = backupCtx.getEndTs(); this.loadTableList(backupCtx.getTableNames()); } - + + /** * Construct a table level manifest for a backup of the named table. * @param backupCtx The ongoing backup context */ - public BackupManifest(BackupContext backupCtx, TableName table) { + public BackupManifest(BackupInfo backupCtx, TableName table) { this.backupId = backupCtx.getBackupId(); this.type = backupCtx.getType(); this.rootDir = backupCtx.getTargetRootDir(); this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); if (this.type == BackupType.INCREMENTAL) { this.logBackupDir = backupCtx.getHLogTargetDir(); - this.logBytes = backupCtx.getTotalBytesCopied(); } this.startTs = backupCtx.getStartTs(); this.completeTs = backupCtx.getEndTs(); @@ -361,15 +360,8 @@ public class BackupManifest { loadTableList(proto); this.startTs = proto.getStartTs(); this.completeTs = proto.getCompleteTs(); - this.totalBytes = proto.getTotalBytes(); - if (this.type == BackupType.INCREMENTAL) { - this.logBytes = proto.getLogBytes(); - //TODO: convert will be implemented by future jira - } - loadIncrementalTimestampMap(proto); loadDependency(proto); - this.isCompacted = proto.getCompacted(); //TODO: merge will be implemented by future jira LOG.debug("Loaded manifest instance from manifest file: " + FSUtils.getPath(subFile.getPath())); @@ -377,11 +369,9 @@ public class BackupManifest { } } String errorMsg = "No manifest file found in: " + backupPath.toString(); - LOG.error(errorMsg); throw new IOException(errorMsg); } catch (IOException e) { - LOG.error(e); throw new BackupException(e.getMessage()); } } @@ -405,10 +395,20 @@ public class BackupManifest { } private void loadDependency(BackupProtos.BackupManifest proto) { + if(LOG.isDebugEnabled()) { + LOG.debug("load dependency for: "+proto.getBackupId()); + } + dependency = new HashMap(); List list = proto.getDependentBackupImageList(); for (BackupProtos.BackupImage im : list) { - dependency.put(im.getBackupId(), BackupImage.fromProto(im)); + BackupImage bim = BackupImage.fromProto(im); + if(im.getBackupId() != null){ + dependency.put(im.getBackupId(), bim); + } else{ + LOG.warn("Load dependency for backup manifest: "+ backupId+ + ". Null backup id in dependent image"); + } } } @@ -463,6 +463,7 @@ public class BackupManifest { public void store(Configuration conf) throws BackupException { byte[] data = toByteArray(); + // write the file, overwrite if already exist Path manifestFilePath = new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)) @@ -472,13 +473,11 @@ public class BackupManifest { manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); out.write(data); out.close(); - } catch (IOException e) { - LOG.error(e); + } catch (IOException e) { throw new BackupException(e.getMessage()); } - LOG.debug("Manifestfilestored_to " + this.tableBackupDir != null ? 
this.tableBackupDir - : this.logBackupDir + Path.SEPARATOR + MANIFEST_FILE_NAME); + LOG.info("Manifest file stored to " + manifestFilePath); } /** @@ -493,18 +492,15 @@ public class BackupManifest { setTableList(builder); builder.setStartTs(this.startTs); builder.setCompleteTs(this.completeTs); - builder.setTotalBytes(this.totalBytes); - if (this.type == BackupType.INCREMENTAL) { - builder.setLogBytes(this.logBytes); - } setIncrementalTimestampMap(builder); setDependencyMap(builder); - builder.setCompacted(this.isCompacted); return builder.build().toByteArray(); } private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { - if (this.incrTimeRanges == null) return; + if (this.incrTimeRanges == null) { + return; + } for (Entry> entry: this.incrTimeRanges.entrySet()) { TableName key = entry.getKey(); HashMap value = entry.getValue(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java index d0ce059dd07c6b138de47fafd9a55d31097dede0..7233bfafd49cbe1abe9da6fe309dbe73318bbdf7 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java @@ -37,7 +37,8 @@ public final class BackupRestoreConstants { public static final String BACKUPID_PREFIX = "backup_"; public static enum BackupCommand { - CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, + CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET, + SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST } private BackupRestoreConstants() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java deleted file mode 100644 index 6e54994a594095839d73db7e1d8949dbeb0f30b0..0000000000000000000000000000000000000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.Serializable; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; - -/** - * Backup status and related information encapsulated for a table. 
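With total_bytes, log_bytes and the compaction flag removed from the proto, the manifest above is simply built from the BackupInfo and written once per table or log backup directory. A minimal round-trip sketch, with the patch's classes assumed on the classpath, exception handling omitted, and the table name made up:

// Sketch: write the per-table manifest for an ongoing backup, then read it back later.
BackupManifest manifest = new BackupManifest(backupContext, TableName.valueOf("ns1:orders"));
manifest.store(conf);   // writes the manifest file, overwriting any existing one

// Later, e.g. while resolving ancestors of an incremental backup:
Path logBackupPath = HBackupFileSystem.getLogBackupPath(
    backupContext.getTargetRootDir(), backupContext.getBackupId());
BackupManifest reloaded = new BackupManifest(conf, logBackupPath);
BackupManifest.BackupImage image = reloaded.getBackupImage();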
- * At this moment only TargetDir and SnapshotName is encapsulated here. - * future Jira will be implemented for progress, bytesCopies, phase, etc. - */ - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupStatus implements Serializable { - - private static final long serialVersionUID = -5968397963548535982L; - - // table name for backup - private TableName table; - - // target directory of the backup image for this table - private String targetDir; - - // snapshot name for offline/online snapshot - private String snapshotName = null; - - public BackupStatus() { - - } - - public BackupStatus(TableName table, String targetRootDir, String backupId) { - this.table = table; - this.targetDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); - } - - public String getSnapshotName() { - return snapshotName; - } - - public void setSnapshotName(String snapshotName) { - this.snapshotName = snapshotName; - } - - public String getTargetDir() { - return targetDir; - } - - public TableName getTable() { - return table; - } - - public void setTable(TableName table) { - this.table = table; - } - - public void setTargetDir(String targetDir) { - this.targetDir = targetDir; - } - - public static BackupStatus convert(BackupProtos.TableBackupStatus proto) - { - BackupStatus bs = new BackupStatus(); - bs.setTable(ProtobufUtil.toTableName(proto.getTable())); - bs.setTargetDir(proto.getTargetDir()); - if(proto.hasSnapshot()){ - bs.setSnapshotName(proto.getSnapshot()); - } - return bs; - } - - public BackupProtos.TableBackupStatus toProto() { - BackupProtos.TableBackupStatus.Builder builder = - BackupProtos.TableBackupStatus.newBuilder(); - if(snapshotName != null) { - builder.setSnapshot(snapshotName); - } - builder.setTable(ProtobufUtil.toProtoTableName(table)); - builder.setTargetDir(targetDir); - return builder.build(); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index c104dd86cf958ee1dd64fa2d41f1ccc8409f5c4c..8c3c2bedc33b3df9901cc2ff1a43ef34b01191de 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -27,20 +27,21 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.TreeSet; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; -import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -58,50 +59,54 @@ import 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos; @InterfaceAudience.Private @InterfaceStability.Evolving public final class BackupSystemTable implements Closeable { + + static class WALItem { + String backupId; + String walFile; + String backupRoot; + + WALItem(String backupId, String walFile, String backupRoot) + { + this.backupId = backupId; + this.walFile = walFile; + this.backupRoot = backupRoot; + } - private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); - private final static TableName tableName = TableName.BACKUP_TABLE_NAME; - final static byte[] familyName = "f".getBytes(); + public String getBackupId() { + return backupId; + } + + public String getWalFile() { + return walFile; + } + public String getBackupRoot() { + return backupRoot; + } + + public String toString() { + return backupRoot+"/"+backupId + "/" + walFile; + } + + } + + private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); + private final static TableName tableName = TableName.BACKUP_TABLE_NAME; + // Stores backup sessions (contexts) + final static byte[] SESSIONS_FAMILY = "session".getBytes(); + // Stores other meta + final static byte[] META_FAMILY = "meta".getBytes(); // Connection to HBase cluster, shared // among all instances private final Connection connection; - // Cluster configuration - private final Configuration conf; - - /** - * Create a BackupSystemTable object for the given Connection. Connection is NOT owned by this - * instance and has to be closed explicitly. - * @param connection - * @throws IOException - */ - public BackupSystemTable(Connection connection) throws IOException { - this.connection = connection; - this.conf = connection.getConfiguration(); - - createSystemTableIfNotExists(); + + public BackupSystemTable(Connection conn) throws IOException { + this.connection = conn; } - @Override + public void close() { - } - - private void createSystemTableIfNotExists() throws IOException { - try(Admin admin = connection.getAdmin()) { - if (admin.tableExists(tableName) == false) { - HTableDescriptor tableDesc = new HTableDescriptor(tableName); - HColumnDescriptor colDesc = new HColumnDescriptor(familyName); - colDesc.setMaxVersions(1); - int ttl = - conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); - colDesc.setTimeToLive(ttl); - tableDesc.addFamily(colDesc); - admin.createTable(tableDesc); - } - } catch (IOException e) { - LOG.error(e); - throw e; - } + // do nothing } /** @@ -109,7 +114,7 @@ public final class BackupSystemTable implements Closeable { * @param context context * @throws IOException exception */ - public void updateBackupStatus(BackupContext context) throws IOException { + public void updateBackupInfo(BackupInfo context) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() @@ -124,16 +129,17 @@ public final class BackupSystemTable implements Closeable { /** * Deletes backup status from hbase:backup table * @param backupId backup id + * @return true, if operation succeeded, false - otherwise * @throws IOException exception */ - public void deleteBackupStatus(String backupId) throws IOException { + public void deleteBackupInfo(String backupId) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("delete backup status in hbase:backup for " + backupId); } try (Table table = connection.getTable(tableName)) { - Delete del = BackupSystemTableHelper.createDeletForBackupContext(backupId); + Delete del = 
BackupSystemTableHelper.createDeleteForBackupInfo(backupId); table.delete(del); } } @@ -144,7 +150,7 @@ public final class BackupSystemTable implements Closeable { * @return Current status of backup session or null */ - public BackupContext readBackupStatus(String backupId) throws IOException { + public BackupInfo readBackupInfo(String backupId) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("read backup status from hbase:backup for: " + backupId); } @@ -155,7 +161,7 @@ public final class BackupSystemTable implements Closeable { if(res.isEmpty()){ return null; } - return BackupSystemTableHelper.resultToBackupContext(res); + return BackupSystemTableHelper.resultToBackupInfo(res); } } @@ -163,15 +169,16 @@ public final class BackupSystemTable implements Closeable { * Read the last backup start code (timestamp) of last successful backup. Will return null if * there is no start code stored on hbase or the value is of length 0. These two cases indicate * there is no successful backup completed so far. + * @param backupRoot root directory path to backup * @return the timestamp of last successful backup * @throws IOException exception */ - public String readBackupStartCode() throws IOException { + public String readBackupStartCode(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("read backup start code from hbase:backup"); } try (Table table = connection.getTable(tableName)) { - Get get = BackupSystemTableHelper.createGetForStartCode(); + Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot); Result res = table.get(get); if (res.isEmpty()) { return null; @@ -190,12 +197,12 @@ public final class BackupSystemTable implements Closeable { * @param startCode start code * @throws IOException exception */ - public void writeBackupStartCode(Long startCode) throws IOException { + public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("write backup start code to hbase:backup " + startCode); } try (Table table = connection.getTable(tableName)) { - Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString()); + Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot); table.put(put); } } @@ -205,13 +212,13 @@ public final class BackupSystemTable implements Closeable { * @return RS log info * @throws IOException exception */ - public HashMap readRegionServerLastLogRollResult() + public HashMap readRegionServerLastLogRollResult(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("read region server last roll log result to hbase:backup"); } - Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(); + Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot); scan.setMaxVersions(1); try (Table table = connection.getTable(tableName); @@ -224,7 +231,6 @@ public final class BackupSystemTable implements Closeable { byte[] row = CellUtil.cloneRow(cell); String server = BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); - byte[] data = CellUtil.cloneValue(cell); rsTimestampMap.put(server, Long.parseLong(new String(data))); } @@ -238,78 +244,73 @@ public final class BackupSystemTable implements Closeable { * @param timestamp - last log timestamp * @throws IOException exception */ - public void writeRegionServerLastLogRollResult(String server, Long timestamp) + public void writeRegionServerLastLogRollResult(String server, Long 
ts, String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("write region server last roll log result to hbase:backup"); } try (Table table = connection.getTable(tableName)) { Put put = - BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, timestamp); + BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server,ts,backupRoot); table.put(put); } } /** * Get all completed backup information (in desc order by time) + * @param onlyCompeleted, true, if only successfully completed sessions * @return history info of BackupCompleteData * @throws IOException exception */ - public ArrayList getBackupHistory() throws IOException { + public ArrayList getBackupHistory(boolean onlyCompleted) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("get backup history from hbase:backup"); } Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); scan.setMaxVersions(1); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList(); try (Table table = connection.getTable(tableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { res.advance(); - BackupContext context = BackupSystemTableHelper.cellToBackupContext(res.current()); - if (context.getState() != BackupState.COMPLETE) { + BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); + if(onlyCompleted && context.getState() != BackupState.COMPLETE){ continue; } - - BackupCompleteData history = new BackupCompleteData(); - history.setBackupToken(context.getBackupId()); - history.setStartTime(Long.toString(context.getStartTs())); - history.setEndTime(Long.toString(context.getEndTs())); - history.setBackupRootPath(context.getTargetRootDir()); - history.setTableList(context.getTableNames()); - history.setType(context.getType().toString()); - history.setBytesCopied(Long.toString(context.getTotalBytesCopied())); - - list.add(history); + list.add(context); } return BackupUtil.sortHistoryListDesc(list); } } + public ArrayList getBackupHistory() throws IOException { + return getBackupHistory(false); + } + /** * Get all backup session with a given status (in desc order by time) * @param status status * @return history info of backup contexts * @throws IOException exception */ - public ArrayList getBackupContexts(BackupState status) throws IOException { + public ArrayList getBackupContexts(BackupState status) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("get backup contexts from hbase:backup"); } Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); scan.setMaxVersions(1); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList(); try (Table table = connection.getTable(tableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { res.advance(); - BackupContext context = BackupSystemTableHelper.cellToBackupContext(res.current()); + BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); if (context.getState() != status){ continue; } @@ -327,14 +328,15 @@ public final class BackupSystemTable implements Closeable { * @throws IOException exception */ public void writeRegionServerLogTimestamp(Set tables, - HashMap newTimestamps) throws IOException { + HashMap newTimestamps, String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("write RS log ts to HBASE_BACKUP"); } List puts = new ArrayList(); for (TableName table : tables) { byte[] smapData = 
toTableServerTimestampProto(table, newTimestamps).toByteArray(); - Put put = BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData); + Put put = + BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot); puts.add(put); } try (Table table = connection.getTable(tableName)) { @@ -350,7 +352,7 @@ public final class BackupSystemTable implements Closeable { * RegionServer,PreviousTimeStamp * @throws IOException exception */ - public HashMap> readLogTimestampMap() throws IOException { + public HashMap> readLogTimestampMap(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("read RS log ts from HBASE_BACKUP"); } @@ -358,7 +360,7 @@ public final class BackupSystemTable implements Closeable { HashMap> tableTimestampMap = new HashMap>(); - Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(); + Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot); try (Table table = connection.getTable(tableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; @@ -412,11 +414,38 @@ public final class BackupSystemTable implements Closeable { } /** + * Return the current tables covered by incremental backup. + * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get incr backup table set from hbase:backup"); + } + TreeSet set = new TreeSet<>(); + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot); + Result res = table.get(get); + if (res.isEmpty()) { + return set; + } + List cells = res.listCells(); + for (Cell cell : cells) { + // qualifier = table name - we use table names as qualifiers + set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); + } + return set; + } + } + + /** * Add tables to global incremental backup set * @param tables - set of tables * @throws IOException exception */ - public void addIncrementalBackupTableSet(Set tables) throws IOException { + public void addIncrementalBackupTableSet(Set tables, String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("add incr backup table set to hbase:backup"); for (TableName table : tables) { @@ -424,7 +453,7 @@ public final class BackupSystemTable implements Closeable { } } try (Table table = connection.getTable(tableName)) { - Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables); + Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot); table.put(put); } } @@ -434,12 +463,17 @@ public final class BackupSystemTable implements Closeable { * @param files files * @throws IOException exception */ - public void addWALFiles(List files, String backupId) throws IOException { + public void addWALFiles(List files, String backupId, + String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("add WAL files to hbase:backup"); + LOG.debug("add WAL files to hbase:backup: "+backupId +" "+backupRoot); + for(String f: files){ + LOG.debug("add :"+f); + } } try (Table table = connection.getTable(tableName)) { - List puts = BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId); + List puts = + BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot); table.put(puts); } } @@ -449,15 +483,15 @@ public final class BackupSystemTable implements Closeable { * @param files files * 
@throws IOException exception */ - public Iterator getWALFilesIterator() throws IOException { + public Iterator getWALFilesIterator(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("get WAL files from hbase:backup"); } final Table table = connection.getTable(tableName); - Scan scan = BackupSystemTableHelper.createScanForGetWALs(); + Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot); final ResultScanner scanner = table.getScanner(scan); final Iterator it = scanner.iterator(); - return new Iterator() { + return new Iterator() { @Override public boolean hasNext() { @@ -475,13 +509,22 @@ public final class BackupSystemTable implements Closeable { } @Override - public String next() { + public WALItem next() { Result next = it.next(); List cells = next.listCells(); byte[] buf = cells.get(0).getValueArray(); int len = cells.get(0).getValueLength(); int offset = cells.get(0).getValueOffset(); - return new String(buf, offset, len); + String backupId = new String(buf, offset, len); + buf = cells.get(1).getValueArray(); + len = cells.get(1).getValueLength(); + offset = cells.get(1).getValueOffset(); + String walFile = new String(buf, offset, len); + buf = cells.get(2).getValueArray(); + len = cells.get(2).getValueLength(); + offset = cells.get(2).getValueOffset(); + String backupRoot = new String(buf, offset, len); + return new WALItem(backupId, walFile, backupRoot); } @Override @@ -495,13 +538,14 @@ public final class BackupSystemTable implements Closeable { /** * Check if WAL file is eligible for deletion + * Future: to support all backup destinations * @param file file * @return true, if - yes. * @throws IOException exception */ public boolean checkWALFile(String file) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Check if WAL file has been already backuped in hbase:backup"); + LOG.debug("Check if WAL file has been already backuped in hbase:backup "+ file); } try (Table table = connection.getTable(tableName)) { Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); @@ -535,4 +579,224 @@ public final class BackupSystemTable implements Closeable { return result; } } + + /** + * BACKUP SETS + */ + + /** + * Get backup set list + * @return backup set list + * @throws IOException + */ + public List listBackupSets() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" backup set list"); + } + List list = new ArrayList(); + Table table = null; + ResultScanner scanner = null; + try { + table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForBackupSetList(); + scan.setMaxVersions(1); + scanner = table.getScanner(scan); + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); + } + return list; + } finally { + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup set description (list of tables) + * @param setName set's name + * @return list of tables in a backup set + * @throws IOException + */ + public List describeBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" backup set describe: "+name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if(res.isEmpty()) return new ArrayList(); + res.advance(); + String[] tables = + BackupSystemTableHelper.cellValueToBackupSet(res.current()); + return toList(tables); + } 
finally { + if (table != null) { + table.close(); + } + } + } + + private List toList(String[] tables) + { + List list = new ArrayList(tables.length); + for(String name: tables) { + list.add(TableName.valueOf(name)); + } + return list; + } + + /** + * Add backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ + public void addToBackupSet(String name, String[] newTables) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" backup set add: "+name); + } + Table table = null; + String[] union = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if(res.isEmpty()) { + union = newTables; + } else { + res.advance(); + String[] tables = + BackupSystemTableHelper.cellValueToBackupSet(res.current()); + union = merge(tables, newTables); + } + Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); + table.put(put); + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] merge(String[] tables, String[] newTables) { + List list = new ArrayList(); + // Add all from tables + for(String t: tables){ + list.add(t); + } + for(String nt: newTables){ + if(list.contains(nt)) continue; + list.add(nt); + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Remove tables from backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ + public void removeFromBackupSet(String name, String[] toRemove) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" backup set describe: " + name); + } + Table table = null; + String[] disjoint = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) { + return; + } else { + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + disjoint = disjoin(tables, toRemove); + } + if (disjoint.length > 0) { + Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint); + table.put(put); + } else { + // Delete + describeBackupSet(name); + } + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] disjoin(String[] tables, String[] toRemove) { + List list = new ArrayList(); + // Add all from tables + for (String t : tables) { + list.add(t); + } + for (String nt : toRemove) { + if (list.contains(nt)) { + list.remove(nt); + } + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Delete backup set + * @param name set's name + * @throws IOException + */ + public void deleteBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" backup set delete: " + name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name); + table.delete(del); + } finally { + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup system table descriptor + * @return descriptor + */ + public static HTableDescriptor getSystemTableDescriptor() { + HTableDescriptor tableDesc = new HTableDescriptor(tableName); + HColumnDescriptor colSessionsDesc = new HColumnDescriptor(SESSIONS_FAMILY); + colSessionsDesc.setMaxVersions(1); + // Time to keep backup sessions (secs) + Configuration config = 
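The backup-set methods introduced here (listBackupSets, describeBackupSet, addToBackupSet, removeFromBackupSet, deleteBackupSet) give the new SET_* commands in BackupCommand something to operate on. A hedged usage sketch against BackupSystemTable, with the set name and table names made up and connection setup reduced to the standard factory call:

// Sketch: maintain a named set of tables that later backup requests can refer to.
Connection connection = ConnectionFactory.createConnection(conf);
BackupSystemTable systemTable = new BackupSystemTable(connection);
systemTable.addToBackupSet("nightly", new String[] { "ns1:orders", "ns1:customers" });
List<TableName> members = systemTable.describeBackupSet("nightly");   // both tables
systemTable.removeFromBackupSet("nightly", new String[] { "ns1:customers" });
systemTable.deleteBackupSet("nightly");
List<String> remaining = systemTable.listBackupSets();                // "nightly" is gone

One detail worth double-checking in the hunk above: when the last table is removed, removeFromBackupSet calls describeBackupSet(name) under the "// Delete" comment, where deleteBackupSet(name) appears to be intended.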
HBaseConfiguration.create(); + int ttl = + config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); + colSessionsDesc.setTimeToLive(ttl); + tableDesc.addFamily(colSessionsDesc); + HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); + //colDesc.setMaxVersions(1); + tableDesc.addFamily(colMetaDesc); + return tableDesc; + } + + public static String getTableNameAsString() { + return tableName.getNameAsString(); + } + + public static TableName getTableName() { + return tableName; + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java index 04ccbdc81867f6f5d6f260d70b6737059e02a312..7b8265578afe6e1c98a31bfe2870aab437d145e0 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -22,22 +22,19 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Set; -import java.util.TreeSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; @@ -48,29 +45,31 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @InterfaceStability.Evolving public final class BackupSystemTableHelper { - private static final Log LOG = LogFactory.getLog(BackupSystemTableHelper.class); + /** * hbase:backup schema: - * 1. Backup sessions rowkey= "session." + backupId; value = serialized + * 1. Backup sessions rowkey= "session:" + backupId; value = serialized * BackupContext - * 2. Backup start code rowkey = "startcode"; value = startcode - * 3. Incremental backup set rowkey="incrbackupset"; value=[list of tables] - * 4. Table-RS-timestamp map rowkey="trslm"+ table_name; value = map[RS-> last WAL timestamp] - * 5. RS - WAL ts map rowkey="rslogts."+server; value = last WAL timestamp - * 6. WALs recorded rowkey="wals."+WAL unique file name; value = backuppId and full WAL file name + * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode + * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] + * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> + * last WAL timestamp] + * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp + * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name */ - private final static String BACKUP_CONTEXT_PREFIX = "session."; - private final static String START_CODE_ROW = "startcode"; - private final static String INCR_BACKUP_SET = "incrbackupset"; - private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm."; - private final static String RS_LOG_TS_PREFIX = "rslogts."; - private final static String WALS_PREFIX = "wals."; - - private final static byte[] col1 = "col1".getBytes(); - private final static byte[] col2 = "col2".getBytes(); + private final static String BACKUP_INFO_PREFIX = "session:"; + private final static String START_CODE_ROW = "startcode:"; + private final static String INCR_BACKUP_SET = "incrbackupset:"; + private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:"; + private final static String RS_LOG_TS_PREFIX = "rslogts:"; + private final static String WALS_PREFIX = "wals:"; + private final static String SET_KEY_PREFIX = "backupset:"; private final static byte[] EMPTY_VALUE = new byte[] {}; + + // Safe delimiter in a string + private final static String NULL = "\u0000"; private BackupSystemTableHelper() { throw new AssertionError("Instantiating utility class..."); @@ -82,10 +81,9 @@ public final class BackupSystemTableHelper { * @return put operation * @throws IOException exception */ - static Put createPutForBackupContext(BackupContext context) throws IOException { - - Put put = new Put((BACKUP_CONTEXT_PREFIX + context.getBackupId()).getBytes()); - put.addColumn(BackupSystemTable.familyName, col1, context.toByteArray()); + static Put createPutForBackupContext(BackupInfo context) throws IOException { + Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); + put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray()); return put; } @@ -96,8 +94,8 @@ public final class BackupSystemTableHelper { * @throws IOException exception */ static Get createGetForBackupContext(String backupId) throws IOException { - Get get = new Get((BACKUP_CONTEXT_PREFIX + backupId).getBytes()); - get.addFamily(BackupSystemTable.familyName); + Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId)); + get.addFamily(BackupSystemTable.SESSIONS_FAMILY); get.setMaxVersions(1); return get; } @@ -108,9 +106,9 @@ public final class BackupSystemTableHelper { * @return delete operation * @throws IOException exception */ - public static Delete createDeletForBackupContext(String backupId) { - Delete del = new Delete((BACKUP_CONTEXT_PREFIX + backupId).getBytes()); - del.addFamily(BackupSystemTable.familyName); + public static Delete createDeleteForBackupInfo(String backupId) { + Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId)); + del.addFamily(BackupSystemTable.SESSIONS_FAMILY); return del; } @@ -120,10 +118,10 @@ public final class BackupSystemTableHelper { * @return backup context instance * @throws IOException exception */ - static BackupContext resultToBackupContext(Result res) throws IOException { + static BackupInfo resultToBackupInfo(Result res) throws IOException { res.advance(); Cell cell = res.current(); - return cellToBackupContext(cell); + return cellToBackupInfo(cell); } /** @@ -131,9 +129,9 @@ public final class BackupSystemTableHelper { * @return get operation * @throws IOException exception */ - static Get createGetForStartCode() throws IOException { - Get get = new Get(START_CODE_ROW.getBytes()); - get.addFamily(BackupSystemTable.familyName); + static Get createGetForStartCode(String 
rootPath) throws IOException { + Get get = new Get(rowkey(START_CODE_ROW, rootPath)); + get.addFamily(BackupSystemTable.META_FAMILY); get.setMaxVersions(1); return get; } @@ -143,9 +141,9 @@ public final class BackupSystemTableHelper { * @return put operation * @throws IOException exception */ - static Put createPutForStartCode(String startCode) { - Put put = new Put(START_CODE_ROW.getBytes()); - put.addColumn(BackupSystemTable.familyName, col1, startCode.getBytes()); + static Put createPutForStartCode(String startCode, String rootPath) { + Put put = new Put(rowkey(START_CODE_ROW, rootPath)); + put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes()); return put; } @@ -154,49 +152,22 @@ public final class BackupSystemTableHelper { * @return get operation * @throws IOException exception */ - static Get createGetForIncrBackupTableSet() throws IOException { - Get get = new Get(INCR_BACKUP_SET.getBytes()); - get.addFamily(BackupSystemTable.familyName); + static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException { + Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot)); + get.addFamily(BackupSystemTable.META_FAMILY); get.setMaxVersions(1); return get; } /** - * Return the current tables covered by incremental backup. - * @return set of tableNames - * @throws IOException exception - */ - public static Set getIncrementalBackupTableSet(Connection connection) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("get incr backup table set from hbase:backup"); - } - TreeSet set = new TreeSet<>(); - - try (Table table = connection.getTable(TableName.BACKUP_TABLE_NAME)) { - Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(); - Result res = table.get(get); - if (res.isEmpty()) { - return set; - } - List cells = res.listCells(); - for (Cell cell : cells) { - // qualifier = table name - we use table names as qualifiers - set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); - } - return set; - } - } - - /** * Creates Put to store incremental backup table set * @param tables tables * @return put operation */ - static Put createPutForIncrBackupTableSet(Set tables) { - Put put = new Put(INCR_BACKUP_SET.getBytes()); + static Put createPutForIncrBackupTableSet(Set tables, String backupRoot) { + Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot)); for (TableName table : tables) { - put.addColumn(BackupSystemTable.familyName, Bytes.toBytes(table.getNameAsString()), + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), EMPTY_VALUE); } return put; @@ -208,12 +179,12 @@ public final class BackupSystemTableHelper { */ static Scan createScanForBackupHistory() { Scan scan = new Scan(); - byte[] startRow = BACKUP_CONTEXT_PREFIX.getBytes(); + byte[] startRow = BACKUP_INFO_PREFIX.getBytes(); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.familyName); + scan.addFamily(BackupSystemTable.SESSIONS_FAMILY); return scan; } @@ -224,9 +195,9 @@ public final class BackupSystemTableHelper { * @return backup context instance * @throws IOException exception */ - static BackupContext cellToBackupContext(Cell current) throws IOException { + static BackupInfo cellToBackupInfo(Cell current) throws IOException { byte[] data = CellUtil.cloneValue(current); - return BackupContext.fromByteArray(data); + return BackupInfo.fromByteArray(data); } /** 
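The row-key schema documented above composes every key from a prefix, the backup root and, where needed, a trailing component separated by a NUL character, so roots containing arbitrary path characters stay unambiguous. A standalone sketch of composing and parsing such keys; the class and method names are illustrative, while the patch's own rowkey(...) helper appears near the end of BackupSystemTableHelper.

import org.apache.hadoop.hbase.util.Bytes;

public class RowKeySketch {
  private static final String NULL = "\u0000";   // same delimiter the patch uses

  /** "trslm:" + backupRoot + NUL + tableName, as in createPutForWriteRegionServerLogTimestamp. */
  static byte[] trslmKey(String backupRoot, String tableName) {
    return Bytes.toBytes("trslm:" + backupRoot + NULL + tableName);
  }

  /** Mirrors getTableNameForReadLogTimestampMap: take everything after the last NUL. */
  static String tableFromTrslmKey(byte[] row) {
    String s = Bytes.toString(row);
    return s.substring(s.lastIndexOf(NULL) + 1);
  }

  public static void main(String[] args) {
    byte[] row = trslmKey("hdfs://backup-cluster/backups", "ns1:orders");
    System.out.println(tableFromTrslmKey(row));    // prints ns1:orders
  }
}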
@@ -235,9 +206,10 @@ public final class BackupSystemTableHelper { * @param smap - map, containing RS:ts * @return put operation */ - static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap) { - Put put = new Put((TABLE_RS_LOG_MAP_PREFIX + table).getBytes()); - put.addColumn(BackupSystemTable.familyName, col1, smap); + static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, + String backupRoot) { + Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); + put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); return put; } @@ -245,14 +217,14 @@ public final class BackupSystemTableHelper { * Creates Scan to load table-> { RS -> ts} map of maps * @return scan operation */ - static Scan createScanForReadLogTimestampMap() { + static Scan createScanForReadLogTimestampMap(String backupRoot) { Scan scan = new Scan(); - byte[] startRow = TABLE_RS_LOG_MAP_PREFIX.getBytes(); + byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.familyName); + scan.addFamily(BackupSystemTable.META_FAMILY); return scan; } @@ -263,8 +235,9 @@ public final class BackupSystemTableHelper { * @return table name */ static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { - int prefixSize = TABLE_RS_LOG_MAP_PREFIX.length(); - return new String(cloneRow, prefixSize, cloneRow.length - prefixSize); + String s = new String(cloneRow); + int index = s.lastIndexOf(NULL); + return s.substring(index +1); } /** @@ -273,9 +246,11 @@ public final class BackupSystemTableHelper { * @param timestamp - log roll result (timestamp) * @return put operation */ - static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp) { - Put put = new Put((RS_LOG_TS_PREFIX + server).getBytes()); - put.addColumn(BackupSystemTable.familyName, col1, timestamp.toString().getBytes()); + static Put createPutForRegionServerLastLogRollResult(String server, + Long timestamp, String backupRoot ) { + Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); + put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), + timestamp.toString().getBytes()); return put; } @@ -283,14 +258,14 @@ public final class BackupSystemTableHelper { * Creates Scan operation to load last RS log roll results * @return scan operation */ - static Scan createScanForReadRegionServerLastLogRollResult() { + static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) { Scan scan = new Scan(); - byte[] startRow = RS_LOG_TS_PREFIX.getBytes(); + byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.familyName); + scan.addFamily(BackupSystemTable.META_FAMILY); return scan; } @@ -301,8 +276,9 @@ public final class BackupSystemTableHelper { * @return server's name */ static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { - int prefixSize = RS_LOG_TS_PREFIX.length(); - return new String(row, prefixSize, row.length - prefixSize); + String s = new String(row); + int index = s.lastIndexOf(NULL); + return s.substring(index +1); } /** @@ -312,15 
+288,16 @@ public final class BackupSystemTableHelper { * @return put list * @throws IOException exception */ - public static List createPutsForAddWALFiles(List files, String backupId) + public static List createPutsForAddWALFiles(List files, + String backupId, String backupRoot) throws IOException { List puts = new ArrayList(); for (String file : files) { - byte[] row = (WALS_PREFIX + BackupUtil.getUniqueWALFileNamePart(file)).getBytes(); - Put put = new Put(row); - put.addColumn(BackupSystemTable.familyName, col1, backupId.getBytes()); - put.addColumn(BackupSystemTable.familyName, col2, file.getBytes()); + Put put = new Put(rowkey(WALS_PREFIX, BackupUtil.getUniqueWALFileNamePart(file))); + put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes()); puts.add(put); } return puts; @@ -328,30 +305,119 @@ public final class BackupSystemTableHelper { /** * Creates Scan operation to load WALs + * TODO: support for backupRoot + * @param backupRoot - path to backup destination * @return scan operation */ - public static Scan createScanForGetWALs() { + public static Scan createScanForGetWALs(String backupRoot) { Scan scan = new Scan(); byte[] startRow = WALS_PREFIX.getBytes(); byte[] stopRow = Arrays.copyOf(startRow, startRow.length); stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addColumn(BackupSystemTable.familyName, col2); + scan.addFamily(BackupSystemTable.META_FAMILY); return scan; } /** * Creates Get operation for a given wal file name + * TODO: support for backup destination * @param file file * @return get operation * @throws IOException exception */ public static Get createGetForCheckWALFile(String file) throws IOException { - byte[] row = (WALS_PREFIX + BackupUtil.getUniqueWALFileNamePart(file)).getBytes(); - Get get = new Get(row); - get.addFamily(BackupSystemTable.familyName); - get.setMaxVersions(1); + Get get = new Get(rowkey(WALS_PREFIX, BackupUtil.getUniqueWALFileNamePart(file))); + // add backup root column + get.addFamily(BackupSystemTable.META_FAMILY); return get; } + + /** + * Creates Scan operation to load backup set list + * @return scan operation + */ + static Scan createScanForBackupSetList() { + Scan scan = new Scan(); + byte[] startRow = SET_KEY_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + + /** + * Creates Get operation to load backup set content + * @return get operation + */ + static Get createGetForBackupSet(String name) { + Get get = new Get(rowkey(SET_KEY_PREFIX, name)); + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + /** + * Creates Delete operation to delete backup set content + * @return delete operation + */ + static Delete createDeleteForBackupSet(String name) { + Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); + del.addFamily(BackupSystemTable.META_FAMILY); + return del; + } + + + /** + * Creates Put operation to update backup set content + * @return put operation + */ + static Put createPutForBackupSet(String name, String[] tables) { + Put put = new Put(rowkey(SET_KEY_PREFIX, name)); + byte[] value = 
convertToByteArray(tables); + put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); + return put; + } + + private static byte[] convertToByteArray(String[] tables) { + return StringUtils.join(tables, ",").getBytes(); + } + + + /** + * Converts cell to backup set list. + * @param current - cell + * @return backup set + * @throws IOException + */ + static String[] cellValueToBackupSet(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + if( data != null && data.length > 0){ + return new String(data).split(","); + } else{ + return new String[0]; + } + } + + /** + * Converts cell key to backup set name. + * @param current - cell + * @return backup set name + * @throws IOException + */ + static String cellKeyToBackupSetName(Cell current) throws IOException { + byte[] data = CellUtil.cloneRow(current); + return new String(data).substring(SET_KEY_PREFIX.length()); + } + + static byte[] rowkey(String s, String ... other){ + StringBuilder sb = new StringBuilder(s); + for(String ss: other){ + sb.append(ss); + } + return sb.toString().getBytes(); + } + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java index 660a14fb9dd972e564cf3572614fa3ebe848b513..df2b0a6fb7dbdedc320dfb3f634d25fed6854a19 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; @@ -111,7 +113,7 @@ public final class BackupUtil { * @param rsLogTimestampMap timestamp map * @return the min timestamp of each RS */ - protected static HashMap getRSLogTimestampMins( + public static HashMap getRSLogTimestampMins( HashMap> rsLogTimestampMap) { if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { @@ -152,7 +154,7 @@ public final class BackupUtil { * @throws IOException exception * @throws InterruptedException exception */ - protected static void copyTableRegionInfo(BackupContext backupContext, Configuration conf) + public static void copyTableRegionInfo(BackupInfo backupContext, Configuration conf) throws IOException, InterruptedException { Path rootDir = FSUtils.getRootDir(conf); @@ -277,109 +279,19 @@ public final class BackupUtil { return totalLength; } - /** - * Keep the record for dependency for incremental backup and history info p.s, we may be able to - * merge this class into backupImage class later - */ - public static class BackupCompleteData implements Comparable { - private String startTime; - private String endTime; - private String type; - private String backupRootPath; - private List tableList; - private String backupToken; - private String bytesCopied; - private List ancestors; - - public List getAncestors() { - if (this.ancestors == null) { - this.ancestors = new ArrayList(); - } - return this.ancestors; - } - - public void addAncestor(String backupToken) { - this.getAncestors().add(backupToken); - } - - public String getBytesCopied() { - return 
bytesCopied; - } - - public void setBytesCopied(String bytesCopied) { - this.bytesCopied = bytesCopied; - } - - public String getBackupToken() { - return backupToken; - } - - public void setBackupToken(String backupToken) { - this.backupToken = backupToken; - } - - public String getStartTime() { - return startTime; - } - - public void setStartTime(String startTime) { - this.startTime = startTime; - } - - public String getEndTime() { - return endTime; - } - - public void setEndTime(String endTime) { - this.endTime = endTime; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getBackupRootPath() { - return backupRootPath; - } - - public void setBackupRootPath(String backupRootPath) { - this.backupRootPath = backupRootPath; - } - - public List getTableList() { - return tableList; - } - - public void setTableList(List tableList) { - this.tableList = tableList; - } - - @Override - public int compareTo(BackupCompleteData o) { - Long thisTS = - new Long(this.getBackupToken().substring(this.getBackupToken().lastIndexOf("_") + 1)); - Long otherTS = - new Long(o.getBackupToken().substring(o.getBackupToken().lastIndexOf("_") + 1)); - return thisTS.compareTo(otherTS); - } - - } + /** * Sort history list by start time in descending order. * @param historyList history list * @return sorted list of BackupCompleteData */ - public static ArrayList sortHistoryListDesc( - ArrayList historyList) { - ArrayList list = new ArrayList(); - TreeMap map = new TreeMap(); - for (BackupCompleteData h : historyList) { - map.put(h.getStartTime(), h); + public static ArrayList sortHistoryListDesc( + ArrayList historyList) { + ArrayList list = new ArrayList(); + TreeMap map = new TreeMap(); + for (BackupInfo h : historyList) { + map.put(Long.toString(h.getStartTs()), h); } Iterator i = map.descendingKeySet().iterator(); while (i.hasNext()) { @@ -483,4 +395,78 @@ public final class BackupUtil { } return ret; } + + public static void cleanupBackupData(BackupInfo context, Configuration conf) + throws IOException + { + cleanupHLogDir(context, conf); + cleanupTargetDir(context, conf); + } + + /** + * Clean up directories which are generated when DistCp copying hlogs. 
+ * @throws IOException + */ + private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf) + throws IOException { + + String logDir = backupContext.getHLogTargetDir(); + if (logDir == null) { + LOG.warn("No log directory specified for " + backupContext.getBackupId()); + return; + } + + Path rootPath = new Path(logDir).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + LOG.debug("Delete log files: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + + /** + * Clean up the data at target directory + */ + private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the data at target directory + LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId()); + String targetDir = backupContext.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupContext.getBackupId()); + return; + } + + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java deleted file mode 100644 index 175f2bbf6c4c512f1d675b0c8d81f07055229da0..0000000000000000000000000000000000000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java +++ /dev/null @@ -1,745 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupClientUtil; -import org.apache.hadoop.hbase.backup.BackupRestoreFactory; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupPhase; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; -import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; -import org.apache.hadoop.hbase.procedure.MasterProcedureManager; -import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; - -@InterfaceAudience.Private -public class FullTableBackupProcedure - extends StateMachineProcedure - implements TableProcedureInterface { - private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class); - - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Configuration conf; - private String backupId; - private List tableList; - private String targetRootDir; - HashMap newTimestamps = null; - - private BackupManager backupManager; - private BackupContext backupContext; - - public FullTableBackupProcedure() { - // Required by the Procedure framework to create the procedure on replay - } - - public FullTableBackupProcedure(final MasterProcedureEnv env, - final String backupId, List tableList, String targetRootDir, final int workers, - final long bandwidth) throws IOException { - backupManager = new BackupManager(env.getMasterConfiguration()); - this.backupId = backupId; - this.tableList = tableList; - this.targetRootDir = targetRootDir; - backupContext = - backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir); - if (tableList == null || tableList.isEmpty()) { - this.tableList = new ArrayList<>(backupContext.getTables()); - } - } - - @Override - public byte[] getResult() { - return backupId.getBytes(); - } - - /** - * Begin the overall backup. 
- * @param backupContext backup context - * @throws IOException exception - */ - static void beginBackup(BackupManager backupManager, BackupContext backupContext) - throws IOException { - backupManager.setBackupContext(backupContext); - // set the start timestamp of the overall backup - long startTs = EnvironmentEdgeManager.currentTime(); - backupContext.setStartTs(startTs); - // set overall backup status: ongoing - backupContext.setState(BackupState.RUNNING); - LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); - - backupManager.updateBackupStatus(backupContext); - if (LOG.isDebugEnabled()) { - LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); - } - } - - private static String getMessage(Exception e) { - String msg = e.getMessage(); - if (msg == null || msg.equals("")) { - msg = e.getClass().getName(); - } - return msg; - } - - /** - * Delete HBase snapshot for backup. - * @param backupCtx backup context - * @throws Exception exception - */ - private static void deleteSnapshot(final MasterProcedureEnv env, - BackupContext backupCtx, Configuration conf) - throws IOException { - LOG.debug("Trying to delete snapshot for full backup."); - for (String snapshotName : backupCtx.getSnapshotNames()) { - if (snapshotName == null) { - continue; - } - LOG.debug("Trying to delete snapshot: " + snapshotName); - HBaseProtos.SnapshotDescription.Builder builder = - HBaseProtos.SnapshotDescription.newBuilder(); - builder.setName(snapshotName); - try { - env.getMasterServices().getSnapshotManager().deleteSnapshot(builder.build()); - } catch (IOException ioe) { - LOG.debug("when deleting snapshot " + snapshotName, ioe); - } - LOG.debug("Deleting the snapshot " + snapshotName + " for backup " - + backupCtx.getBackupId() + " succeeded."); - } - } - - /** - * Clean up directories with prefix "exportSnapshot-", which are generated when exporting - * snapshots. - * @throws IOException exception - */ - private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { - FileSystem fs = FSUtils.getCurrentFileSystem(conf); - Path stagingDir = - new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() - .toString())); - FileStatus[] files = FSUtils.listStatus(fs, stagingDir); - if (files == null) { - return; - } - for (FileStatus file : files) { - if (file.getPath().getName().startsWith("exportSnapshot-")) { - LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); - if (FSUtils.delete(fs, file.getPath(), true) == false) { - LOG.warn("Can not delete " + file.getPath()); - } - } - } - } - - /** - * Clean up the uncompleted data at target directory if the ongoing backup has already entered the - * copy phase. - */ - static void cleanupTargetDir(BackupContext backupContext, Configuration conf) { - try { - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - LOG.debug("Trying to cleanup up target dir. 
Current backup phase: " - + backupContext.getPhase()); - if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) - || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) - || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { - FileSystem outputFs = - FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); - - // now treat one backup as a transaction, clean up data that has been partially copied at - // table level - for (TableName table : backupContext.getTables()) { - Path targetDirPath = - new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), - backupContext.getBackupId(), table)); - if (outputFs.delete(targetDirPath, true)) { - LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() - + " done."); - } else { - LOG.info("No data has been copied to " + targetDirPath.toString() + "."); - } - - Path tableDir = targetDirPath.getParent(); - FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); - if (backups == null || backups.length == 0) { - outputFs.delete(tableDir, true); - LOG.debug(tableDir.toString() + " is empty, remove it."); - } - } - } - - } catch (IOException e1) { - LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " - + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); - } - } - - /** - * Fail the overall backup. - * @param backupContext backup context - * @param e exception - * @throws Exception exception - */ - static void failBackup(final MasterProcedureEnv env, BackupContext backupContext, - BackupManager backupManager, Exception e, - String msg, BackupType type, Configuration conf) throws IOException { - LOG.error(msg + getMessage(e)); - // If this is a cancel exception, then we've already cleaned. - - if (backupContext.getState().equals(BackupState.CANCELLED)) { - return; - } - - // set the failure timestamp of the overall backup - backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); - - // set failure message - backupContext.setFailedMsg(e.getMessage()); - - // set overall backup status: failed - backupContext.setState(BackupState.FAILED); - - // compose the backup failed data - String backupFailedData = - "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() - + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() - + ",failedmessage=" + backupContext.getFailedMsg(); - LOG.error(backupFailedData); - - backupManager.updateBackupStatus(backupContext); - - // if full backup, then delete HBase snapshots if there already are snapshots taken - // and also clean up export snapshot log files if exist - if (type == BackupType.FULL) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } - - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. - cleanupTargetDir(backupContext, conf); - - LOG.info("Backup " + backupContext.getBackupId() + " failed."); - } - - /** - * Do snapshot copy. 
- * @param backupContext backup context - * @throws Exception exception - */ - private void snapshotCopy(BackupContext backupContext) throws Exception { - LOG.info("Snapshot copy is starting."); - - // set overall backup phase: snapshot_copy - backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); - - // avoid action if has been cancelled - if (backupContext.isCancelled()) { - return; - } - - // call ExportSnapshot to copy files based on hbase snapshot for backup - // ExportSnapshot only support single snapshot export, need loop for multiple tables case - BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); - - // number of snapshots matches number of tables - float numOfSnapshots = backupContext.getSnapshotNames().size(); - - LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); - - for (TableName table : backupContext.getTables()) { - // Currently we simply set the sub copy tasks by counting the table snapshot number, we can - // calculate the real files' size for the percentage in the future. - // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); - int res = 0; - String[] args = new String[4]; - args[0] = "-snapshot"; - args[1] = backupContext.getSnapshotName(table); - args[2] = "-copy-to"; - args[3] = backupContext.getBackupStatus(table).getTargetDir(); - - LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); - res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); - // if one snapshot export failed, do not continue for remained snapshots - if (res != 0) { - LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); - - throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] - + " with reason code " + res); - } - - LOG.info("Snapshot copy " + args[1] + " finished."); - } - } - - /** - * Add manifest for the current backup. The manifest is stored - * within the table backup directory. - * @param backupContext The current backup context - * @throws IOException exception - * @throws BackupException exception - */ - private static void addManifest(BackupContext backupContext, BackupManager backupManager, - BackupType type, Configuration conf) throws IOException, BackupException { - // set the overall backup phase : store manifest - backupContext.setPhase(BackupPhase.STORE_MANIFEST); - - // avoid action if has been cancelled - if (backupContext.isCancelled()) { - return; - } - - BackupManifest manifest; - - // Since we have each table's backup in its own directory structure, - // we'll store its manifest with the table directory. - for (TableName table : backupContext.getTables()) { - manifest = new BackupManifest(backupContext, table); - ArrayList ancestors = backupManager.getAncestors(backupContext, table); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - - if (type == BackupType.INCREMENTAL) { - // We'll store the log timestamps for this table only in its manifest. 
- HashMap> tableTimestampMap = - new HashMap>(); - tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); - manifest.setIncrTimestampMap(tableTimestampMap); - ArrayList ancestorss = backupManager.getAncestors(backupContext); - for (BackupImage image : ancestorss) { - manifest.addDependentImage(image); - } - } - // TODO - // manifest.setRelativeWALReferences(backupContext.getRelWALRefs()); - manifest.store(conf); - } - - // For incremental backup, we store a overall manifest in - // /WALs/ - // This is used when created the next incremental backup - if (type == BackupType.INCREMENTAL) { - manifest = new BackupManifest(backupContext); - // set the table region server start and end timestamps for incremental backup - manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); - ArrayList ancestors = backupManager.getAncestors(backupContext); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - // TODO - // manifest.setRelativeWALReferences(backupContext.getRelWALRefs()); - manifest.store(conf); - } - } - - /** - * Get backup request meta data dir as string. - * @param backupContext backup context - * @return meta data dir - */ - private static String obtainBackupMetaDataStr(BackupContext backupContext) { - StringBuffer sb = new StringBuffer(); - sb.append("type=" + backupContext.getType() + ",tablelist="); - for (TableName table : backupContext.getTables()) { - sb.append(table + ";"); - } - if (sb.lastIndexOf(";") > 0) { - sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); - } - sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); - - return sb.toString(); - } - - /** - * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying - * hlogs. - * @throws IOException exception - */ - private static void cleanupDistCpLog(BackupContext backupContext, Configuration conf) - throws IOException { - Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); - FileSystem fs = FileSystem.get(rootPath.toUri(), conf); - FileStatus[] files = FSUtils.listStatus(fs, rootPath); - if (files == null) { - return; - } - for (FileStatus file : files) { - if (file.getPath().getName().startsWith("_distcp_logs")) { - LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); - FSUtils.delete(fs, file.getPath(), true); - } - } - } - - /** - * Complete the overall backup. 
- * @param backupContext backup context - * @throws Exception exception - */ - static void completeBackup(final MasterProcedureEnv env, BackupContext backupContext, - BackupManager backupManager, BackupType type, Configuration conf) throws IOException { - // set the complete timestamp of the overall backup - backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); - // set overall backup status: complete - backupContext.setState(BackupState.COMPLETE); - // add and store the manifest for the backup - addManifest(backupContext, backupManager, type, conf); - - // after major steps done and manifest persisted, do convert if needed for incremental backup - /* in-fly convert code here, provided by future jira */ - LOG.debug("in-fly convert code here, provided by future jira"); - - // compose the backup complete data - String backupCompleteData = - obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() - + ",completets=" + backupContext.getEndTs() + ",bytescopied=" - + backupContext.getTotalBytesCopied(); - if (LOG.isDebugEnabled()) { - LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); - } - backupManager.updateBackupStatus(backupContext); - - // when full backup is done: - // - delete HBase snapshot - // - clean up directories with prefix "exportSnapshot-", which are generated when exporting - // snapshots - if (type == BackupType.FULL) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } else if (type == BackupType.INCREMENTAL) { - cleanupDistCpLog(backupContext, conf); - } - - LOG.info("Backup " + backupContext.getBackupId() + " completed."); - } - - /** - * Wrap a SnapshotDescription for a target table. - * @param table table - * @return a SnapshotDescription especially for backup. - */ - static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { - // Mock a SnapshotDescription from backupContext to call SnapshotManager function, - // Name it in the format "snapshot__" - HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); - builder.setTable(tableName.getNameAsString()); - builder.setName(snapshotName); - HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); - - LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() - + " from backupContext to request snapshot for backup."); - - return backupSnapshot; - } - - @Override - protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) - throws InterruptedException { - if (conf == null) { - conf = env.getMasterConfiguration(); - } - if (backupManager == null) { - try { - backupManager = new BackupManager(env.getMasterConfiguration()); - } catch (IOException ioe) { - setFailure("full backup", ioe); - return Flow.NO_MORE_STATE; - } - } - if (LOG.isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); - } - try { - switch (state) { - case PRE_SNAPSHOT_TABLE: - beginBackup(backupManager, backupContext); - String savedStartCode = null; - boolean firstBackup = false; - // do snapshot for full table backup - - try { - savedStartCode = backupManager.readBackupStartCode(); - firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L; - if (firstBackup) { - // This is our first backup. Let's put some marker on ZK so that we can hold the logs - // while we do the backup. - backupManager.writeBackupStartCode(0L); - } - // We roll log here before we do the snapshot. 
It is possible there is duplicate data - // in the log that is already in the snapshot. But if we do it after the snapshot, we - // could have data loss. - // A better approach is to do the roll log on each RS in the same global procedure as - // the snapshot. - LOG.info("Execute roll log procedure for full backup ..."); - MasterProcedureManager mpm = env.getMasterServices().getMasterProcedureManagerHost() - .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); - Map props= new HashMap(); - long waitTime = MasterProcedureUtil.execProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - MasterProcedureUtil.waitForProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime, - conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER), - conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE)); - - newTimestamps = backupManager.readRegionServerLastLogRollResult(); - if (firstBackup) { - // Updates registered log files - // We record ALL old WAL files as registered, because - // this is a first full backup in the system and these - // files are not needed for next incremental backup - List logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps); - backupManager.recordWALFiles(logFiles); - } - } catch (BackupException e) { - // fail the overall backup and return - failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", - BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - } - setNextState(FullTableBackupState.SNAPSHOT_TABLES); - break; - case SNAPSHOT_TABLES: - for (TableName tableName : tableList) { - String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) - + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); - HBaseProtos.SnapshotDescription backupSnapshot; - - // wrap a SnapshotDescription for offline/online snapshot - backupSnapshot = wrapSnapshotDescription(tableName,snapshotName); - try { - env.getMasterServices().getSnapshotManager().deleteSnapshot(backupSnapshot); - } catch (IOException e) { - LOG.debug("Unable to delete " + snapshotName, e); - } - // Kick off snapshot for backup - try { - env.getMasterServices().getSnapshotManager().takeSnapshot(backupSnapshot); - } catch (IOException e) { - LOG.debug("Unable to take snapshot: " + snapshotName, e); - } - long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout( - env.getMasterConfiguration(), - backupSnapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); - BackupUtil.waitForSnapshot(backupSnapshot, waitTime, - env.getMasterServices().getSnapshotManager(), env.getMasterConfiguration()); - // set the snapshot name in BackupStatus of this table, only after snapshot success. 
- backupContext.setSnapshotName(tableName, backupSnapshot.getName()); - } - setNextState(FullTableBackupState.SNAPSHOT_COPY); - break; - case SNAPSHOT_COPY: - // do snapshot copy - LOG.debug("snapshot copy for " + backupId); - try { - this.snapshotCopy(backupContext); - } catch (Exception e) { - // fail the overall backup and return - failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", - BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - } - // Updates incremental backup table set - backupManager.addIncrementalBackupTableSet(backupContext.getTables()); - setNextState(FullTableBackupState.BACKUP_COMPLETE); - break; - - case BACKUP_COMPLETE: - // set overall backup status: complete. Here we make sure to complete the backup. - // After this checkpoint, even if entering cancel process, will let the backup finished - backupContext.setState(BackupState.COMPLETE); - // The table list in backupContext is good for both full backup and incremental backup. - // For incremental backup, it contains the incremental backup table set. - backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); - - HashMap> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); - - Long newStartCode = - BackupClientUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); - backupManager.writeBackupStartCode(newStartCode); - - // backup complete - completeBackup(env, backupContext, backupManager, BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (IOException e) { - LOG.error("Backup failed in " + state); - setFailure("snapshot-table", e); - } - return Flow.HAS_MORE_STATE; - } - - @Override - protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) - throws IOException { - if (state != FullTableBackupState.PRE_SNAPSHOT_TABLE) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } - - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. 
- if (state == FullTableBackupState.SNAPSHOT_COPY) { - cleanupTargetDir(backupContext, conf); - } - } - - @Override - protected FullTableBackupState getState(final int stateId) { - return FullTableBackupState.valueOf(stateId); - } - - @Override - protected int getStateId(final FullTableBackupState state) { - return state.getNumber(); - } - - @Override - protected FullTableBackupState getInitialState() { - return FullTableBackupState.PRE_SNAPSHOT_TABLE; - } - - @Override - protected void setNextState(final FullTableBackupState state) { - if (aborted.get()) { - setAbortFailure("backup-table", "abort requested"); - } else { - super.setNextState(state); - } - } - - @Override - public boolean abort(final MasterProcedureEnv env) { - aborted.set(true); - return true; - } - - @Override - public void toStringClassDetails(StringBuilder sb) { - sb.append(getClass().getSimpleName()); - sb.append(" (targetRootDir="); - sb.append(targetRootDir); - sb.append(")"); - } - - BackupProtos.BackupProcContext toBackupContext() { - BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); - ctxBuilder.setCtx(backupContext.toBackupContext()); - if (newTimestamps != null && !newTimestamps.isEmpty()) { - BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); - for (Entry entry : newTimestamps.entrySet()) { - tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); - ctxBuilder.addServerTimestamp(tsBuilder.build()); - } - } - return ctxBuilder.build(); - } - - @Override - public void serializeStateData(final OutputStream stream) throws IOException { - super.serializeStateData(stream); - - BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); - backupProcCtx.writeDelimitedTo(stream); - } - - @Override - public void deserializeStateData(final InputStream stream) throws IOException { - super.deserializeStateData(stream); - - BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); - backupContext = BackupContext.fromProto(proto.getCtx()); - backupId = backupContext.getBackupId(); - targetRootDir = backupContext.getTargetRootDir(); - tableList = backupContext.getTableNames(); - List svrTimestamps = proto.getServerTimestampList(); - if (svrTimestamps != null && !svrTimestamps.isEmpty()) { - newTimestamps = new HashMap<>(); - for (ServerTimestamp ts : svrTimestamps) { - newTimestamps.put(ts.getServer(), ts.getTimestamp()); - } - } - } - - @Override - public TableName getTableName() { - return TableName.BACKUP_TABLE_NAME; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.BACKUP; - } - - @Override - protected boolean acquireLock(final MasterProcedureEnv env) { - if (env.waitInitialized(this)) { - return false; - } - return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } - - @Override - protected void releaseLock(final MasterProcedureEnv env) { - env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 61e15c437df60c6dd504cda81c29e15f1290469a..8338fee38ff97087aefee3963346f4d68ca1cdec 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem; /** * After a full backup was created, the incremental backup will only store the changes made @@ -71,7 +73,7 @@ public class IncrementalBackupManager { * @return The new HashMap of RS log timestamps after the log roll for this incremental backup. * @throws IOException exception */ - public HashMap getIncrBackupLogFileList(BackupContext backupContext) + public HashMap getIncrBackupLogFileList(BackupInfo backupContext) throws IOException { List logList; HashMap newTimestamps; @@ -84,7 +86,7 @@ public class IncrementalBackupManager { HashMap> previousTimestampMap = backupManager.readLogTimestampMap(); - previousTimestampMins = BackupUtil.getRSLogTimestampMins(previousTimestampMap); + previousTimestampMins = BackupUtil.getRSLogTimestampMins(previousTimestampMap); if (LOG.isDebugEnabled()) { LOG.debug("StartCode " + savedStartCode + "for backupID " + backupContext.getBackupId()); @@ -99,49 +101,88 @@ public class IncrementalBackupManager { try (Admin admin = conn.getAdmin()) { LOG.info("Execute roll log procedure for incremental backup ..."); + HashMap props = new HashMap(); + props.put("backupRoot", backupContext.getTargetRootDir()); admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap()); + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); } newTimestamps = backupManager.readRegionServerLastLogRollResult(); logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); - logList.addAll(getLogFilesFromBackupSystem(previousTimestampMins, newTimestamps)); + List logFromSystemTable = + getLogFilesFromBackupSystem(previousTimestampMins, + newTimestamps, backupManager.getBackupContext().getTargetRootDir()); + addLogsFromBackupSystemToContext(logFromSystemTable); + + logList = excludeAlreadyBackedUpWALs(logList, logFromSystemTable); backupContext.setIncrBackupFileList(logList); return newTimestamps; } + + private List excludeAlreadyBackedUpWALs(List logList, + List logFromSystemTable) { + + List backupedWALList = toWALList(logFromSystemTable); + logList.removeAll(backupedWALList); + return logList; + } + + private List toWALList(List logFromSystemTable) { + + List list = new ArrayList(logFromSystemTable.size()); + for(WALItem item : logFromSystemTable){ + list.add(item.getWalFile()); + } + return list; + } + + private void addLogsFromBackupSystemToContext(List logFromSystemTable) { + List walFiles = new ArrayList(); + for(WALItem item : logFromSystemTable){ + Path p = new Path(item.getWalFile()); + String walFileName = p.getName(); + String backupId = item.getBackupId(); + String relWALPath = backupId + Path.SEPARATOR+walFileName; + 
walFiles.add(relWALPath); + } + } + + /** - * For each region server: get all log files newer than the last timestamps but not newer than the - * newest timestamps. FROM hbase:backup table + * For each region server: get all log files newer than the last timestamps, + * but not newer than the newest timestamps. FROM hbase:backup table * @param olderTimestamps - the timestamp for each region server of the last backup. * @param newestTimestamps - the timestamp for each region server that the backup should lead to. * @return list of log files which needs to be added to this backup * @throws IOException */ - private List getLogFilesFromBackupSystem(HashMap olderTimestamps, - HashMap newestTimestamps) throws IOException { - List logFiles = new ArrayList(); - Iterator it = backupManager.getWALFilesFromBackupSystem(); - + private List getLogFilesFromBackupSystem(HashMap olderTimestamps, + HashMap newestTimestamps, String backupRoot) throws IOException { + List logFiles = new ArrayList(); + Iterator it = backupManager.getWALFilesFromBackupSystem(); while (it.hasNext()) { - String walFileName = it.next(); + WALItem item = it.next(); + String rootDir = item.getBackupRoot(); + if(!rootDir.equals(backupRoot)) { + continue; + } + String walFileName = item.getWalFile(); String server = BackupUtil.parseHostNameFromLogFile(new Path(walFileName)); - //String server = getServer(walFileName); Long tss = getTimestamp(walFileName); Long oldTss = olderTimestamps.get(server); + Long newTss = newestTimestamps.get(server); if (oldTss == null){ - logFiles.add(walFileName); + logFiles.add(item); continue; } - Long newTss = newestTimestamps.get(server); if (newTss == null) { newTss = Long.MAX_VALUE; } - if (tss > oldTss && tss < newTss) { - logFiles.add(walFileName); + logFiles.add(item); } } return logFiles; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java index 12ecbe9dd5c5dc0d754a0a420fa1a71ea82d1a7e..89041841b4cdf90db39afb714bbbdf93bb13efe4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -29,6 +30,13 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public interface IncrementalRestoreService extends Configurable{ - public void run(String logDirectory, TableName[] fromTables, TableName[] toTables) + /** + * Run restore operation + * @param logDirectoryPaths - path array of WAL log directories + * @param fromTables - from tables + * @param toTables - to tables + * @throws IOException + */ + public void run(Path[] logDirectoryPaths, TableName[] fromTables, TableName[] toTables) throws IOException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java deleted file mode 100644 index 8c1258292391db9e3b733bcab89a75ecb7fdc594..0000000000000000000000000000000000000000 --- 
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java +++ /dev/null @@ -1,325 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupClientUtil; -import org.apache.hadoop.hbase.backup.BackupRestoreFactory; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupPhase; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; -import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; - -@InterfaceAudience.Private -public class IncrementalTableBackupProcedure - extends StateMachineProcedure - implements TableProcedureInterface { - private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class); - - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Configuration conf; - private String backupId; - private List tableList; - private String targetRootDir; - HashMap newTimestamps = null; - - private BackupManager backupManager; - private BackupContext backupContext; - - public IncrementalTableBackupProcedure() { - // Required by the Procedure framework to create the procedure on replay - } - - public IncrementalTableBackupProcedure(final MasterProcedureEnv env, - final String backupId, - List tableList, String targetRootDir, final int workers, - final long bandwidth) throws IOException { - backupManager = new BackupManager(env.getMasterConfiguration()); - this.backupId = backupId; - this.tableList = tableList; - this.targetRootDir = targetRootDir; - backupContext = backupManager.createBackupContext(backupId, BackupType.INCREMENTAL, tableList, - targetRootDir); - } - - @Override - public byte[] getResult() { - return backupId.getBytes(); - } - - 
private List filterMissingFiles(List incrBackupFileList) throws IOException { - FileSystem fs = FileSystem.get(conf); - List list = new ArrayList(); - for(String file : incrBackupFileList){ - if(fs.exists(new Path(file))){ - list.add(file); - } else{ - LOG.warn("Can't find file: "+file); - } - } - return list; - } - - /** - * Do incremental copy. - * @param backupContext backup context - */ - private void incrementalCopy(BackupContext backupContext) throws Exception { - - LOG.info("Incremental copy is starting."); - - // set overall backup phase: incremental_copy - backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); - - // avoid action if has been cancelled - if (backupContext.isCancelled()) { - return; - } - - // get incremental backup file list and prepare parms for DistCp - List incrBackupFileList = backupContext.getIncrBackupFileList(); - // filter missing files out (they have been copied by previous backups) - incrBackupFileList = filterMissingFiles(incrBackupFileList); - String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); - strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); - - BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); - int res = copyService.copy(backupContext, backupManager, conf, - BackupCopyService.Type.INCREMENTAL, strArr); - - if (res != 0) { - LOG.error("Copy incremental log files failed with return code: " + res + "."); - throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to " - + backupContext.getHLogTargetDir()); - } - LOG.info("Incremental copy from " + incrBackupFileList + " to " - + backupContext.getHLogTargetDir() + " finished."); - } - - @Override - protected Flow executeFromState(final MasterProcedureEnv env, - final IncrementalTableBackupState state) - throws InterruptedException { - if (conf == null) { - conf = env.getMasterConfiguration(); - } - if (backupManager == null) { - try { - backupManager = new BackupManager(env.getMasterConfiguration()); - } catch (IOException ioe) { - setFailure("incremental backup", ioe); - } - } - if (LOG.isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); - } - try { - switch (state) { - case PREPARE_INCREMENTAL: - FullTableBackupProcedure.beginBackup(backupManager, backupContext); - LOG.debug("For incremental backup, current table set is " - + backupManager.getIncrementalBackupTableSet()); - try { - IncrementalBackupManager incrBackupManager =new IncrementalBackupManager(backupManager); - - newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext); - } catch (Exception e) { - // fail the overall backup and return - FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, - "Unexpected Exception : ", BackupType.INCREMENTAL, conf); - } - - setNextState(IncrementalTableBackupState.INCREMENTAL_COPY); - break; - case INCREMENTAL_COPY: - try { - // copy out the table and region info files for each table - BackupUtil.copyTableRegionInfo(backupContext, conf); - incrementalCopy(backupContext); - // Save list of WAL files copied - backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); - } catch (Exception e) { - // fail the overall backup and return - FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, - "Unexpected exception doing incremental copy : ", BackupType.INCREMENTAL, conf); - } - setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE); - break; - case INCR_BACKUP_COMPLETE: - // set overall backup status: complete. 
Here we make sure to complete the backup. - // After this checkpoint, even if entering cancel process, will let the backup finished - backupContext.setState(BackupState.COMPLETE); - // Set the previousTimestampMap which is before this current log roll to the manifest. - HashMap> previousTimestampMap = - backupManager.readLogTimestampMap(); - backupContext.setIncrTimestampMap(previousTimestampMap); - - // The table list in backupContext is good for both full backup and incremental backup. - // For incremental backup, it contains the incremental backup table set. - backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); - - HashMap> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); - - Long newStartCode = BackupClientUtil - .getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); - backupManager.writeBackupStartCode(newStartCode); - // backup complete - FullTableBackupProcedure.completeBackup(env, backupContext, backupManager, - BackupType.INCREMENTAL, conf); - return Flow.NO_MORE_STATE; - - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (IOException e) { - setFailure("snapshot-table", e); - } - return Flow.HAS_MORE_STATE; - } - - @Override - protected void rollbackState(final MasterProcedureEnv env, - final IncrementalTableBackupState state) throws IOException { - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. - FullTableBackupProcedure.cleanupTargetDir(backupContext, conf); - } - - @Override - protected IncrementalTableBackupState getState(final int stateId) { - return IncrementalTableBackupState.valueOf(stateId); - } - - @Override - protected int getStateId(final IncrementalTableBackupState state) { - return state.getNumber(); - } - - @Override - protected IncrementalTableBackupState getInitialState() { - return IncrementalTableBackupState.PREPARE_INCREMENTAL; - } - - @Override - protected void setNextState(final IncrementalTableBackupState state) { - if (aborted.get()) { - setAbortFailure("snapshot-table", "abort requested"); - } else { - super.setNextState(state); - } - } - - @Override - public boolean abort(final MasterProcedureEnv env) { - aborted.set(true); - return true; - } - - @Override - public void toStringClassDetails(StringBuilder sb) { - sb.append(getClass().getSimpleName()); - sb.append(" (targetRootDir="); - sb.append(targetRootDir); - sb.append(")"); - } - - BackupProtos.BackupProcContext toBackupContext() { - BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); - ctxBuilder.setCtx(backupContext.toBackupContext()); - if (newTimestamps != null && !newTimestamps.isEmpty()) { - BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); - for (Entry entry : newTimestamps.entrySet()) { - tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); - ctxBuilder.addServerTimestamp(tsBuilder.build()); - } - } - return ctxBuilder.build(); - } - - @Override - public void serializeStateData(final OutputStream stream) throws IOException { - super.serializeStateData(stream); - - BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); - backupProcCtx.writeDelimitedTo(stream); - } - - @Override - public void deserializeStateData(final InputStream stream) throws IOException { - super.deserializeStateData(stream); - - BackupProtos.BackupProcContext 
proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); - backupContext = BackupContext.fromProto(proto.getCtx()); - backupId = backupContext.getBackupId(); - targetRootDir = backupContext.getTargetRootDir(); - tableList = backupContext.getTableNames(); - List svrTimestamps = proto.getServerTimestampList(); - if (svrTimestamps != null && !svrTimestamps.isEmpty()) { - newTimestamps = new HashMap<>(); - for (ServerTimestamp ts : svrTimestamps) { - newTimestamps.put(ts.getServer(), ts.getTimestamp()); - } - } - } - - @Override - public TableName getTableName() { - return TableName.BACKUP_TABLE_NAME; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.BACKUP; - } - - @Override - protected boolean acquireLock(final MasterProcedureEnv env) { - if (env.waitInitialized(this)) { - return false; - } - return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } - - @Override - protected void releaseLock(final MasterProcedureEnv env) { - env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java index c0c5220d73ea24c9c55b6733a9bad1477fe199e9..f16d213754971f65ba39b6dc7fa2d02103464114 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java @@ -22,11 +22,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map.Entry; -import java.util.Set; import java.util.TreeSet; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -52,7 +53,6 @@ public final class RestoreClientImpl implements RestoreClient { private static final Log LOG = LogFactory.getLog(RestoreClientImpl.class); private Configuration conf; - private Set lastRestoreImagesSet; public RestoreClientImpl() { } @@ -111,11 +111,10 @@ public final class RestoreClientImpl implements RestoreClient { checkTargetTables(tTableArray, isOverwrite); // start restore process - Set restoreImageSet = - restoreStage(backupManifestMap, sTableArray, tTableArray, autoRestore); + + restoreStage(backupManifestMap, sTableArray, tTableArray, autoRestore); LOG.info("Restore for " + Arrays.asList(sTableArray) + " are successful!"); - lastRestoreImagesSet = restoreImageSet; } catch (IOException e) { LOG.error("ERROR: restore failed with error: " + e.getMessage()); @@ -126,13 +125,6 @@ public final class RestoreClientImpl implements RestoreClient { return false; } - /** - * Get last restore image set. The value is globally set for the latest finished restore. 
- * @return the last restore image set - */ - public Set getLastRestoreImagesSet() { - return lastRestoreImagesSet; - } private boolean validate(HashMap backupManifestMap) throws IOException { @@ -147,10 +139,6 @@ public final class RestoreClientImpl implements RestoreClient { imageSet.addAll(depList); } - // todo merge - LOG.debug("merge will be implemented in future jira"); - // BackupUtil.clearMergedImages(table, imageSet, conf); - LOG.info("Dependent image(s) from old to new:"); for (BackupImage image : imageSet) { String imageDir = @@ -164,6 +152,7 @@ public final class RestoreClientImpl implements RestoreClient { LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); } } + return isValid; } @@ -189,7 +178,7 @@ public final class RestoreClientImpl implements RestoreClient { } } else { LOG.info("HBase table " + tableName - + " does not exist. It will be create during backup process"); + + " does not exist. It will be created during restore process"); } } } @@ -223,54 +212,57 @@ public final class RestoreClientImpl implements RestoreClient { * @return set of BackupImages restored * @throws IOException exception */ - private Set restoreStage( + private void restoreStage( HashMap backupManifestMap, TableName[] sTableArray, TableName[] tTableArray, boolean autoRestore) throws IOException { TreeSet restoreImageSet = new TreeSet(); - - for (int i = 0; i < sTableArray.length; i++) { - restoreImageSet.clear(); - TableName table = sTableArray[i]; - BackupManifest manifest = backupManifestMap.get(table); - if (autoRestore) { - // Get the image list of this backup for restore in time order from old - // to new. - TreeSet restoreList = - new TreeSet(manifest.getDependentListByTable(table)); - LOG.debug("need to clear merged Image. to be implemented in future jira"); - - for (BackupImage image : restoreList) { + try { + for (int i = 0; i < sTableArray.length; i++) { + TableName table = sTableArray[i]; + BackupManifest manifest = backupManifestMap.get(table); + if (autoRestore) { + // Get the image list of this backup for restore in time order from old + // to new. + List list = new ArrayList(); + list.add(manifest.getBackupImage()); + List depList = manifest.getDependentListByTable(table); + list.addAll(depList); + TreeSet restoreList = new TreeSet(list); + LOG.debug("need to clear merged Image. to be implemented in future jira"); + restoreImages(restoreList.iterator(), table, tTableArray[i]); + restoreImageSet.addAll(restoreList); + } else { + BackupImage image = manifest.getBackupImage(); + List depList = manifest.getDependentListByTable(table); + // The dependency list always contains self. + if (depList != null && depList.size() > 1) { + LOG.warn("Backup image " + image.getBackupId() + " depends on other images.\n" + + "this operation will only restore the delta contained within backupImage " + + image.getBackupId()); + } restoreImage(image, table, tTableArray[i]); + restoreImageSet.add(image); } - restoreImageSet.addAll(restoreList); - } else { - BackupImage image = manifest.getBackupImage(); - List depList = manifest.getDependentListByTable(table); - // The dependency list always contains self. 
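The reworked restoreStage() above hands restoreImages() (defined further below) an ascending iterator in which the full image comes first and incremental images follow in time order. A minimal sketch of that ordering contract is shown here; it is illustrative only, assumes BackupImage.getType() reports the image's BackupType, and uses only types this class already references.

// Sketch, not project code: validate the ascending restore sequence restoreImages() expects.
static void checkRestoreSequence(Iterator<BackupImage> it) throws IOException {
  if (!it.hasNext()) {
    throw new IOException("Empty restore sequence");
  }
  // The first image must be a FULL backup; it seeds the target table.
  BackupImage first = it.next();
  if (first.getType() != BackupType.FULL) {
    throw new IOException("Unexpected backup type " + first.getType());
  }
  // Everything after the full image is an incremental delta, replayed oldest to newest.
  while (it.hasNext()) {
    BackupImage incr = it.next();
    if (incr.getType() != BackupType.INCREMENTAL) {
      throw new IOException("Unexpected backup type " + incr.getType());
    }
  }
}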
- if (depList != null && depList.size() > 1) { - LOG.warn("Backup image " + image.getBackupId() + " depends on other images.\n" - + "this operation will only restore the delta contained within backupImage " - + image.getBackupId()); - } - restoreImage(image, table, tTableArray[i]); - restoreImageSet.add(image); - } - if (autoRestore) { - if (restoreImageSet != null && !restoreImageSet.isEmpty()) { - LOG.info("Restore includes the following image(s):"); - for (BackupImage image : restoreImageSet) { - LOG.info(" Backup: " - + image.getBackupId() - + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); + if (autoRestore) { + if (restoreImageSet != null && !restoreImageSet.isEmpty()) { + LOG.info("Restore includes the following image(s):"); + for (BackupImage image : restoreImageSet) { + LOG.info("Backup: " + + image.getBackupId() + + " " + + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), + table)); + } } } } - + } catch (Exception e) { + LOG.error("Failed", e); + throw new IOException(e); } - return restoreImageSet; + LOG.debug("restoreStage finished"); + } /** @@ -289,9 +281,9 @@ public final class RestoreClientImpl implements RestoreClient { RestoreUtil restoreTool = new RestoreUtil(conf, rootPath, backupId); BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, rootPath, backupId); - Path tableBackupPath = HBackupFileSystem.getTableBackupPath(rootPath, sTable, backupId); + Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, rootPath, backupId); - // todo: convert feature will be provided in a future jira + // TODO: convert feature will be provided in a future JIRA boolean converted = false; if (manifest.getType() == BackupType.FULL || converted) { @@ -303,10 +295,66 @@ public final class RestoreClientImpl implements RestoreClient { HBackupFileSystem.getLogBackupDir(image.getRootDir(), image.getBackupId()); LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from incremental backup image " + logBackupDir); - restoreTool.incrementalRestoreTable(logBackupDir, new TableName[] { sTable }, + restoreTool.incrementalRestoreTable(new Path[]{ new Path(logBackupDir)}, new TableName[] { sTable }, new TableName[] { tTable }); } LOG.info(sTable + " has been successfully restored to " + tTable); } + + /** + * Restore operation handle each backupImage in iterator + * @param it: backupImage iterator - ascending + * @param sTable: table to be restored + * @param tTable: table to be restored to + * @throws IOException exception + */ + private void restoreImages(Iterator it, TableName sTable, TableName tTable) + throws IOException { + + // First image MUST be image of a FULL backup + BackupImage image = it.next(); + + String rootDir = image.getRootDir(); + String backupId = image.getBackupId(); + Path backupRoot = new Path(rootDir); + + // We need hFS only for full restore (see the code) + RestoreUtil restoreTool = new RestoreUtil(conf, backupRoot, backupId); + BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); + + Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); + + // TODO: convert feature will be provided in a future JIRA + boolean converted = false; + + if (manifest.getType() == BackupType.FULL || converted) { + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " + + (converted ? 
"converted" : "full") + " backup image " + tableBackupPath.toString()); + restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, converted); + + } else { // incremental Backup + throw new IOException("Unexpected backup type " + image.getType()); + } + + // The rest one are incremental + if (it.hasNext()) { + List logDirList = new ArrayList(); + while (it.hasNext()) { + BackupImage im = it.next(); + String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); + logDirList.add(logBackupDir); + } + String logDirs = StringUtils.join(logDirList, ","); + LOG.info("Restoring '" + sTable + "' to '" + tTable + + "' from log dirs: " + logDirs); + String[] sarr = new String[logDirList.size()]; + logDirList.toArray(sarr); + Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr); + restoreTool.incrementalRestoreTable(paths, new TableName[] { sTable }, + new TableName[] { tTable }); + } + LOG.info(sTable + " has been successfully restored to " + tTable); + } + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java index 9139b6ede0f2b079c1e55c6a46636abb5ff39a89..592770b06e980c6508abdf9b4feb45db6f145f59 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java @@ -98,7 +98,7 @@ public class RestoreUtil { */ Path getTableArchivePath(TableName tableName) throws IOException { - Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(backupRootPath, tableName, + Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), HConstants.HFILE_ARCHIVE_DIRECTORY); Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); @@ -140,11 +140,11 @@ public class RestoreUtil { * @param newTableNames : target tableNames(table names to be restored to) * @throws IOException exception */ - void incrementalRestoreTable(String logDir, + void incrementalRestoreTable(Path[] logDirs, TableName[] tableNames, TableName[] newTableNames) throws IOException { if (tableNames.length != newTableNames.length) { - throw new IOException("Number of source tables adn taget Tables does not match!"); + throw new IOException("Number of source tables and target tables does not match!"); } // for incremental backup image, expect the table already created either by user or previous @@ -161,7 +161,7 @@ public class RestoreUtil { IncrementalRestoreService restoreService = BackupRestoreFactory.getIncrementalRestoreService(conf); - restoreService.run(logDir, tableNames, newTableNames); + restoreService.run(logDirs, tableNames, newTableNames); } } @@ -180,7 +180,7 @@ public class RestoreUtil { */ static Path getTableSnapshotPath(Path backupRootPath, TableName tableName, String backupId) { - return new Path(HBackupFileSystem.getTableBackupPath(backupRootPath, tableName, backupId), + return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), HConstants.SNAPSHOT_DIR_NAME); } @@ -221,10 +221,12 @@ public class RestoreUtil { SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); HTableDescriptor tableDescriptor = manifest.getTableDescriptor(); - if (!tableDescriptor.getNameAsString().equals(tableName)) { + if 
(!tableDescriptor.getTableName().equals(tableName)) { LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + tableInfoPath.toString()); LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString()); + throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + + " under tableInfoPath: " + tableInfoPath.toString()); } return tableDescriptor; } @@ -464,6 +466,9 @@ public class RestoreUtil { this.conf.setInt("hbase.rpc.timeout", resultMillis); } + // By default, it is 32 and loader will fail if # of files in any region exceed this + // limit. Bad for snapshot restore. + this.conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE); LoadIncrementalHFiles loader = null; try { loader = new LoadIncrementalHFiles(this.conf); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java index fdd827211a8038cec9050abe62e4f553c4d5c177..dceb88f52d474123111f9e37b64525e33c7ad0e8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java @@ -28,19 +28,26 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupContext; +import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.impl.BackupCopyService; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.snapshot.ExportSnapshot; +import org.apache.hadoop.mapreduce.Cluster; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobID; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.util.ClassUtil; import org.apache.zookeeper.KeeperException.NoNodeException; /** * Copier for backup operation. Basically, there are 2 types of copy. 
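As a usage note for the copy service being reworked here: a full backup drives the ExportSnapshot-based path with "-snapshot <name> -copy-to <dir>" style arguments, while an incremental backup passes WAL paths plus a target directory to the DistCp-based path. The sketch below shows only the snapshot flavour; the backupContext/backupManager arguments and the snapshot/target values are caller-supplied placeholders, and it assumes the backup factory classes are importable here.

// Illustrative sketch, not project code: export one table snapshot through the copy service.
// Type.FULL selects the ExportSnapshot-based SnapshotCopy; Type.INCREMENTAL selects BackupDistCp.
static int exportOneSnapshot(Configuration conf, BackupInfo backupContext,
    BackupManager backupManager, String snapshotName, String targetDir) throws IOException {
  BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
  String[] args = { "-snapshot", snapshotName, "-copy-to", targetDir };
  // A non-zero return code is treated by callers as a failed export.
  return copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
}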
One is copying from snapshot, @@ -101,10 +108,10 @@ public class MapReduceBackupCopyService implements BackupCopyService { } class SnapshotCopy extends ExportSnapshot { - private BackupContext backupContext; + private BackupInfo backupContext; private TableName table; - public SnapshotCopy(BackupContext backupContext, TableName table) { + public SnapshotCopy(BackupInfo backupContext, TableName table) { super(); this.backupContext = backupContext; this.table = table; @@ -123,13 +130,13 @@ public class MapReduceBackupCopyService implements BackupCopyService { * @param bytesCopied bytes copied * @throws NoNodeException exception */ - static void updateProgress(BackupContext backupContext, BackupManager backupManager, + static void updateProgress(BackupInfo backupContext, BackupManager backupManager, int newProgress, long bytesCopied) throws IOException { // compose the new backup progress data, using fake number for now String backupProgressData = newProgress + "%"; backupContext.setProgress(newProgress); - backupManager.updateBackupStatus(backupContext); + backupManager.updateBackupInfo(backupContext); LOG.debug("Backup progress data \"" + backupProgressData + "\" has been updated to hbase:backup for " + backupContext.getBackupId()); } @@ -142,10 +149,10 @@ public class MapReduceBackupCopyService implements BackupCopyService { // no more DistCp options. class BackupDistCp extends DistCp { - private BackupContext backupContext; + private BackupInfo backupContext; private BackupManager backupManager; - public BackupDistCp(Configuration conf, DistCpOptions options, BackupContext backupContext, + public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupContext, BackupManager backupManager) throws Exception { super(conf, options); @@ -189,13 +196,13 @@ public class MapReduceBackupCopyService implements BackupCopyService { // Don't cleanup while we are setting up. fieldMetaFolder.set(this, methodCreateMetaFolderPath.invoke(this)); fieldJobFS.set(this, ((Path) fieldMetaFolder.get(this)).getFileSystem(getConf())); - job = (Job) methodCreateJob.invoke(this); } methodCreateInputFileListing.invoke(this, job); // Get the total length of the source files List srcs = ((DistCpOptions) fieldInputOptions.get(this)).getSourcePaths(); + long totalSrcLgth = 0; for (Path aSrc : srcs) { totalSrcLgth += BackupUtil.getFilesLength(aSrc.getFileSystem(getConf()), aSrc); @@ -230,7 +237,6 @@ public class MapReduceBackupCopyService implements BackupCopyService { } Thread.sleep(progressReportFreq); } - // update the progress data after copy job complete float newProgress = progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); @@ -264,6 +270,7 @@ public class MapReduceBackupCopyService implements BackupCopyService { } + /** * Do backup copy based on different types. * @param context The backup context @@ -273,7 +280,7 @@ public class MapReduceBackupCopyService implements BackupCopyService { * @throws Exception exception */ @Override - public int copy(BackupContext context, BackupManager backupManager, Configuration conf, + public int copy(BackupInfo context, BackupManager backupManager, Configuration conf, BackupCopyService.Type copyType, String[] options) throws IOException { int res = 0; @@ -285,6 +292,7 @@ public class MapReduceBackupCopyService implements BackupCopyService { // Make a new instance of conf to be used by the snapshot copy class. 
snapshotCp.setConf(new Configuration(conf)); res = snapshotCp.run(options); + } else if (copyType == Type.INCREMENTAL) { LOG.debug("Doing COPY_TYPE_DISTCP"); setSubTaskPercntgInWholeTask(1f); @@ -303,7 +311,6 @@ public class MapReduceBackupCopyService implements BackupCopyService { destfs.mkdirs(dest); } } - res = distcp.run(options); } return res; @@ -313,4 +320,25 @@ public class MapReduceBackupCopyService implements BackupCopyService { } } + @Override + public void cancelCopyJob(String jobId) throws IOException { + JobID id = JobID.forName(jobId); + Cluster cluster = new Cluster(getConf()); + try { + Job job = cluster.getJob(id); + if (job == null) { + LOG.error("No job found for " + id); + // should we throw exception + } + if (job.isComplete() || job.isRetired()) { + return; + } + + job.killJob(); + LOG.debug("Killed job " + id); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java index 203c9a3dc3e8a1e0940eb0a26f96796118e1e9ab..8cb812529bedcaf06fae4fac53fc92a23b54eadc 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java @@ -19,16 +19,21 @@ package org.apache.hadoop.hbase.backup.mapreduce; import java.io.IOException; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupUtil; import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.mapreduce.WALPlayer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -42,24 +47,97 @@ public class MapReduceRestoreService implements IncrementalRestoreService { } @Override - public void run(String logDir, TableName[] tableNames, TableName[] newTableNames) throws IOException { - String tableStr = BackupUtil.join(tableNames); - String newTableStr = BackupUtil.join(newTableNames); + public void run(Path[] logDirPaths, TableName[] tableNames, TableName[] newTableNames) + throws IOException { // WALPlayer reads all files in arbitrary directory structure and creates a Map task for each // log file + String logDirs = StringUtils.join(logDirPaths, ","); + LOG.info("Restore incremental backup from directory " + logDirs + " from hbase tables " + + BackupUtil.join(tableNames) + " to tables " + BackupUtil.join(newTableNames)); - String[] playerArgs = { logDir, tableStr, newTableStr }; - LOG.info("Restore incremental backup from directory " + logDir + " from hbase tables " - + BackupUtil.join(tableNames) + " to tables " - + BackupUtil.join(newTableNames)); + for (int i = 0; i < tableNames.length; i++) { + + LOG.info("Restore "+ tableNames[i] + " into "+ newTableNames[i]); + + Path bulkOutputPath = 
getBulkOutputDir(newTableNames[i].getNameAsString()); + String[] playerArgs = + { logDirs, tableNames[i].getNameAsString(), newTableNames[i].getNameAsString()}; + + int result = 0; + int loaderResult = 0; + try { + Configuration conf = getConf(); + conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); + player.setConf(getConf()); + result = player.run(playerArgs); + if (succeeded(result)) { + // do bulk load + LoadIncrementalHFiles loader = createLoader(); + if (LOG.isDebugEnabled()) { + LOG.debug("Restoring HFiles from directory " + bulkOutputPath); + } + String[] args = { bulkOutputPath.toString(), newTableNames[i].getNameAsString() }; + loaderResult = loader.run(args); + if(failed(loaderResult)) { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop and HBase logs). Bulk loader return code =" + loaderResult); + } + } else { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop/MR and HBase logs). WALPlayer return code =" + result); + } + LOG.debug("Restore Job finished:" + result); + } catch (Exception e) { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop and HBase logs) ", e); + } + + } + } + + + private boolean failed(int result) { + return result != 0; + } + + private boolean succeeded(int result) { + return result == 0; + } + + private LoadIncrementalHFiles createLoader() + throws IOException { + // set configuration for restore: + // LoadIncrementalHFile needs more time + // hbase.rpc.timeout 600000 + // calculates + Integer milliSecInHour = 3600000; + Configuration conf = new Configuration(getConf()); + conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, milliSecInHour); + + // By default, it is 32 and loader will fail if # of files in any region exceed this + // limit. Bad for snapshot restore. + conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE); + LoadIncrementalHFiles loader = null; try { - player.run(playerArgs); + loader = new LoadIncrementalHFiles(conf); } catch (Exception e) { - throw new IOException("cannot restore from backup directory " + logDir - + " (check Hadoop and HBase logs) " + e); + throw new IOException(e); } + return loader; + } + + private Path getBulkOutputDir(String tableName) throws IOException + { + Configuration conf = getConf(); + FileSystem fs = FileSystem.get(conf); + String tmp = conf.get("hbase.tmp.dir"); + Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-"+tableName + "-" + + EnvironmentEdgeManager.currentTime()); + fs.deleteOnExit(path); + return path; } + @Override public Configuration getConf() { diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java new file mode 100644 index 0000000000000000000000000000000000000000..f4ae35ca890f11a87511913fcc5bde92714ee5eb --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java @@ -0,0 +1,56 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
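Before the new BackupController below, here is a compact sketch of the two-step sequence that MapReduceRestoreService.run() above performs for each table: WALPlayer first converts the backed-up WAL edits into HFiles under a bulk-output directory, and LoadIncrementalHFiles then bulk-loads those HFiles into the target table. It is an illustration under assumptions: the WALPlayer instance, WAL directory string, table names, and output path are placeholders supplied by the caller.

// Sketch only: WAL replay to HFiles, then bulk load — mirroring run()/createLoader() above.
static void replayAndBulkLoad(Configuration conf, WALPlayer player, String walDirs,
    String sourceTable, String targetTable, Path bulkOut) throws Exception {
  // Direct WALPlayer output into HFiles instead of issuing live puts.
  conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOut.toString());
  player.setConf(conf);
  int rc = player.run(new String[] { walDirs, sourceTable, targetTable });
  if (rc != 0) {
    throw new IOException("WALPlayer failed (return code=" + rc + ") for " + walDirs);
  }
  // Bulk-load the generated HFiles into the (already existing) target table.
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  int loaderRc = loader.run(new String[] { bulkOut.toString(), targetTable });
  if (loaderRc != 0) {
    throw new IOException("Bulk load failed (return code=" + loaderRc + ") from " + bulkOut);
  }
}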
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.master.MasterServices; + +/** + * The current implementation checks if the backup system table + * (hbase:backup) exists on HBasae Master startup and if it does not - + * it creates it. We need to make sure that backup system table is + * created under HBase user with ADMIN privileges + */ +public class BackupController extends BaseMasterAndRegionObserver { + private static final Log LOG = LogFactory.getLog(BackupController.class.getName()); + + @Override + public void postStartMaster(ObserverContext ctx) + throws IOException { + // Need to create the new system table for backups (if does not exist) + MasterServices master = ctx.getEnvironment().getMasterServices(); + HTableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(); + try{ + master.createTable(backupHTD, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + LOG.info("Created "+ BackupSystemTable.getTableNameAsString()+" table"); + } catch(TableExistsException e) { + LOG.info("Table "+ BackupSystemTable.getTableNameAsString() +" already exists"); + } + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index dae24a6e028fcb78a0c1c07f1560e729f628439a..5dd67b46ca8a57aeb43a8b742ca792488723866f 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -18,9 +18,6 @@ */ package org.apache.hadoop.hbase.backup.master; -import com.google.common.base.Predicate; -import com.google.common.collect.Iterables; - import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -64,11 +61,11 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate { // TODO: LogCleaners do not have a way to get the Connection from Master. We should find a // way to pass it down here, so that this connection is not re-created every time. 
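A note on the BackupController observer added above: master observers like this are typically wired in through configuration rather than constructed directly. The snippet below is a hypothetical deployment sketch using the standard master coprocessor property; the actual wiring chosen for the backup feature may differ (it could, for example, be loaded programmatically).

// Hypothetical wiring sketch: register the observer on the active master via configuration.
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.coprocessor.master.classes",
    "org.apache.hadoop.hbase.backup.master.BackupController");
// Once the master starts with this setting, postStartMaster() runs and creates the
// hbase:backup system table if it does not already exist.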
// It is expensive - try(Connection connection = ConnectionFactory.createConnection(this.getConf()); - final BackupSystemTable table = new BackupSystemTable(connection)) { - + try (final Connection conn = ConnectionFactory.createConnection(getConf()); + final BackupSystemTable table = new BackupSystemTable(conn)) { // If we do not have recorded backup sessions if (!table.hasBackupSessions()) { + LOG.debug("BackupLogCleaner has no backup sessions"); return files; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..bb0219b724be0770c6d117c89ca970e1ff4e92c2 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java @@ -0,0 +1,751 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService; +import org.apache.hadoop.hbase.backup.impl.BackupException; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupUtil; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService.Type; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; +import 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +public class FullTableBackupProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + HashMap newTimestamps = null; + + private BackupManager backupManager; + private BackupInfo backupContext; + + public FullTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public FullTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, List tableList, String targetRootDir, final int workers, + final long bandwidth) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = + backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir, + workers, bandwidth); + if (tableList == null || tableList.isEmpty()) { + this.tableList = new ArrayList<>(backupContext.getTables()); + } + } + + @Override + public byte[] getResult() { + return backupId.getBytes(); + } + + /** + * Begin the overall backup. + * @param backupContext backup context + * @throws IOException exception + */ + static void beginBackup(BackupManager backupManager, BackupInfo backupContext) + throws IOException { + backupManager.setBackupContext(backupContext); + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupInfo(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + private static String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } + + /** + * Delete HBase snapshot for backup. 
+ * @param backupCtx backup context + * @throws Exception exception + */ + private static void deleteSnapshot(final MasterProcedureEnv env, + BackupInfo backupCtx, Configuration conf) + throws IOException { + LOG.debug("Trying to delete snapshot for full backup."); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + HBaseProtos.SnapshotDescription.Builder builder = + HBaseProtos.SnapshotDescription.newBuilder(); + builder.setName(snapshotName); + try { + env.getMasterServices().getSnapshotManager().deleteSnapshot(builder.build()); + } catch (IOException ioe) { + LOG.debug("when deleting snapshot " + snapshotName, ioe); + } + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + + backupCtx.getBackupId() + " succeeded."); + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. + * @throws IOException exception + */ + private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Fail the overall backup. 
+ * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + static void failBackup(final MasterProcedureEnv env, BackupInfo backupContext, + BackupManager backupManager, Exception e, + String msg, BackupType type, Configuration conf) throws IOException { + LOG.error(msg + getMessage(e)); + // If this is a cancel exception, then we've already cleaned. + + if (backupContext.getState().equals(BackupState.CANCELLED)) { + return; + } + + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupInfo(backupContext); + + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Do snapshot copy. + * @param backupContext backup context + * @throws Exception exception + */ + private void snapshotCopy(BackupInfo backupContext) throws Exception { + LOG.info("Snapshot copy is starting."); + + // set overall backup phase: snapshot_copy + backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // call ExportSnapshot to copy files based on hbase snapshot for backup + // ExportSnapshot only support single snapshot export, need loop for multiple tables case + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + + // number of snapshots matches number of tables + float numOfSnapshots = backupContext.getSnapshotNames().size(); + + LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); + + for (TableName table : backupContext.getTables()) { + // Currently we simply set the sub copy tasks by counting the table snapshot number, we can + // calculate the real files' size for the percentage in the future. 
+ // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); + int res = 0; + String[] args = new String[4]; + args[0] = "-snapshot"; + args[1] = backupContext.getSnapshotName(table); + args[2] = "-copy-to"; + args[3] = backupContext.getBackupStatus(table).getTargetDir(); + + LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); + res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); + // if one snapshot export failed, do not continue for remained snapshots + if (res != 0) { + LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); + + throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + + " with reason code " + res); + } + + LOG.info("Snapshot copy " + args[1] + " finished."); + } + } + + /** + * Add manifest for the current backup. The manifest is stored + * within the table backup directory. + * @param backupContext The current backup context + * @throws IOException exception + * @throws BackupException exception + */ + private static void addManifest(BackupInfo backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException, BackupException { + // set the overall backup phase : store manifest + backupContext.setPhase(BackupPhase.STORE_MANIFEST); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + BackupManifest manifest; + + // Since we have each table's backup in its own directory structure, + // we'll store its manifest with the table directory. + for (TableName table : backupContext.getTables()) { + manifest = new BackupManifest(backupContext, table); + ArrayList ancestors = backupManager.getAncestors(backupContext, table); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + + if (type == BackupType.INCREMENTAL) { + // We'll store the log timestamps for this table only in its manifest. + HashMap> tableTimestampMap = + new HashMap>(); + tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); + manifest.setIncrTimestampMap(tableTimestampMap); + ArrayList ancestorss = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestorss) { + manifest.addDependentImage(image); + } + } + manifest.store(conf); + } + + // For incremental backup, we store a overall manifest in + // /WALs/ + // This is used when created the next incremental backup + if (type == BackupType.INCREMENTAL) { + manifest = new BackupManifest(backupContext); + // set the table region server start and end timestamps for incremental backup + manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); + ArrayList ancestors = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); + } + } + + /** + * Get backup request meta data dir as string. 
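To make the metadata string concrete, this small self-contained example reproduces what obtainBackupMetaDataStr() (defined just below) builds; the table names and target root directory are made-up values.

// Hypothetical values; prints: type=FULL,tablelist=ns1:t1;ns1:t2,targetRootDir=hdfs://backup
// (the ';' after the last table is trimmed before targetRootDir is appended).
public class BackupMetaStringExample {
  public static void main(String[] args) {
    StringBuffer sb = new StringBuffer();
    sb.append("type=FULL,tablelist=");
    for (String table : new String[] { "ns1:t1", "ns1:t2" }) {
      sb.append(table + ";");
    }
    if (sb.lastIndexOf(";") > 0) {
      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
    }
    sb.append(",targetRootDir=hdfs://backup");
    System.out.println(sb.toString());
  }
}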
+ * @param backupContext backup context + * @return meta data dir + */ + private static String obtainBackupMetaDataStr(BackupInfo backupContext) { + StringBuffer sb = new StringBuffer(); + sb.append("type=" + backupContext.getType() + ",tablelist="); + for (TableName table : backupContext.getTables()) { + sb.append(table + ";"); + } + if (sb.lastIndexOf(";") > 0) { + sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); + } + sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); + + return sb.toString(); + } + + /** + * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying + * hlogs. + * @throws IOException exception + */ + private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf) + throws IOException { + Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("_distcp_logs")) { + LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + } + + /** + * Complete the overall backup. + * @param backupContext backup context + * @throws Exception exception + */ + static void completeBackup(final MasterProcedureEnv env, BackupInfo backupContext, + BackupManager backupManager, BackupType type, Configuration conf) throws IOException { + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + backupContext.setProgress(100); + // add and store the manifest for the backup + addManifest(backupContext, backupManager, type, conf); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupInfo(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (type == BackupType.FULL) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } else if (type == BackupType.INCREMENTAL) { + cleanupDistCpLog(backupContext, conf); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. + */ + static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__
" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName(snapshotName); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("full backup", ioe); + return Flow.NO_MORE_STATE; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case PRE_SNAPSHOT_TABLE: + beginBackup(backupManager, backupContext); + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + try { + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. + backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. + // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. + LOG.info("Execute roll log procedure for full backup ..."); + MasterProcedureManager mpm = env.getMasterServices().getMasterProcedureManagerHost() + .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + Map props= new HashMap(); + props.put("backupRoot", backupContext.getTargetRootDir()); + long waitTime = MasterProcedureUtil.execProcedure(mpm, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + MasterProcedureUtil.waitForProcedure(mpm, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime, + conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER), + conf.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE)); + + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + } catch (BackupException e) { + // fail the overall backup and return + failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + } + setNextState(FullTableBackupState.SNAPSHOT_TABLES); + break; + case SNAPSHOT_TABLES: + for (TableName tableName : tableList) { + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + 
tableName.getQualifierAsString(); + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = wrapSnapshotDescription(tableName,snapshotName); + try { + env.getMasterServices().getSnapshotManager().deleteSnapshot(backupSnapshot); + } catch (IOException e) { + LOG.debug("Unable to delete " + snapshotName, e); + } + // Kick off snapshot for backup + try { + env.getMasterServices().getSnapshotManager().takeSnapshot(backupSnapshot); + } catch (IOException e) { + LOG.debug("Unable to take snapshot: " + snapshotName, e); + } + long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout( + env.getMasterConfiguration(), + backupSnapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); + BackupUtil.waitForSnapshot(backupSnapshot, waitTime, + env.getMasterServices().getSnapshotManager(), env.getMasterConfiguration()); + // set the snapshot name in BackupStatus of this table, only after snapshot success. + backupContext.setSnapshotName(tableName, backupSnapshot.getName()); + } + setNextState(FullTableBackupState.SNAPSHOT_COPY); + break; + case SNAPSHOT_COPY: + // do snapshot copy + LOG.debug("snapshot copy for " + backupId); + try { + this.snapshotCopy(backupContext); + } catch (Exception e) { + // fail the overall backup and return + failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + } + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + setNextState(FullTableBackupState.BACKUP_COMPLETE); + break; + + case BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupClientUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + completeBackup(env, backupContext, backupManager, BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + LOG.error("Backup failed in " + state); + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) + throws IOException { + if (state != FullTableBackupState.PRE_SNAPSHOT_TABLE) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. 
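As a concrete illustration of the per-table snapshot naming used in the SNAPSHOT_TABLES case above: the values here are hypothetical, and the snippet only re-derives the name format.

// Sketch with made-up values: how the snapshot name for a table is composed.
long ts = 1459817269000L; // EnvironmentEdgeManager.currentTime() in the real code
TableName tableName = TableName.valueOf("default", "usertable");
String snapshotName = "snapshot_" + Long.toString(ts)
    + "_" + tableName.getNamespaceAsString()
    + "_" + tableName.getQualifierAsString();
// snapshotName is "snapshot_1459817269000_default_usertable"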
+ if (state == FullTableBackupState.SNAPSHOT_COPY) { + cleanupTargetDir(backupContext, conf); + } + } + + @Override + protected FullTableBackupState getState(final int stateId) { + return FullTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final FullTableBackupState state) { + return state.getNumber(); + } + + @Override + protected FullTableBackupState getInitialState() { + return FullTableBackupState.PRE_SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final FullTableBackupState state) { + if (aborted.get()) { + setAbortFailure("backup-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupInfo.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null && !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..3e4cc1ea8af150699e899c0ba86fbfa6689a1f07 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java @@ -0,0 +1,331 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupUtil; +import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService.Type; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; + +@InterfaceAudience.Private +public class IncrementalTableBackupProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + HashMap newTimestamps = null; + + private BackupManager backupManager; + private BackupInfo backupContext; + + public IncrementalTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public IncrementalTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, + List tableList, String targetRootDir, final int workers, + final long bandwidth) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = 
targetRootDir; + backupContext = backupManager.createBackupContext(backupId, + BackupType.INCREMENTAL, tableList, targetRootDir, workers, (int)bandwidth); + } + + @Override + public byte[] getResult() { + return backupId.getBytes(); + } + + private List filterMissingFiles(List incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List list = new ArrayList(); + for(String file : incrBackupFileList){ + if(fs.exists(new Path(file))){ + list.add(file); + } else{ + LOG.warn("Can't find file: "+file); + } + } + return list; + } + + /** + * Do incremental copy. + * @param backupContext backup context + */ + private void incrementalCopy(BackupInfo backupContext) throws Exception { + + LOG.info("Incremental copy is starting."); + + // set overall backup phase: incremental_copy + backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // get incremental backup file list and prepare parms for DistCp + List incrBackupFileList = backupContext.getIncrBackupFileList(); + // filter missing files out (they have been copied by previous backups) + incrBackupFileList = filterMissingFiles(incrBackupFileList); + String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + int res = copyService.copy(backupContext, backupManager, conf, + BackupCopyService.Type.INCREMENTAL, strArr); + + if (res != 0) { + LOG.error("Copy incremental log files failed with return code: " + res + "."); + throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir()); + } + LOG.info("Incremental copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir() + " finished."); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, + final IncrementalTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("incremental backup", ioe); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case PREPARE_INCREMENTAL: + FullTableBackupProcedure.beginBackup(backupManager, backupContext); + LOG.debug("For incremental backup, current table set is " + + backupManager.getIncrementalBackupTableSet()); + try { + IncrementalBackupManager incrBackupManager =new IncrementalBackupManager(backupManager); + + newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext); + } catch (Exception e) { + // fail the overall backup and return + FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, + "Unexpected Exception : ", BackupType.INCREMENTAL, conf); + } + + setNextState(IncrementalTableBackupState.INCREMENTAL_COPY); + break; + case INCREMENTAL_COPY: + try { + // copy out the table and region info files for each table + BackupUtil.copyTableRegionInfo(backupContext, conf); + incrementalCopy(backupContext); + // Save list of WAL files copied + backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); + } catch (Exception e) { + // fail the overall backup and return + FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, 
+ "Unexpected exception doing incremental copy : ", BackupType.INCREMENTAL, conf); + } + setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE); + break; + case INCR_BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // Set the previousTimestampMap which is before this current log roll to the manifest. + HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = BackupClientUtil + .getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + // backup complete + FullTableBackupProcedure.completeBackup(env, backupContext, backupManager, + BackupType.INCREMENTAL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, + final IncrementalTableBackupState state) throws IOException { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. 
+ FullTableBackupProcedure.cleanupTargetDir(backupContext, conf); + } + + @Override + protected IncrementalTableBackupState getState(final int stateId) { + return IncrementalTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final IncrementalTableBackupState state) { + return state.getNumber(); + } + + @Override + protected IncrementalTableBackupState getInitialState() { + return IncrementalTableBackupState.PREPARE_INCREMENTAL; + } + + @Override + protected void setNextState(final IncrementalTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupInfo.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null && !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java index f96682fdb85d802e7e6346d20aa4fe24ec633c65..a5f32bb97f931399640ba66d7a3483825198186d 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.procedure.Procedure; import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.zookeeper.KeeperException; @@ -94,7 +95,14 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { for (ServerName sn : serverNames) { servers.add(sn.toString()); } - Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), new byte[0], servers); + + List conf = desc.getConfigurationList(); + byte[] data = new byte[0]; + if(conf.size() > 0){ + // Get backup root path + data = conf.get(0).getValue().getBytes(); + } + Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), data, servers); if (proc == null) { String msg = "Failed to submit distributed procedure for '" + desc.getInstance() + "'"; LOG.error(msg); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java index d5241406801da4826f904a3de71667a292a21db6..58cd4b20a79cde5faea402f39d0b9fcbedf857ff 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -42,16 +42,20 @@ public class LogRollBackupSubprocedure extends Subprocedure { private final RegionServerServices rss; private final LogRollBackupSubprocedurePool taskManager; private FSHLog hlog; + private String backupRoot; public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member, ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - LogRollBackupSubprocedurePool taskManager) { + LogRollBackupSubprocedurePool taskManager, byte[] data) { super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener, wakeFrequency, timeout); LOG.info("Constructing a LogRollBackupSubprocedure."); this.rss = rss; this.taskManager = taskManager; + if(data != null) { + backupRoot = new String(data); + } } /** @@ -77,7 +81,7 @@ public class LogRollBackupSubprocedure extends Subprocedure { Connection connection = rss.getConnection(); try(final BackupSystemTable table = new BackupSystemTable(connection)) { // sanity check, good for testing - HashMap serverTimestampMap = table.readRegionServerLastLogRollResult(); + HashMap serverTimestampMap = table.readRegionServerLastLogRollResult(backupRoot); String host = rss.getServerName().getHostname(); int port = rss.getServerName().getPort(); String server = host + ":" + port; @@ -88,7 +92,7 @@ public class LogRollBackupSubprocedure extends Subprocedure { return null; } // write the log number to hbase:backup. 
- table.writeRegionServerLastLogRollResult(server, filenum); + table.writeRegionServerLastLogRollResult(server, filenum, backupRoot); return null; } catch (Exception e) { LOG.error(e); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index aca190c51dec5a717d36c4764d3e708112a19ed7..3b77183d44d7eb45fa4cf632a813f89e99794664 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -106,7 +106,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa * If in a running state, creates the specified subprocedure for handling a backup procedure. * @return Subprocedure to submit to the ProcedureMemeber. */ - public Subprocedure buildSubprocedure() { + public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { @@ -124,7 +124,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa LogRollBackupSubprocedurePool taskManager = new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, - taskManager); + taskManager, data); } @@ -135,7 +135,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa @Override public Subprocedure buildSubprocedure(String name, byte[] data) { - return LogRollRegionServerProcedureManager.this.buildSubprocedure(); + return LogRollRegionServerProcedureManager.this.buildSubprocedure(data); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java new file mode 100644 index 0000000000000000000000000000000000000000..76402c724a86218c6f5ef0ad6caaa6b3d62f28aa --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.util; +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +/** + * Backup set is a named group of HBase tables, + * which are managed together by Backup/Restore + * framework. Instead of using list of tables in backup or restore + * operation, one can use set's name instead. 
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class BackupSet {
+  private final String name;
+  private final List<TableName> tables;
+
+  public BackupSet(String name, List<TableName> tables) {
+    this.name = name;
+    this.tables = tables;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public List<TableName> getTables() {
+    return tables;
+  }
+
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(name).append("={");
+    for (int i = 0; i < tables.size(); i++) {
+      sb.append(tables.get(i));
+      if (i < tables.size() - 1) {
+        sb.append(",");
+      }
+    }
+    sb.append("}");
+    return sb.toString();
+  }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
index 02fcbba122c24f70c857ab453d6a53870502d2c4..736b8a55f54cac4f1259f19f11ab8b6f960b7aba 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
@@ -27,22 +27,24 @@
 import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} files.
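The BackupSet class added earlier in this patch (hbase-server/.../backup/util/BackupSet.java) is deliberately thin: a name plus the list of tables it stands for, so backup and restore commands can refer to the group by name. A minimal usage sketch; the set name and table names are invented for illustration and are not taken from the patch:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.util.BackupSet;

    public class BackupSetExample {
      public static void main(String[] args) {
        // Group two hypothetical tables under one named set.
        BackupSet nightly = new BackupSet("nightly",
            Arrays.asList(TableName.valueOf("orders"), TableName.valueOf("customers")));
        // toString() renders the set as name={t1,t2}, here "nightly={orders,customers}".
        System.out.println(nightly);
      }
    }
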
@@ -231,21 +233,31 @@ public class WALInputFormat extends InputFormat { List getSplits(final JobContext context, final String startKey, final String endKey) throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); - Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir")); + + Path[] inputPaths = getInputPaths(conf); long startTime = conf.getLong(startKey, Long.MIN_VALUE); long endTime = conf.getLong(endKey, Long.MAX_VALUE); - FileSystem fs = inputDir.getFileSystem(conf); - List files = getFiles(fs, inputDir, startTime, endTime); - - List splits = new ArrayList(files.size()); - for (FileStatus file : files) { + FileSystem fs = FileSystem.get(conf); + + List allFiles = new ArrayList(); + for(Path inputPath: inputPaths){ + List files = getFiles(fs, inputPath, startTime, endTime); + allFiles.addAll(files); + } + List splits = new ArrayList(allFiles.size()); + for (FileStatus file : allFiles) { splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime)); } return splits; } + private Path[] getInputPaths(Configuration conf) { + String inpDirs = conf.get("mapreduce.input.fileinputformat.inputdir"); + return StringUtils.stringToPath(inpDirs.split(",")); + } + private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { List result = new ArrayList(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 2ceeda5901d5a85f135c94efc47bb85af042fccd..4cdbad3d2a0a5283da1bc69d55197cb750e4d484 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -47,6 +47,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -69,9 +71,9 @@ import org.apache.hadoop.util.ToolRunner; public class WALPlayer extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(WALPlayer.class); final static String NAME = "WALPlayer"; - final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output"; - final static String TABLES_KEY = "wal.input.tables"; - final static String TABLE_MAP_KEY = "wal.input.tablesmap"; + public final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output"; + public final static String TABLES_KEY = "wal.input.tables"; + public final static String TABLE_MAP_KEY = "wal.input.tablesmap"; // This relies on Hadoop Configuration to handle warning about deprecated configs and // to set the correct non-deprecated configs when an old one shows up. 
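With BULK_OUTPUT_CONF_KEY, TABLES_KEY and TABLE_MAP_KEY now public, and WALInputFormat accepting a comma-separated list of input directories, other tools can drive WALPlayer programmatically. A rough sketch of such a driver, assuming hypothetical paths and table name and omitting error handling:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.WALPlayer;
    import org.apache.hadoop.util.ToolRunner;

    public class ReplayArchivedWals {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Ask WALPlayer to emit HFiles for bulk load instead of live Puts/Deletes;
        // the key is reachable from outside the package now that it is public.
        conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, "/tmp/wal-bulk-output");
        // Two comma-separated WAL directories handled in one job; the reworked
        // WALInputFormat.getSplits() collects files from each of them.
        String inputDirs = "hdfs:///backup/WALs/run1,hdfs:///backup/WALs/run2";
        // args: <comma-separated input dirs> <table>; exactly one table with bulk output.
        int ret = ToolRunner.run(conf, new WALPlayer(), new String[] { inputDirs, "orders" });
        System.exit(ret);
      }
    }
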
@@ -139,7 +141,8 @@ public class WALPlayer extends Configured implements Tool { protected static class WALMapper extends Mapper { private Map tables = new TreeMap(); - + + @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { @@ -152,6 +155,7 @@ public class WALPlayer extends Configured implements Tool { Put put = null; Delete del = null; Cell lastCell = null; + for (Cell cell : value.getCells()) { // filtering WAL meta entries if (WALEdit.isMetaEditFamily(cell)) { @@ -210,6 +214,13 @@ public class WALPlayer extends Configured implements Tool { } @Override + protected void + cleanup(Mapper.Context context) + throws IOException, InterruptedException { + super.cleanup(context); + } + + @Override public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY); @@ -261,7 +272,7 @@ public class WALPlayer extends Configured implements Tool { Configuration conf = getConf(); setupTime(conf, HLogInputFormat.START_TIME_KEY); setupTime(conf, HLogInputFormat.END_TIME_KEY); - Path inputDir = new Path(args[0]); + String inputDirs = args[0]; String[] tables = args[1].split(","); String[] tableMap; if (args.length > 2) { @@ -275,13 +286,18 @@ public class WALPlayer extends Configured implements Tool { } conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + inputDir)); + Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis())); job.setJarByClass(WALPlayer.class); - FileInputFormat.setInputPaths(job, inputDir); + + FileInputFormat.addInputPaths(job, inputDirs); + job.setInputFormatClass(WALInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); + String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { + LOG.debug("add incremental job :"+hfileOutPath); + // the bulk HFile case if (tables.length != 1) { throw new IOException("Exactly one table must be specified for the bulk export option"); @@ -297,6 +313,8 @@ public class WALPlayer extends Configured implements Tool { RegionLocator regionLocator = conn.getRegionLocator(tableName)) { HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator); } + LOG.debug("success configuring load incremental job"); + TableMapReduceUtil.addDependencyJars(job.getConfiguration(), com.google.common.base.Preconditions.class); } else { @@ -311,6 +329,7 @@ public class WALPlayer extends Configured implements Tool { return job; } + /** * Print usage * @param errorMsg Error message. Can be null. @@ -360,6 +379,7 @@ public class WALPlayer extends Configured implements Tool { System.exit(-1); } Job job = createSubmittableJob(args); - return job.waitForCompletion(true) ? 0 : 1; + int result =job.waitForCompletion(true) ? 
0 : 1; + return result; } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0d5ed1f63136834ab703990d5a3d9cf544dec76c..30da77945a69cc4261c58d5d99b23d7bc1e30a33 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -81,9 +81,8 @@ import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTableHelper; -import org.apache.hadoop.hbase.backup.impl.FullTableBackupProcedure; -import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupProcedure; +import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; +import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2618,10 +2617,14 @@ public class HMaster extends HRegionServer implements MasterServices { List tableList, final String targetRootDir, final int workers, final long bandwidth) throws IOException { long procId; - String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime(); + String backupId = BackupRestoreConstants.BACKUPID_PREFIX + + EnvironmentEdgeManager.currentTime(); if (type == BackupType.INCREMENTAL) { - Set incrTableSet = - BackupSystemTableHelper.getIncrementalBackupTableSet(clusterConnection); + Set incrTableSet = null; + try (BackupSystemTable table = new BackupSystemTable(getConnection())) { + incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); + } + if (incrTableSet.isEmpty()) { LOG.warn("Incremental backup table set contains no table.\n" + "Use 'backup create full' or 'backup stop' to \n " diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 0ce8ee4b7c96cb8355399fb4ff1df1159594ba29..81e56b2c1ce2d672c74c002e617d2034b64485f2 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; +import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; @@ -521,7 +522,7 @@ public class HRegionServer extends HasThread implements FSUtils.setupShortCircuitRead(this.conf); // Disable usage of meta replicas in the regionserver this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); - + BackupManager.decorateRSConfiguration(conf); // Config'ed params this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 1416523088064c2a7c6432069085393e269d91ba..8358e477c4f14597855727a6d48a571245045a42 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.backup; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -25,18 +26,19 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; -import org.apache.hadoop.hbase.backup.impl.BackupContext; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.BackupUtil; -import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; -import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -48,7 +50,6 @@ import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.junit.AfterClass; import org.junit.BeforeClass; -import com.google.common.collect.Lists; /** * This class is only a base for other integration-level backup tests. Do not add tests here. 
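Subclasses of TestBackupBase drive backups through the helpers defined in this base class and assert on the recorded backup state. A minimal sketch of such a test, following the same pattern as the tests added later in this patch; the class and method names are made up:

    package org.apache.hadoop.hbase.backup;

    import static org.junit.Assert.assertTrue;

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.testclassification.LargeTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import com.google.common.collect.Lists;

    @Category(LargeTests.class)
    public class TestBackupSmoke extends TestBackupBase {
      @Test
      public void testFullThenIncremental() throws Exception {
        List<TableName> tables = Lists.newArrayList(table1);
        // A full backup must exist before an incremental one can be taken.
        String fullId = fullTableBackup(tables);
        assertTrue(checkSucceeded(fullId));
        // Incremental backup of the same table, via the helper added in this patch.
        String incrId = incrementalTableBackup(tables);
        assertTrue(checkSucceeded(incrId));
      }
    }
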
@@ -64,17 +65,17 @@ public class TestBackupBase { protected static HBaseTestingUtility TEST_UTIL; protected static HBaseTestingUtility TEST_UTIL2; - protected static TableName table1; - protected static TableName table2; - protected static TableName table3; - protected static TableName table4; + protected static TableName table1 = TableName.valueOf("table1"); + protected static TableName table2 = TableName.valueOf("table2"); + protected static TableName table3 = TableName.valueOf("table3"); + protected static TableName table4 = TableName.valueOf("table4"); protected static TableName table1_restore = TableName.valueOf("table1_restore"); protected static TableName table2_restore = TableName.valueOf("table2_restore"); protected static TableName table3_restore = TableName.valueOf("table3_restore"); protected static TableName table4_restore = TableName.valueOf("table4_restore"); - protected static final int NB_ROWS_IN_BATCH = 100; + protected static final int NB_ROWS_IN_BATCH = 999; protected static final byte[] qualName = Bytes.toBytes("q1"); protected static final byte[] famName = Bytes.toBytes("f"); @@ -91,15 +92,13 @@ public class TestBackupBase { @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtility(); - TEST_UTIL.getConfiguration().set("hbase.procedure.regionserver.classes", - LogRollRegionServerProcedureManager.class.getName()); - TEST_UTIL.getConfiguration().set("hbase.procedure.master.classes", - LogRollMasterProcedureManager.class.getName()); - TEST_UTIL.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); + conf1 = TEST_UTIL.getConfiguration(); + conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); + // Set MultiWAL (with 2 default WAL files per RS) + //conf1.set(WAL_PROVIDER, "multiwal"); TEST_UTIL.startMiniZKCluster(); MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster(); - conf1 = TEST_UTIL.getConfiguration(); conf2 = HBaseConfiguration.create(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); TEST_UTIL2 = new HBaseTestingUtility(conf2); @@ -113,9 +112,21 @@ public class TestBackupBase { LOG.info("ROOTDIR " + BACKUP_ROOT_DIR); BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT"; LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR); - + waitForSystemTable(); createTables(); } + + static void waitForSystemTable() throws Exception + { + try(Admin admin = TEST_UTIL.getAdmin();) { + while (!admin.tableExists(BackupSystemTable.getTableName()) + || !admin.isTableAvailable(BackupSystemTable.getTableName())) { + Thread.sleep(1000); + } + } + LOG.debug("backup table exists and available"); + + } /** * @throws java.lang.Exception @@ -155,6 +166,10 @@ public class TestBackupBase { return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR); } + protected String incrementalTableBackup(List tables) throws IOException { + return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + } + protected static void loadTable(HTable table) throws Exception { Put p; // 100 + 1 row to t1_syncup @@ -169,7 +184,6 @@ public class TestBackupBase { long tid = System.currentTimeMillis(); table1 = TableName.valueOf("test-" + tid); - BackupSystemTable backupTable = new BackupSystemTable(TEST_UTIL.getConnection()); HBaseAdmin ha = TEST_UTIL.getHBaseAdmin(); HTableDescriptor desc = new HTableDescriptor(table1); HColumnDescriptor fam = new HColumnDescriptor(famName); @@ -197,26 +211,28 @@ public class TestBackupBase { } protected boolean checkSucceeded(String backupId) throws IOException { - 
BackupContext status = getBackupContext(backupId); + BackupInfo status = getBackupContext(backupId); if (status == null) return false; return status.getState() == BackupState.COMPLETE; } protected boolean checkFailed(String backupId) throws IOException { - BackupContext status = getBackupContext(backupId); + BackupInfo status = getBackupContext(backupId); if (status == null) return false; return status.getState() == BackupState.FAILED; } - private BackupContext getBackupContext(String backupId) throws IOException { - Configuration conf = conf1;//BackupClientImpl.getConf(); - try (Connection connection = ConnectionFactory.createConnection(conf); - BackupSystemTable table = new BackupSystemTable(connection)) { - BackupContext status = table.readBackupStatus(backupId); + private BackupInfo getBackupContext(String backupId) throws IOException { + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + BackupInfo status = table.readBackupInfo(backupId); return status; } } + protected BackupClient getBackupClient(){ + return BackupRestoreFactory.getBackupClient(conf1); + } + protected RestoreClient getRestoreClient() { return BackupRestoreFactory.getRestoreClient(conf1); @@ -232,4 +248,15 @@ public class TestBackupBase { } return ret; } + + protected void dumpBackupDir() throws IOException + { + // Dump Backup Dir + FileSystem fs = FileSystem.get(conf1); + RemoteIterator it = fs.listFiles( new Path(BACKUP_ROOT_DIR), true); + while(it.hasNext()){ + LOG.debug("DDEBUG: "+it.next().getPath()); + } + + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java index 7f5846cb841fd15730dc975a64f07d5946645bbf..62c47d635e43cb180525aea09d7484805c3df24e 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.backup; -import static org.junit.Assert.assertTrue; - import java.util.List; import org.apache.commons.logging.Log; diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java new file mode 100644 index 0000000000000000000000000000000000000000..eeb89b5b7c12fcf1e5d61eb9bf464abfa2785f8f --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupDelete extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupDelete.class); + + /** + * Verify that full backup is created on a single table with data correctly. Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupDelete() throws Exception { + LOG.info("test backup delete on a single table with data"); + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] backupIds = new String[] { backupId }; + getBackupClient().deleteBackups(backupIds); + + LOG.info("delete_backup"); + String output = baos.toString(); + LOG.info(baos.toString()); + assertTrue(output.indexOf("Delete backup for backupID=" + backupId + " completed.") >= 0); + } + + /** + * Verify that full backup is created on a single table with data correctly. Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupDeleteCommand() throws Exception { + LOG.info("test backup delete on a single table with data: command-line"); + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"delete", backupId }; + // Run backup + + try{ + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + } catch(Exception e){ + LOG.error("failed", e); + } + LOG.info("delete_backup"); + String output = baos.toString(); + LOG.info(baos.toString()); + assertTrue(output.indexOf("Delete backup for backupID=" + backupId + " completed.") >= 0); + } + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java new file mode 100644 index 0000000000000000000000000000000000000000..4f7cb11fa827d5c52b0d8d2c0597bb1d816105c5 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupDescribe extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupDescribe.class); + + /** + * Verify that full backup is created on a single table with data correctly. Verify that describe + * works as expected + * @throws Exception + */ + @Test + public void testBackupDescribe() throws Exception { + + LOG.info("test backup describe on a single table with data"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + + + BackupInfo info = getBackupClient().getBackupInfo(backupId); + assertTrue(info.getState() == BackupState.COMPLETE); + + } + + @Test + public void testBackupDescribeCommand() throws Exception { + + LOG.info("test backup describe on a single table with data: command-line"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"describe", backupId }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + String response = baos.toString(); + assertTrue(response.indexOf(backupId) > 0); + assertTrue(response.indexOf("COMPLETE") > 0); + + BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupInfo status = table.readBackupInfo(backupId); + String desc = status.getShortDescription(); + table.close(); + assertTrue(response.indexOf(desc) >= 0); + + } + + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java index 175bce06df0b131817c155d1086aa01918e8dade..3ef68e61678d768e1b9b20db7ba9a79dde650133 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java @@ -65,8 +65,7 @@ public class TestBackupLogCleaner extends TestBackupBase { List tableSetFullList = Lists.newArrayList(table1, table2, table3, table4); - try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - BackupSystemTable systemTable = new 
BackupSystemTable(connection)) { + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { // Verify that we have no backup sessions yet assertFalse(systemTable.hasBackupSessions()); @@ -76,16 +75,18 @@ public class TestBackupLogCleaner extends TestBackupBase { cleaner.setConf(TEST_UTIL.getConfiguration()); Iterable deletable = cleaner.getDeletableFiles(walFiles); + int size = Iterables.size(deletable); + // We can delete all files because we do not have yet recorded backup sessions - assertTrue(Iterables.size(deletable) == walFiles.size()); + assertTrue(size == walFiles.size()); - systemTable.addWALFiles(swalFiles, "backup"); + systemTable.addWALFiles(swalFiles, "backup", "root"); String backupIdFull = fullTableBackup(tableSetFullList); - // getBackupClient().create(BackupType.FULL, tableSetFullList, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupIdFull)); // Check one more time deletable = cleaner.getDeletableFiles(walFiles); // We can delete wal files because they were saved into hbase:backup table - int size = Iterables.size(deletable); + size = Iterables.size(deletable); assertTrue(size == walFiles.size()); List newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration()); @@ -95,7 +96,6 @@ public class TestBackupLogCleaner extends TestBackupBase { // New list of wal files is greater than the previous one, // because new wal per RS have been opened after full backup assertTrue(walFiles.size() < newWalFiles.size()); - // TODO : verify that result files are not walFiles collection Connection conn = ConnectionFactory.createConnection(conf1); // #2 - insert some data to table HTable t1 = (HTable) conn.getTable(table1); @@ -123,6 +123,7 @@ public class TestBackupLogCleaner extends TestBackupBase { List tableSetIncList = Lists.newArrayList(table1, table2, table3); String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupIdIncMultiple)); deletable = cleaner.getDeletableFiles(newWalFiles); assertTrue(Iterables.size(deletable) == newWalFiles.size()); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java new file mode 100644 index 0000000000000000000000000000000000000000..716a22a7e9422b0af8f0fe8ed0fb24af034d851e --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupShowHistory extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupShowHistory.class); + + /** + * Verify that full backup is created on a single table with data correctly. Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupHistory() throws Exception { + + LOG.info("test backup history on a single table with data"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + + List history = getBackupClient().getHistory(10); + assertTrue(history.size() > 0); + boolean success = false; + for(BackupInfo info: history){ + if(info.getBackupId().equals(backupId)){ + success = true; break; + } + } + assertTrue(success); + LOG.info("show_history"); + + } + + @Test + public void testBackupHistoryCommand() throws Exception { + + LOG.info("test backup history on a single table with data: command-line"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"history", "-n", "10" }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + LOG.info("show_history"); + String output = baos.toString(); + LOG.info(baos.toString()); + assertTrue(output.indexOf(backupId) > 0); + } +} \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java new file mode 100644 index 0000000000000000000000000000000000000000..ce04b0b7f85e6a05771bc06a905db0ebc766dd62 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupStatusProgress extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupStatusProgress.class); + + /** + * Verify that full backup is created on a single table with data correctly. + * @throws Exception + */ + @Test + public void testBackupStatusProgress() throws Exception { + + LOG.info("test backup status/progress on a single table with data"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + + + BackupInfo info = getBackupClient().getBackupInfo(backupId); + assertTrue(info.getState() == BackupState.COMPLETE); + int p = getBackupClient().getProgress(backupId); + LOG.debug(info.getShortDescription()); + assertTrue(p > 0); + + } + + @Test + public void testBackupStatusProgressCommand() throws Exception { + + LOG.info("test backup status/progress on a single table with data: command-line"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"describe", backupId }; + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + String responce = baos.toString(); + assertTrue(responce.indexOf(backupId) > 0); + assertTrue(responce.indexOf("COMPLETE") > 0); + + baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + args = new String[]{"progress", backupId }; + ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret ==0); + responce = baos.toString(); + assertTrue(responce.indexOf(backupId) >= 0); + assertTrue(responce.indexOf("progress") > 0); + assertTrue(responce.indexOf("100") > 0); + + + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index f775c65b38ee758f7470aa662998fa2a3f632ebf..7ea4338fa670fd312f0deef02439a0c11d7e5daf 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -38,14 +38,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupContext; -import org.apache.hadoop.hbase.backup.impl.BackupContext.BackupState; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTableHelper; -import 
org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; import org.junit.AfterClass; @@ -69,10 +65,21 @@ public class TestBackupSystemTable { @BeforeClass public static void setUp() throws Exception { - cluster = UTIL.startMiniCluster(); - conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); + cluster = UTIL.startMiniCluster(); + conn = UTIL.getConnection(); + waitForSystemTable(); } - + + static void waitForSystemTable() throws Exception + { + try(Admin admin = UTIL.getAdmin();) { + while (!admin.tableExists(BackupSystemTable.getTableName()) + || !admin.isTableAvailable(BackupSystemTable.getTableName())) { + Thread.sleep(1000); + } + } + } + @Before public void before() throws IOException { table = new BackupSystemTable(conn); @@ -83,22 +90,21 @@ public class TestBackupSystemTable { if (table != null) { table.close(); } + } @Test public void testUpdateReadDeleteBackupStatus() throws IOException { - BackupContext ctx = createBackupContext(); - table.updateBackupStatus(ctx); - BackupContext readCtx = table.readBackupStatus(ctx.getBackupId()); + BackupInfo ctx = createBackupContext(); + table.updateBackupInfo(ctx); + BackupInfo readCtx = table.readBackupInfo(ctx.getBackupId()); assertTrue(compare(ctx, readCtx)); - // try fake backup id - readCtx = table.readBackupStatus("fake"); - + readCtx = table.readBackupInfo("fake"); assertNull(readCtx); // delete backup context - table.deleteBackupStatus(ctx.getBackupId()); - readCtx = table.readBackupStatus(ctx.getBackupId()); + table.deleteBackupInfo(ctx.getBackupId()); + readCtx = table.readBackupInfo(ctx.getBackupId()); assertNull(readCtx); cleanBackupTable(); } @@ -106,8 +112,8 @@ public class TestBackupSystemTable { @Test public void testWriteReadBackupStartCode() throws IOException { Long code = 100L; - table.writeBackupStartCode(code); - String readCode = table.readBackupStartCode(); + table.writeBackupStartCode(code, "root"); + String readCode = table.readBackupStartCode("root"); assertEquals(code, new Long(Long.parseLong(readCode))); cleanBackupTable(); } @@ -124,23 +130,23 @@ public class TestBackupSystemTable { @Test public void testBackupHistory() throws IOException { int n = 10; - List list = createBackupContextList(n); + List list = createBackupContextList(n); // Load data - for (BackupContext bc : list) { + for (BackupInfo bc : list) { // Make sure we set right status bc.setState(BackupState.COMPLETE); - table.updateBackupStatus(bc); + table.updateBackupInfo(bc); } // Reverse list for comparison Collections.reverse(list); - ArrayList history = table.getBackupHistory(); + ArrayList history = table.getBackupHistory(); assertTrue(history.size() == n); for (int i = 0; i < n; i++) { - BackupContext ctx = list.get(i); - BackupCompleteData data = history.get(i); + BackupInfo ctx = list.get(i); + BackupInfo data = history.get(i); assertTrue(compare(ctx, data)); } @@ -149,15 +155,52 @@ public class TestBackupSystemTable { } @Test + public void testBackupDelete() throws IOException { + + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + int n = 10; + List list = createBackupContextList(n); + + // Load data + for (BackupInfo bc : list) { + // Make sure we set right status + bc.setState(BackupState.COMPLETE); + table.updateBackupInfo(bc); + } + + // Verify exists + for 
(BackupInfo bc : list) { + assertNotNull(table.readBackupInfo(bc.getBackupId())); + } + + // Delete all + for (BackupInfo bc : list) { + table.deleteBackupInfo(bc.getBackupId()); + } + + // Verify do not exists + for (BackupInfo bc : list) { + assertNull(table.readBackupInfo(bc.getBackupId())); + } + + cleanBackupTable(); + } + + } + + + + @Test public void testRegionServerLastLogRollResults() throws IOException { String[] servers = new String[] { "server1", "server2", "server3" }; Long[] timestamps = new Long[] { 100L, 102L, 107L }; for (int i = 0; i < servers.length; i++) { - table.writeRegionServerLastLogRollResult(servers[i], timestamps[i]); + table.writeRegionServerLastLogRollResult(servers[i], timestamps[i], "root"); } - HashMap result = table.readRegionServerLastLogRollResult(); + HashMap result = table.readRegionServerLastLogRollResult("root"); assertTrue(servers.length == result.size()); Set keys = result.keySet(); String[] keysAsArray = new String[keys.size()]; @@ -188,9 +231,10 @@ public class TestBackupSystemTable { tables2.add(TableName.valueOf("t4")); tables2.add(TableName.valueOf("t5")); - table.addIncrementalBackupTableSet(tables1); - TreeSet res1 = (TreeSet) BackupSystemTableHelper - .getIncrementalBackupTableSet(conn); + table.addIncrementalBackupTableSet(tables1, "root"); + BackupSystemTable table = new BackupSystemTable(conn); + TreeSet res1 = (TreeSet) + table.getIncrementalBackupTableSet("root"); assertTrue(tables1.size() == res1.size()); Iterator desc1 = tables1.descendingIterator(); Iterator desc2 = res1.descendingIterator(); @@ -198,9 +242,9 @@ public class TestBackupSystemTable { assertEquals(desc1.next(), desc2.next()); } - table.addIncrementalBackupTableSet(tables2); - TreeSet res2 = (TreeSet) BackupSystemTableHelper - .getIncrementalBackupTableSet(conn); + table.addIncrementalBackupTableSet(tables2, "root"); + TreeSet res2 = (TreeSet) + table.getIncrementalBackupTableSet("root"); assertTrue((tables2.size() + tables1.size() - 1) == res2.size()); tables1.addAll(tables2); @@ -229,9 +273,9 @@ public class TestBackupSystemTable { rsTimestampMap.put("rs2", 101L); rsTimestampMap.put("rs3", 103L); - table.writeRegionServerLogTimestamp(tables, rsTimestampMap); + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, "root"); - HashMap> result = table.readLogTimestampMap(); + HashMap> result = table.readLogTimestampMap("root"); assertTrue(tables.size() == result.size()); @@ -255,9 +299,9 @@ public class TestBackupSystemTable { rsTimestampMap1.put("rs2", 201L); rsTimestampMap1.put("rs3", 203L); - table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1); + table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1, "root"); - result = table.readLogTimestampMap(); + result = table.readLogTimestampMap("root"); assertTrue(5 == result.size()); @@ -295,7 +339,7 @@ public class TestBackupSystemTable { "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3"); String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5"; - table.addWALFiles(files, "backup"); + table.addWALFiles(files, "backup", "root"); assertTrue(table.checkWALFile(files.get(0))); assertTrue(table.checkWALFile(files.get(1))); @@ -305,26 +349,155 @@ public class TestBackupSystemTable { cleanBackupTable(); } - private boolean compare(BackupContext ctx, BackupCompleteData data) { + + /** + * Backup set tests + */ + + @Test + public void testBackupSetAddNotExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] 
{ "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals(tables[i])); + } + cleanBackupTable(); + } - return ctx.getBackupId().equals(data.getBackupToken()) - && ctx.getTargetRootDir().equals(data.getBackupRootPath()) - && ctx.getType().toString().equals(data.getType()) - && ctx.getStartTs() == Long.parseLong(data.getStartTime()) - && ctx.getEndTs() == Long.parseLong(data.getEndTime()); + } + @Test + public void testBackupSetAddExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] addTables = new String[] { "table4", "table5", "table6" }; + table.addToBackupSet(setName, addTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length + addTables.length); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetAddExistsIntersects() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] addTables = new String[] { "table3", "table4", "table5", "table6" }; + table.addToBackupSet(setName, addTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size()== tables.length + addTables.length - 1); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetRemoveSomeNotExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] removeTables = new String[] { "table4", "table5", "table6" }; + table.removeFromBackupSet(setName, removeTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length - 1); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetRemove() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] removeTables = new String[] { "table4", "table3" }; + table.removeFromBackupSet(setName, removeTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length - 2); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetDelete() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { 
+ + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + table.deleteBackupSet(setName); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames.size() == 0); + cleanBackupTable(); + } + } + + @Test + public void testBackupSetList() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName1 = "name1"; + String setName2 = "name2"; + table.addToBackupSet(setName1, tables); + table.addToBackupSet(setName2, tables); + + List list = table.listBackupSets(); + + assertTrue(list.size() == 2); + assertTrue(list.get(0).equals(setName1)); + assertTrue(list.get(1).equals(setName2)); + + cleanBackupTable(); + } } + - private boolean compare(BackupContext one, BackupContext two) { + private boolean compare(BackupInfo one, BackupInfo two) { return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType()) && one.getTargetRootDir().equals(two.getTargetRootDir()) && one.getStartTs() == two.getStartTs() && one.getEndTs() == two.getEndTs(); } - private BackupContext createBackupContext() { + private BackupInfo createBackupContext() { - BackupContext ctxt = - new BackupContext("backup_" + System.nanoTime(), BackupType.FULL, + BackupInfo ctxt = + new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") }, "/hbase/backup"); @@ -333,8 +506,8 @@ public class TestBackupSystemTable { return ctxt; } - private List createBackupContextList(int size) { - List list = new ArrayList(); + private List createBackupContextList(int size) { + List list = new ArrayList(); for (int i = 0; i < size; i++) { list.add(createBackupContext()); try { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index 9dda9673bc67a717bd66dbacbd92b66d92708b1e..0c0bf4a8438a530f66dea71fdae3dc046c2a0b29 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -11,12 +11,17 @@ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -36,10 +41,36 @@ public class TestFullBackup extends TestBackupBase { LOG.info("test full backup on a single table with data"); List tables = Lists.newArrayList(table1); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); LOG.info("backup complete for " + backupId); } /** + * Verify that full backup is created on a single table with data correctly. 
+ * @throws Exception + */ + @Test + public void testFullBackupSingleCommand() throws Exception { + LOG.info("test full backup on a single table with data: command-line"); + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, table1.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList<BackupInfo> backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before + 1); + for (BackupInfo data : backups) { + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } + LOG.info("backup complete"); + } + + + /** * Verify that full backup is created on multiple tables correctly. * @throws Exception */ @@ -48,8 +79,29 @@ public class TestFullBackup extends TestBackupBase { LOG.info("create full backup image on multiple tables with data"); List tables = Lists.newArrayList(table1, table1); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); } + @Test + public void testFullBackupMultipleCommand() throws Exception { + LOG.info("test full backup on multiple tables with data: command-line"); + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, + table1.getNameAsString() + "," + table2.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList<BackupInfo> backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before + 1); + for (BackupInfo data : backups) { + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } + LOG.info("backup complete"); + } /** * Verify that full backup is created on all tables correctly. * @throws Exception */ @@ -58,5 +110,26 @@ public class TestFullBackup extends TestBackupBase { public void testFullBackupAll() throws Exception { LOG.info("create full backup image on all tables"); String backupId = fullTableBackup(null); + assertTrue(checkSucceeded(backupId)); + + } + + @Test + public void testFullBackupAllCommand() throws Exception { + LOG.info("create full backup image on all tables: command-line"); + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList<BackupInfo> backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before + 1); + for (BackupInfo data : backups) { + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } } } \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java new file mode 100644 index 0000000000000000000000000000000000000000..a4c0fa15afc8b5688c47d3ed5227d674c1385469 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestFullBackupSet extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestFullBackupSet.class); + + + /** + * Verify that full backup is created on a single table with data correctly. + * @throws Exception + */ + @Test + public void testFullBackupSetExist() throws Exception { + + LOG.info("TFBSE test full backup, backup set exists"); + + //Create set + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + String name = "name"; + table.addToBackupSet(name, new String[] { table1.getNameAsString() }); + List names = table.describeBackupSet(name); + + assertNotNull(names); + assertTrue(names.size() == 1); + assertTrue(names.get(0).equals(table1)); + + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + assertTrue(backups.size() == 1); + String backupId = backups.get(0).getBackupId(); + assertTrue(checkSucceeded(backupId)); + LOG.info("TFBSE backup complete"); + } + + } + + @Test + public void testFullBackupSetDoesNotExist() throws Exception { + + LOG.info("TFBSE test full backup, backup set does not exist"); + String name = "name1"; + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret != 0); + + } + +} \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index 3139134f9fb02f4ccab0d462249ec6b4e1c71529..983b850152c40f2502be0917cb0d70ef166fea3a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -16,11 +16,13 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import 
org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -42,6 +44,8 @@ public class TestFullRestore extends TestBackupBase { List tables = Lists.newArrayList(table1); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; @@ -54,6 +58,29 @@ public class TestFullRestore extends TestBackupBase { hba.close(); } + + @Test + public void testFullRestoreSingleCommand() throws Exception { + + LOG.info("test full restore on a single table empty table: command-line"); + + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + //restore [tableMapping] + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + table1.getNameAsString(), table1_restore.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + hba.close(); + } + /** * Verify that multiple tables are restored to new tables. * @throws Exception @@ -63,6 +90,7 @@ public class TestFullRestore extends TestBackupBase { LOG.info("create full backup image on multiple tables"); List tables = Lists.newArrayList(table2, table3); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; @@ -78,6 +106,38 @@ public class TestFullRestore extends TestBackupBase { } /** + * Verify that multiple tables are restored to new tables. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleCommand() throws Exception { + LOG.info("create full backup image on multiple tables: command-line"); + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + + + //restore [tableMapping] + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2_restore)); + assertTrue(hba.tableExists(table3_restore)); + TEST_UTIL.deleteTable(table2_restore); + TEST_UTIL.deleteTable(table3_restore); + hba.close(); + } + + + /** * Verify that a single table is restored using overwrite * @throws Exception */ @@ -87,6 +147,8 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test full restore on a single table empty table"); List tables = Lists.newArrayList(table1); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; @@ -96,6 +158,32 @@ public class TestFullRestore extends TestBackupBase { } /** + * Verify that a single table is restored using overwrite + * @throws Exception + */ + @Test + public void testFullRestoreSingleOverwriteCommand() throws Exception { + + LOG.info("test full restore on a single table empty table: command-line"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + TableName[] tableset = new TableName[] { table1 }; + //restore [tableMapping] + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + StringUtils.join(tableset, ","), "-overwrite" }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret==0); + + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1)); + hba.close(); + + } + + /** * Verify that multiple tables are restored to new tables using overwrite. * @throws Exception */ @@ -105,6 +193,7 @@ public class TestFullRestore extends TestBackupBase { List tables = Lists.newArrayList(table2, table3); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); TableName[] restore_tableset = new TableName[] { table2, table3 }; RestoreClient client = getRestoreClient(); @@ -113,6 +202,32 @@ public class TestFullRestore extends TestBackupBase { } /** + * Verify that multiple tables are restored to new tables using overwrite. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleOverwriteCommand() throws Exception { + LOG.info("create full backup image on multiple tables: command-line"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + //restore [tableMapping] + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), "-overwrite" }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2)); + assertTrue(hba.tableExists(table3)); + hba.close(); + } + + /** * Verify that restore fails on a single table that does not exist. * @throws Exception */ @@ -122,6 +237,7 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test restore fails on a single table that does not exist"); List tables = Lists.newArrayList(table1); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); @@ -132,6 +248,31 @@ public class TestFullRestore extends TestBackupBase { false); } + + /** + * Verify that restore fails on a single table that does not exist. + * @throws Exception + */ + @Test + public void testFullRestoreSingleDNECommand() throws Exception { + + LOG.info("test restore fails on a single table that does not exist: command-line"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; + TableName[] tablemap = new TableName[] { table1_restore }; + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + StringUtils.join(tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret != 0); + + } /** * Verify that restore fails on multiple tables that do not exist. * @throws Exception @@ -143,6 +284,7 @@ public class TestFullRestore extends TestBackupBase { List tables = Lists.newArrayList(table2, table3); String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); TableName[] restore_tableset = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; @@ -151,4 +293,28 @@ public class TestFullRestore extends TestBackupBase { client.restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset, tablemap, false); } + + /** + * Verify that restore fails on multiple tables that do not exist. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleDNECommand() throws Exception { + + LOG.info("test restore fails on multiple tables that do not exist: command-line"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset + = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + String[] args = new String[]{"restore", BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret != 0); + } } \ No newline at end of file diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index 6669612b91d8b702f5e6af9cf6d17df4ef72fdb3..04ee0157f23548ed1bae6897c8b9fb39993bda7a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -24,7 +24,10 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -43,7 +46,7 @@ import com.google.common.collect.Lists; @Category(LargeTests.class) public class TestIncrementalBackup extends TestBackupBase { private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class); - //implement all testcases in 1 test since incremental backup/restore has dependencies + //implement all test cases in 1 test since incremental backup/restore has dependencies @Test public void TestIncBackupRestore() throws Exception { // #1 - create full backup for all tables @@ -89,6 +92,7 @@ public class TestIncrementalBackup extends TestBackupBase { request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) .setTargetRootDir(BACKUP_ROOT_DIR); String backupIdIncMultiple = admin.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); // #4 - restore full backup for all tables, without overwrite TableName[] tablesRestoreFull = @@ -133,7 +137,6 @@ public class TestIncrementalBackup extends TestBackupBase { new TableName[] { table1, table2, table3 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore, table3_restore }; - LOG.info("restore inc backup " + backupIdIncMultiple); client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java new file mode 100644 index 0000000000000000000000000000000000000000..b3cf4ee0220155b6214861c8aef04f85897c3b93 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestIncrementalBackupNoDataLoss extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestIncrementalBackupNoDataLoss.class); + + // implement all test cases in 1 test since incremental backup/restore has dependencies + @Test + public void TestIncBackupRestore() throws Exception { + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + List tables = Lists.newArrayList(table1, table2); + String backupIdFull = fullTableBackup(tables); + assertTrue(checkSucceeded(backupIdFull)); + Connection conn = ConnectionFactory.createConnection(conf1); + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + t2.close(); + + // #3 - incremental backup for table1 + + tables = Lists.newArrayList(table1); + String backupIdInc1 = incrementalTableBackup(tables); + assertTrue(checkSucceeded(backupIdInc1)); + + // #4 - incremental backup for table2 + + tables = Lists.newArrayList(table2); + String backupIdInc2 = incrementalTableBackup(tables); + assertTrue(checkSucceeded(backupIdInc2)); + // #5 - restore incremental backup for table1 + TableName[] tablesRestoreInc1 = new TableName[] { table1 }; + TableName[] tablesMapInc1 = new TableName[] { table1_restore }; + + if (TEST_UTIL.getAdmin().tableExists(table1_restore)) { + TEST_UTIL.deleteTable(table1_restore); + } + if (TEST_UTIL.getAdmin().tableExists(table2_restore)) { + 
TEST_UTIL.deleteTable(table2_restore); + } + + RestoreClient client = getRestoreClient(); + client.restore(BACKUP_ROOT_DIR, backupIdInc1, false, true, tablesRestoreInc1, + tablesMapInc1, false); + + HTable hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + hTable.close(); + + // #5 - restore incremental backup for table2 + + TableName[] tablesRestoreInc2 = new TableName[] { table2 }; + TableName[] tablesMapInc2 = new TableName[] { table2_restore }; + + client = getRestoreClient(); + client.restore(BACKUP_ROOT_DIR, backupIdInc2, false, true, tablesRestoreInc2, + tablesMapInc2, false); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + hTable.close(); + + conn.close(); + } + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index f97f30a06bc2afa9235e4699a439cc5f2e9a3f80..e29a4a6538209f044034194415769dac5548e461 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -11,6 +11,8 @@ package org.apache.hadoop.hbase.backup; +import static org.junit.Assert.assertTrue; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -35,6 +37,8 @@ public class TestRemoteBackup extends TestBackupBase { String backupId = backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete " + backupId); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index cd597e11cd2045780753f1aeb39d08afff3cde1b..32a028cd9f9949805f97d0e9e8db87d2e7ebd433 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -15,7 +15,6 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -40,7 +39,6 @@ public class TestRemoteRestore extends TestBackupBase { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - Path path = new Path(BACKUP_REMOTE_ROOT_DIR); getRestoreClient().restore(BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset, tablemap, false); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index acded775f5c6b533ca59b83b3fe33b3229376dc0..652a9091f41d1344316c645e9bc4751627853834 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -24,7 +24,6 @@ import java.util.List; import org.apache.commons.logging.Log; 
import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests;
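
Note on the command-line tests added in this patch: they all share one pattern for asserting on console output, namely swapping System.out for a PrintStream backed by a ByteArrayOutputStream, running BackupDriver or RestoreDriver through ToolRunner, and then matching on the captured text. The sketch below is not part of the patch; it isolates that pattern in a small standalone helper. The class name CapturedToolRun, the method runAndCapture, and the finally-block restoration of the original stream are assumptions of this sketch, not code from the change.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/** Minimal sketch of the stdout-capture pattern used by the command-line tests above. */
public class CapturedToolRun {

  /**
   * Redirects System.out, runs the given Hadoop Tool via ToolRunner, restores the original
   * stream, and returns everything the tool printed.
   */
  public static String runAndCapture(Configuration conf, Tool tool, String... args)
      throws Exception {
    PrintStream original = System.out;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    System.setOut(new PrintStream(baos));
    try {
      int ret = ToolRunner.run(conf, tool, args);
      if (ret != 0) {
        throw new IllegalStateException("tool exited with code " + ret);
      }
    } finally {
      // The tests in the patch never restore System.out; doing it here keeps later output visible.
      System.setOut(original);
    }
    return baos.toString();
  }
}

A test could then assert on the returned string, for example assertTrue(CapturedToolRun.runAndCapture(conf1, new BackupDriver(), "history", "-n", "10").contains(backupId)), assuming the conf1 and BackupDriver names used throughout the tests in this patch.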