- * optional .RegionSpecifier region = 1;
+ * optional .TableSchema table = 1;
+ */
+ boolean hasTable();
+ /**
+ * optional .TableSchema table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable();
+ /**
+ * optional .TableSchema table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder();
+
+ // optional .RegionInfo region = 2;
+ /**
+ * optional .RegionInfo region = 2;
*/
boolean hasRegion();
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion();
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder();
- // repeated string locations = 2;
+ // repeated string locations = 3;
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
java.util.List<java.lang.String>
getLocationsList();
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
int getLocationsCount();
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
java.lang.String getLocations(int index);
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
com.google.protobuf.ByteString
getLocationsBytes(int index);
@@ -806,22 +820,35 @@ public final class MapReduceProtos {
break;
}
case 10: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = table_.toBuilder();
+ }
+ table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(table_);
+ table_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = region_.toBuilder();
}
- region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
+ region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(region_);
region_ = subBuilder.buildPartial();
}
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000002;
break;
}
- case 18: {
- if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locations_ = new com.google.protobuf.LazyStringArrayList();
- mutable_bitField0_ |= 0x00000002;
+ mutable_bitField0_ |= 0x00000004;
}
locations_.add(input.readBytes());
break;
@@ -834,7 +861,7 @@ public final class MapReduceProtos {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
- if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
locations_ = new com.google.protobuf.UnmodifiableLazyStringList(locations_);
}
this.unknownFields = unknownFields.build();
@@ -869,52 +896,74 @@ public final class MapReduceProtos {
}
private int bitField0_;
- // optional .RegionSpecifier region = 1;
- public static final int REGION_FIELD_NUMBER = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_;
+ // optional .TableSchema table = 1;
+ public static final int TABLE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_;
/**
- * optional .RegionSpecifier region = 1;
+ * optional .TableSchema table = 1;
*/
- public boolean hasRegion() {
+ public boolean hasTable() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .TableSchema table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() {
+ return table_;
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() {
+ return table_;
+ }
+
+ // optional .RegionInfo region = 2;
+ public static final int REGION_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_;
+ /**
+ * optional .RegionInfo region = 2;
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .RegionInfo region = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
return region_;
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
return region_;
}
- // repeated string locations = 2;
- public static final int LOCATIONS_FIELD_NUMBER = 2;
+ // repeated string locations = 3;
+ public static final int LOCATIONS_FIELD_NUMBER = 3;
private com.google.protobuf.LazyStringList locations_;
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public java.util.List<java.lang.String>
getLocationsList() {
return locations_;
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public int getLocationsCount() {
return locations_.size();
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public java.lang.String getLocations(int index) {
return locations_.get(index);
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public com.google.protobuf.ByteString
getLocationsBytes(int index) {
@@ -922,7 +971,8 @@ public final class MapReduceProtos {
}
private void initFields() {
- region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
@@ -930,6 +980,12 @@ public final class MapReduceProtos {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (hasTable()) {
+ if (!getTable().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
if (hasRegion()) {
if (!getRegion().isInitialized()) {
memoizedIsInitialized = 0;
@@ -944,10 +1000,13 @@ public final class MapReduceProtos {
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeMessage(1, region_);
+ output.writeMessage(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, region_);
}
for (int i = 0; i < locations_.size(); i++) {
- output.writeBytes(2, locations_.getByteString(i));
+ output.writeBytes(3, locations_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
@@ -960,7 +1019,11 @@ public final class MapReduceProtos {
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(1, region_);
+ .computeMessageSize(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, region_);
}
{
int dataSize = 0;
@@ -994,6 +1057,11 @@ public final class MapReduceProtos {
org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other = (org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit) obj;
boolean result = true;
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
result = result && (hasRegion() == other.hasRegion());
if (hasRegion()) {
result = result && getRegion()
@@ -1014,6 +1082,10 @@ public final class MapReduceProtos {
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
if (hasRegion()) {
hash = (37 * hash) + REGION_FIELD_NUMBER;
hash = (53 * hash) + getRegion().hashCode();
@@ -1123,6 +1195,7 @@ public final class MapReduceProtos {
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableFieldBuilder();
getRegionFieldBuilder();
}
}
@@ -1132,14 +1205,20 @@ public final class MapReduceProtos {
public Builder clear() {
super.clear();
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
if (regionBuilder_ == null) {
- region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
} else {
regionBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000001);
- locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
+ locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -1171,15 +1250,23 @@ public final class MapReduceProtos {
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
+ if (tableBuilder_ == null) {
+ result.table_ = table_;
+ } else {
+ result.table_ = tableBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
if (regionBuilder_ == null) {
result.region_ = region_;
} else {
result.region_ = regionBuilder_.build();
}
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
locations_ = new com.google.protobuf.UnmodifiableLazyStringList(
locations_);
- bitField0_ = (bitField0_ & ~0x00000002);
+ bitField0_ = (bitField0_ & ~0x00000004);
}
result.locations_ = locations_;
result.bitField0_ = to_bitField0_;
@@ -1198,13 +1285,16 @@ public final class MapReduceProtos {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.getDefaultInstance()) return this;
+ if (other.hasTable()) {
+ mergeTable(other.getTable());
+ }
if (other.hasRegion()) {
mergeRegion(other.getRegion());
}
if (!other.locations_.isEmpty()) {
if (locations_.isEmpty()) {
locations_ = other.locations_;
- bitField0_ = (bitField0_ & ~0x00000002);
+ bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureLocationsIsMutable();
locations_.addAll(other.locations_);
@@ -1216,6 +1306,12 @@ public final class MapReduceProtos {
}
public final boolean isInitialized() {
+ if (hasTable()) {
+ if (!getTable().isInitialized()) {
+
+ return false;
+ }
+ }
if (hasRegion()) {
if (!getRegion().isInitialized()) {
@@ -1244,20 +1340,137 @@ public final class MapReduceProtos {
}
private int bitField0_;
- // optional .RegionSpecifier region = 1;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ // optional .TableSchema table = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableBuilder_;
/**
- * optional .RegionSpecifier region = 1;
+ * optional .TableSchema table = 1;
*/
- public boolean hasRegion() {
+ public boolean hasTable() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .TableSchema table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() {
+ if (tableBuilder_ == null) {
+ return table_;
+ } else {
+ return tableBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ table_ = value;
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public Builder setTable(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableBuilder_ == null) {
+ table_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ table_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(table_).mergeFrom(value).buildPartial();
+ } else {
+ table_ = value;
+ }
+ onChanged();
+ } else {
+ tableBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public Builder clearTable() {
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * optional .TableSchema table = 1;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() {
+ if (tableBuilder_ != null) {
+ return tableBuilder_.getMessageOrBuilder();
+ } else {
+ return table_;
+ }
+ }
+ /**
+ * optional .TableSchema table = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableFieldBuilder() {
+ if (tableBuilder_ == null) {
+ tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ table_,
+ getParentForChildren(),
+ isClean());
+ table_ = null;
+ }
+ return tableBuilder_;
+ }
+
+ // optional .RegionInfo region = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_;
+ /**
+ * optional .RegionInfo region = 2;
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .RegionInfo region = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
if (regionBuilder_ == null) {
return region_;
} else {
@@ -1265,9 +1478,9 @@ public final class MapReduceProtos {
}
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
if (regionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -1277,32 +1490,32 @@ public final class MapReduceProtos {
} else {
regionBuilder_.setMessage(value);
}
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
public Builder setRegion(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
if (regionBuilder_ == null) {
region_ = builderForValue.build();
onChanged();
} else {
regionBuilder_.setMessage(builderForValue.build());
}
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
if (regionBuilder_ == null) {
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
- region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
region_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial();
} else {
region_ = value;
}
@@ -1310,34 +1523,34 @@ public final class MapReduceProtos {
} else {
regionBuilder_.mergeFrom(value);
}
- bitField0_ |= 0x00000001;
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
public Builder clearRegion() {
if (regionBuilder_ == null) {
- region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
onChanged();
} else {
regionBuilder_.clear();
}
- bitField0_ = (bitField0_ & ~0x00000001);
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() {
- bitField0_ |= 0x00000001;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() {
+ bitField0_ |= 0x00000002;
onChanged();
return getRegionFieldBuilder().getBuilder();
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
if (regionBuilder_ != null) {
return regionBuilder_.getMessageOrBuilder();
} else {
@@ -1345,14 +1558,14 @@ public final class MapReduceProtos {
}
}
/**
- * optional .RegionSpecifier region = 1;
+ * optional .RegionInfo region = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
getRegionFieldBuilder() {
if (regionBuilder_ == null) {
regionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
region_,
getParentForChildren(),
isClean());
@@ -1361,42 +1574,42 @@ public final class MapReduceProtos {
return regionBuilder_;
}
- // repeated string locations = 2;
+ // repeated string locations = 3;
private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureLocationsIsMutable() {
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
locations_ = new com.google.protobuf.LazyStringArrayList(locations_);
- bitField0_ |= 0x00000002;
+ bitField0_ |= 0x00000004;
}
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public java.util.List<java.lang.String>
getLocationsList() {
return java.util.Collections.unmodifiableList(locations_);
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public int getLocationsCount() {
return locations_.size();
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public java.lang.String getLocations(int index) {
return locations_.get(index);
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public com.google.protobuf.ByteString
getLocationsBytes(int index) {
return locations_.getByteString(index);
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public Builder setLocations(
int index, java.lang.String value) {
@@ -1409,7 +1622,7 @@ public final class MapReduceProtos {
return this;
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public Builder addLocations(
java.lang.String value) {
@@ -1422,7 +1635,7 @@ public final class MapReduceProtos {
return this;
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public Builder addAllLocations(
java.lang.Iterable<java.lang.String> values) {
ensureLocationsIsMutable();
super.addAll(values, locations_);
onChanged();
return this;
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public Builder clearLocations() {
locations_ = com.google.protobuf.LazyStringArrayList.EMPTY;
- bitField0_ = (bitField0_ & ~0x00000002);
+ bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
- * repeated string locations = 2;
+ * repeated string locations = 3;
*/
public Builder addLocationsBytes(
com.google.protobuf.ByteString value) {
@@ -1485,11 +1698,12 @@ public final class MapReduceProtos {
static {
java.lang.String[] descriptorData = {
"\n\017MapReduce.proto\032\013HBase.proto\".\n\013ScanMe" +
- "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"O" +
- "\n\030TableSnapshotRegionSplit\022 \n\006region\030\001 \001" +
- "(\0132\020.RegionSpecifier\022\021\n\tlocations\030\002 \003(\tB" +
- "B\n*org.apache.hadoop.hbase.protobuf.gene" +
- "ratedB\017MapReduceProtosH\001\240\001\001"
+ "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"g" +
+ "\n\030TableSnapshotRegionSplit\022\033\n\005table\030\001 \001(" +
+ "\0132\014.TableSchema\022\033\n\006region\030\002 \001(\0132\013.Region" +
+ "Info\022\021\n\tlocations\030\003 \003(\tBB\n*org.apache.ha" +
+ "doop.hbase.protobuf.generatedB\017MapReduce" +
+ "ProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -1507,7 +1721,7 @@ public final class MapReduceProtos {
internal_static_TableSnapshotRegionSplit_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TableSnapshotRegionSplit_descriptor,
- new java.lang.String[] { "Region", "Locations", });
+ new java.lang.String[] { "Table", "Region", "Locations", });
return null;
}
};
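
Note: the regenerated accessors and descriptor string above correspond to a TableSnapshotRegionSplit message of roughly this shape (reconstructed here from the generated code and its field comments, not copied verbatim from MapReduce.proto):

    message TableSnapshotRegionSplit {
      optional TableSchema table = 1;
      optional RegionInfo region = 2;
      repeated string locations = 3;
    }

The old split carried only a RegionSpecifier plus locations; the regenerated class adds the table schema and switches the region field to a full RegionInfo, which is why the field numbers and every bitField0_ mask above shift by one position.
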
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
new file mode 100644
index 0000000..8dbb5ad
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java
@@ -0,0 +1,4787 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Snapshot.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class SnapshotProtos {
+ private SnapshotProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface SnapshotFileInfoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .SnapshotFileInfo.Type type = 1;
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ boolean hasType();
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType();
+
+ // optional string hfile = 3;
+ /**
+ * optional string hfile = 3;
+ */
+ boolean hasHfile();
+ /**
+ * optional string hfile = 3;
+ */
+ java.lang.String getHfile();
+ /**
+ * optional string hfile = 3;
+ */
+ com.google.protobuf.ByteString
+ getHfileBytes();
+
+ // optional string wal_server = 4;
+ /**
+ * optional string wal_server = 4;
+ */
+ boolean hasWalServer();
+ /**
+ * optional string wal_server = 4;
+ */
+ java.lang.String getWalServer();
+ /**
+ * optional string wal_server = 4;
+ */
+ com.google.protobuf.ByteString
+ getWalServerBytes();
+
+ // optional string wal_name = 5;
+ /**
+ * optional string wal_name = 5;
+ */
+ boolean hasWalName();
+ /**
+ * optional string wal_name = 5;
+ */
+ java.lang.String getWalName();
+ /**
+ * optional string wal_name = 5;
+ */
+ com.google.protobuf.ByteString
+ getWalNameBytes();
+ }
+ /**
+ * Protobuf type {@code SnapshotFileInfo}
+ */
+ public static final class SnapshotFileInfo extends
+ com.google.protobuf.GeneratedMessage
+ implements SnapshotFileInfoOrBuilder {
+ // Use SnapshotFileInfo.newBuilder() to construct.
+ private SnapshotFileInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SnapshotFileInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SnapshotFileInfo defaultInstance;
+ public static SnapshotFileInfo getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SnapshotFileInfo getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SnapshotFileInfo(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type value = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(1, rawValue);
+ } else {
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ }
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000002;
+ hfile_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000004;
+ walServer_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000008;
+ walName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class);
+ }
+
+
+ public static com.google.protobuf.Parser<SnapshotFileInfo> PARSER =
+ new com.google.protobuf.AbstractParser<SnapshotFileInfo>() {
+ public SnapshotFileInfo parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SnapshotFileInfo(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<SnapshotFileInfo> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code SnapshotFileInfo.Type}
+ */
+ public enum Type
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * HFILE = 1;
+ */
+ HFILE(0, 1),
+ /**
+ * WAL = 2;
+ */
+ WAL(1, 2),
+ ;
+
+ /**
+ * HFILE = 1;
+ */
+ public static final int HFILE_VALUE = 1;
+ /**
+ * WAL = 2;
+ */
+ public static final int WAL_VALUE = 2;
+
+
+ public final int getNumber() { return value; }
+
+ public static Type valueOf(int value) {
+ switch (value) {
+ case 1: return HFILE;
+ case 2: return WAL;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<Type>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<Type>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<Type>() {
+ public Type findValueByNumber(int number) {
+ return Type.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Type[] VALUES = values();
+
+ public static Type valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getIndex() == -1) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Type(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type)
+ }
+
+ private int bitField0_;
+ // required .SnapshotFileInfo.Type type = 1;
+ public static final int TYPE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type type_;
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType() {
+ return type_;
+ }
+
+ // optional string hfile = 3;
+ public static final int HFILE_FIELD_NUMBER = 3;
+ private java.lang.Object hfile_;
+ /**
+ * optional string hfile = 3;
+ */
+ public boolean hasHfile() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public java.lang.String getHfile() {
+ java.lang.Object ref = hfile_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ hfile_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public com.google.protobuf.ByteString
+ getHfileBytes() {
+ java.lang.Object ref = hfile_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hfile_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string wal_server = 4;
+ public static final int WAL_SERVER_FIELD_NUMBER = 4;
+ private java.lang.Object walServer_;
+ /**
+ * optional string wal_server = 4;
+ */
+ public boolean hasWalServer() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public java.lang.String getWalServer() {
+ java.lang.Object ref = walServer_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ walServer_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public com.google.protobuf.ByteString
+ getWalServerBytes() {
+ java.lang.Object ref = walServer_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ walServer_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string wal_name = 5;
+ public static final int WAL_NAME_FIELD_NUMBER = 5;
+ private java.lang.Object walName_;
+ /**
+ * optional string wal_name = 5;
+ */
+ public boolean hasWalName() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public java.lang.String getWalName() {
+ java.lang.Object ref = walName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ walName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public com.google.protobuf.ByteString
+ getWalNameBytes() {
+ java.lang.Object ref = walName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ walName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE;
+ hfile_ = "";
+ walServer_ = "";
+ walName_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(3, getHfileBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(4, getWalServerBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(5, getWalNameBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getHfileBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getWalServerBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getWalNameBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo) obj;
+
+ boolean result = true;
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
+ result = result &&
+ (getType() == other.getType());
+ }
+ result = result && (hasHfile() == other.hasHfile());
+ if (hasHfile()) {
+ result = result && getHfile()
+ .equals(other.getHfile());
+ }
+ result = result && (hasWalServer() == other.hasWalServer());
+ if (hasWalServer()) {
+ result = result && getWalServer()
+ .equals(other.getWalServer());
+ }
+ result = result && (hasWalName() == other.hasWalName());
+ if (hasWalName()) {
+ result = result && getWalName()
+ .equals(other.getWalName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ if (hasHfile()) {
+ hash = (37 * hash) + HFILE_FIELD_NUMBER;
+ hash = (53 * hash) + getHfile().hashCode();
+ }
+ if (hasWalServer()) {
+ hash = (37 * hash) + WAL_SERVER_FIELD_NUMBER;
+ hash = (53 * hash) + getWalServer().hashCode();
+ }
+ if (hasWalName()) {
+ hash = (37 * hash) + WAL_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getWalName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SnapshotFileInfo}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder {
+ private int bitField0_;
+
+ // required .SnapshotFileInfo.Type type = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE;
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType() {
+ return type_;
+ }
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public Builder setType(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .SnapshotFileInfo.Type type = 1;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE;
+ onChanged();
+ return this;
+ }
+
+ // optional string hfile = 3;
+ private java.lang.Object hfile_ = "";
+ /**
+ * optional string hfile = 3;
+ */
+ public boolean hasHfile() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public java.lang.String getHfile() {
+ java.lang.Object ref = hfile_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ hfile_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public com.google.protobuf.ByteString
+ getHfileBytes() {
+ java.lang.Object ref = hfile_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hfile_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public Builder setHfile(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ hfile_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public Builder clearHfile() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ hfile_ = getDefaultInstance().getHfile();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hfile = 3;
+ */
+ public Builder setHfileBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ hfile_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string wal_server = 4;
+ private java.lang.Object walServer_ = "";
+ /**
+ * optional string wal_server = 4;
+ */
+ public boolean hasWalServer() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public java.lang.String getWalServer() {
+ java.lang.Object ref = walServer_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ walServer_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public com.google.protobuf.ByteString
+ getWalServerBytes() {
+ java.lang.Object ref = walServer_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ walServer_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public Builder setWalServer(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ walServer_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public Builder clearWalServer() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ walServer_ = getDefaultInstance().getWalServer();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string wal_server = 4;
+ */
+ public Builder setWalServerBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ walServer_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string wal_name = 5;
+ private java.lang.Object walName_ = "";
+ /**
+ * optional string wal_name = 5;
+ */
+ public boolean hasWalName() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public java.lang.String getWalName() {
+ java.lang.Object ref = walName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ walName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public com.google.protobuf.ByteString
+ getWalNameBytes() {
+ java.lang.Object ref = walName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ walName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public Builder setWalName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ walName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public Builder clearWalName() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ walName_ = getDefaultInstance().getWalName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string wal_name = 5;
+ */
+ public Builder setWalNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ walName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SnapshotFileInfo)
+ }
+
+ static {
+ defaultInstance = new SnapshotFileInfo(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:SnapshotFileInfo)
+ }
+
+ public interface SnapshotRegionManifestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional int32 version = 1;
+ /**
+ * optional int32 version = 1;
+ */
+ boolean hasVersion();
+ /**
+ * optional int32 version = 1;
+ */
+ int getVersion();
+
+ // required .RegionInfo region_info = 2;
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ boolean hasRegionInfo();
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
+
+ // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles>
+ getFamilyFilesList();
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index);
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ int getFamilyFilesCount();
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder>
+ getFamilyFilesOrBuilderList();
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code SnapshotRegionManifest}
+ */
+ public static final class SnapshotRegionManifest extends
+ com.google.protobuf.GeneratedMessage
+ implements SnapshotRegionManifestOrBuilder {
+ // Use SnapshotRegionManifest.newBuilder() to construct.
+ private SnapshotRegionManifest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SnapshotRegionManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SnapshotRegionManifest defaultInstance;
+ public static SnapshotRegionManifest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SnapshotRegionManifest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SnapshotRegionManifest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ version_ = input.readInt32();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = regionInfo_.toBuilder();
+ }
+ regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(regionInfo_);
+ regionInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ familyFiles_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ familyFiles_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ familyFiles_ = java.util.Collections.unmodifiableList(familyFiles_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<SnapshotRegionManifest> PARSER =
+ new com.google.protobuf.AbstractParser<SnapshotRegionManifest>() {
+ public SnapshotRegionManifest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SnapshotRegionManifest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<SnapshotRegionManifest> getParserForType() {
+ return PARSER;
+ }
+
+ public interface StoreFileOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ /**
+ * required string name = 1;
+ */
+ boolean hasName();
+ /**
+ * required string name = 1;
+ */
+ java.lang.String getName();
+ /**
+ * required string name = 1;
+ */
+ com.google.protobuf.ByteString
+ getNameBytes();
+
+ // optional .Reference reference = 2;
+ /**
+ * optional .Reference reference = 2;
+ */
+ boolean hasReference();
+ /**
+ * optional .Reference reference = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference();
+ /**
+ * optional .Reference reference = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder();
+
+ // optional uint64 file_size = 3;
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ boolean hasFileSize();
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ long getFileSize();
+ }
+ /**
+ * Protobuf type {@code SnapshotRegionManifest.StoreFile}
+ */
+ public static final class StoreFile extends
+ com.google.protobuf.GeneratedMessage
+ implements StoreFileOrBuilder {
+ // Use StoreFile.newBuilder() to construct.
+ private StoreFile(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private StoreFile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final StoreFile defaultInstance;
+ public static StoreFile getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StoreFile getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private StoreFile(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = reference_.toBuilder();
+ }
+ reference_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(reference_);
+ reference_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ fileSize_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<StoreFile> PARSER =
+ new com.google.protobuf.AbstractParser<StoreFile>() {
+ public StoreFile parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new StoreFile(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<StoreFile> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ /**
+ * required string name = 1;
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string name = 1;
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .Reference reference = 2;
+ public static final int REFERENCE_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference reference_;
+ /**
+ * optional .Reference reference = 2;
+ */
+ public boolean hasReference() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference() {
+ return reference_;
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder() {
+ return reference_;
+ }
+
+ // optional uint64 file_size = 3;
+ public static final int FILE_SIZE_FIELD_NUMBER = 3;
+ private long fileSize_;
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ public boolean hasFileSize() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 file_size = 3;
+ *
+ * + * TODO: Add checksums or other fields to verify the file + *+ */ + public long getFileSize() { + return fileSize_; + } + + private void initFields() { + name_ = ""; + reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance(); + fileSize_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (hasReference()) { + if (!getReference().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, reference_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, fileSize_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, reference_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, fileSize_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasReference() == other.hasReference()); + if (hasReference()) { + result = result && getReference() + .equals(other.getReference()); + } + result = result && (hasFileSize() == other.hasFileSize()); + if (hasFileSize()) { + result = result && (getFileSize() + == other.getFileSize()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (hasReference()) { + hash = (37 * hash) + REFERENCE_FIELD_NUMBER; + hash = (53 * hash) + getReference().hashCode(); + } + if 
(hasFileSize()) { + hash = (37 * hash) + FILE_SIZE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getFileSize()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type 
{@code SnapshotRegionManifest.StoreFile}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ * required string name = 1;
+ */
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string name = 1;
+ */
+ public java.lang.String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string name = 1;
+ */
+ public Builder setName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string name = 1;
+ */
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string name = 1;
+ */
+ public Builder setNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .Reference reference = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder> referenceBuilder_;
+ /**
+ * optional .Reference reference = 2;
+ */
+ public boolean hasReference() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference() {
+ if (referenceBuilder_ == null) {
+ return reference_;
+ } else {
+ return referenceBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public Builder setReference(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference value) {
+ if (referenceBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ reference_ = value;
+ onChanged();
+ } else {
+ referenceBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public Builder setReference(
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder builderForValue) {
+ if (referenceBuilder_ == null) {
+ reference_ = builderForValue.build();
+ onChanged();
+ } else {
+ referenceBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public Builder mergeReference(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference value) {
+ if (referenceBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ reference_ != org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance()) {
+ reference_ =
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.newBuilder(reference_).mergeFrom(value).buildPartial();
+ } else {
+ reference_ = value;
+ }
+ onChanged();
+ } else {
+ referenceBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public Builder clearReference() {
+ if (referenceBuilder_ == null) {
+ reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance();
+ onChanged();
+ } else {
+ referenceBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder getReferenceBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getReferenceFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder() {
+ if (referenceBuilder_ != null) {
+ return referenceBuilder_.getMessageOrBuilder();
+ } else {
+ return reference_;
+ }
+ }
+ /**
+ * optional .Reference reference = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder>
+ getReferenceFieldBuilder() {
+ if (referenceBuilder_ == null) {
+ referenceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder>(
+ reference_,
+ getParentForChildren(),
+ isClean());
+ reference_ = null;
+ }
+ return referenceBuilder_;
+ }
+
+ // optional uint64 file_size = 3;
+ private long fileSize_ ;
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ public boolean hasFileSize() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ public long getFileSize() {
+ return fileSize_;
+ }
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ public Builder setFileSize(long value) {
+ bitField0_ |= 0x00000004;
+ fileSize_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 file_size = 3;
+ *
+ *
+ * TODO: Add checksums or other fields to verify the file
+ *
+ */
+ public Builder clearFileSize() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ fileSize_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SnapshotRegionManifest.StoreFile)
+ }
+
+ static {
+ defaultInstance = new StoreFile(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:SnapshotRegionManifest.StoreFile)
+ }
+
+ public interface FamilyFilesOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes family_name = 1;
+ /**
+ * required bytes family_name = 1;
+ */
+ boolean hasFamilyName();
+ /**
+ * required bytes family_name = 1;
+ */
+ com.google.protobuf.ByteString getFamilyName();
+
+ // repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile>
+ getStoreFilesList();
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index);
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ int getStoreFilesCount();
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder>
+ getStoreFilesOrBuilderList();
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder(
+ int index);
+ }
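
For context when reviewing the generated API above, the fragment below is a minimal usage sketch and is not part of the generated file or of this patch: it builds a SnapshotRegionManifest.StoreFile with the builder added above and round-trips it through the wire format. The file name and size are made-up example values.

    import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;

    public class StoreFileRoundTrip {
      public static void main(String[] args) throws Exception {
        // Build a StoreFile: name is the only required field; file_size is optional.
        SnapshotRegionManifest.StoreFile storeFile =
            SnapshotRegionManifest.StoreFile.newBuilder()
                .setName("example-hfile")   // hypothetical hfile name
                .setFileSize(1024L)
                .build();

        // Serialize, then parse back with the generated parseFrom().
        byte[] wire = storeFile.toByteArray();
        SnapshotRegionManifest.StoreFile copy =
            SnapshotRegionManifest.StoreFile.parseFrom(wire);
        System.out.println(copy.getName() + " size=" + copy.getFileSize());
      }
    }
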
+ /**
+ * Protobuf type {@code SnapshotRegionManifest.FamilyFiles}
+ */
+ public static final class FamilyFiles extends
+ com.google.protobuf.GeneratedMessage
+ implements FamilyFilesOrBuilder {
+ // Use FamilyFiles.newBuilder() to construct.
+ private FamilyFiles(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private FamilyFiles(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final FamilyFiles defaultInstance;
+ public static FamilyFiles getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public FamilyFiles getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private FamilyFiles(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ familyName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ storeFiles_ = new java.util.ArrayList
+ * required bytes family_name = 1;
+ */
+ public boolean hasFamilyName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bytes family_name = 1;
+ */
+ public com.google.protobuf.ByteString getFamilyName() {
+ return familyName_;
+ }
+
+ // repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ public static final int STORE_FILES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile> storeFiles_;
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile> getStoreFilesList() {
+ return storeFiles_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder>
+ getStoreFilesOrBuilderList() {
+ return storeFiles_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public int getStoreFilesCount() {
+ return storeFiles_.size();
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index) {
+ return storeFiles_.get(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder(
+ int index) {
+ return storeFiles_.get(index);
+ }
+
+ private void initFields() {
+ familyName_ = com.google.protobuf.ByteString.EMPTY;
+ storeFiles_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasFamilyName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getStoreFilesCount(); i++) {
+ if (!getStoreFiles(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, familyName_);
+ }
+ for (int i = 0; i < storeFiles_.size(); i++) {
+ output.writeMessage(2, storeFiles_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, familyName_);
+ }
+ for (int i = 0; i < storeFiles_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, storeFiles_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles) obj;
+
+ boolean result = true;
+ result = result && (hasFamilyName() == other.hasFamilyName());
+ if (hasFamilyName()) {
+ result = result && getFamilyName()
+ .equals(other.getFamilyName());
+ }
+ result = result && getStoreFilesList()
+ .equals(other.getStoreFilesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasFamilyName()) {
+ hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getFamilyName().hashCode();
+ }
+ if (getStoreFilesCount() > 0) {
+ hash = (37 * hash) + STORE_FILES_FIELD_NUMBER;
+ hash = (53 * hash) + getStoreFilesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SnapshotRegionManifest.FamilyFiles}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ * required bytes family_name = 1;
+ */
+ public boolean hasFamilyName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bytes family_name = 1;
+ */
+ public com.google.protobuf.ByteString getFamilyName() {
+ return familyName_;
+ }
+ /**
+ * required bytes family_name = 1;
+ */
+ public Builder setFamilyName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ familyName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bytes family_name = 1;
+ */
+ public Builder clearFamilyName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ familyName_ = getDefaultInstance().getFamilyName();
+ onChanged();
+ return this;
+ }
+
+ // repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ private java.util.List
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public java.util.List
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public int getStoreFilesCount() {
+ if (storeFilesBuilder_ == null) {
+ return storeFiles_.size();
+ } else {
+ return storeFilesBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index) {
+ if (storeFilesBuilder_ == null) {
+ return storeFiles_.get(index);
+ } else {
+ return storeFilesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder setStoreFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) {
+ if (storeFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreFilesIsMutable();
+ storeFiles_.set(index, value);
+ onChanged();
+ } else {
+ storeFilesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder setStoreFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) {
+ if (storeFilesBuilder_ == null) {
+ ensureStoreFilesIsMutable();
+ storeFiles_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ storeFilesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder addStoreFiles(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) {
+ if (storeFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreFilesIsMutable();
+ storeFiles_.add(value);
+ onChanged();
+ } else {
+ storeFilesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder addStoreFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) {
+ if (storeFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreFilesIsMutable();
+ storeFiles_.add(index, value);
+ onChanged();
+ } else {
+ storeFilesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder addStoreFiles(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) {
+ if (storeFilesBuilder_ == null) {
+ ensureStoreFilesIsMutable();
+ storeFiles_.add(builderForValue.build());
+ onChanged();
+ } else {
+ storeFilesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder addStoreFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) {
+ if (storeFilesBuilder_ == null) {
+ ensureStoreFilesIsMutable();
+ storeFiles_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ storeFilesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder addAllStoreFiles(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile> values) {
+ if (storeFilesBuilder_ == null) {
+ ensureStoreFilesIsMutable();
+ super.addAll(values, storeFiles_);
+ onChanged();
+ } else {
+ storeFilesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder clearStoreFiles() {
+ if (storeFilesBuilder_ == null) {
+ storeFiles_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ storeFilesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public Builder removeStoreFiles(int index) {
+ if (storeFilesBuilder_ == null) {
+ ensureStoreFilesIsMutable();
+ storeFiles_.remove(index);
+ onChanged();
+ } else {
+ storeFilesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder getStoreFilesBuilder(
+ int index) {
+ return getStoreFilesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder(
+ int index) {
+ if (storeFilesBuilder_ == null) {
+ return storeFiles_.get(index); } else {
+ return storeFilesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder>
+ getStoreFilesOrBuilderList() {
+ if (storeFilesBuilder_ != null) {
+ return storeFilesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(storeFiles_);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder addStoreFilesBuilder() {
+ return getStoreFilesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder addStoreFilesBuilder(
+ int index) {
+ return getStoreFilesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest.StoreFile store_files = 2;
+ */
+ public java.util.List
+ * optional int32 version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional int32 version = 1;
+ */
+ public int getVersion() {
+ return version_;
+ }
+
+ // required .RegionInfo region_info = 2;
+ public static final int REGION_INFO_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ return regionInfo_;
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ return regionInfo_;
+ }
+
+ // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ public static final int FAMILY_FILES_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles> familyFiles_;
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles> getFamilyFilesList() {
+ return familyFiles_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder>
+ getFamilyFilesOrBuilderList() {
+ return familyFiles_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public int getFamilyFilesCount() {
+ return familyFiles_.size();
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index) {
+ return familyFiles_.get(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder(
+ int index) {
+ return familyFiles_.get(index);
+ }
+
+ private void initFields() {
+ version_ = 0;
+ regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ familyFiles_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegionInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegionInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getFamilyFilesCount(); i++) {
+ if (!getFamilyFiles(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt32(1, version_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, regionInfo_);
+ }
+ for (int i = 0; i < familyFiles_.size(); i++) {
+ output.writeMessage(3, familyFiles_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(1, version_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, regionInfo_);
+ }
+ for (int i = 0; i < familyFiles_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, familyFiles_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) obj;
+
+ boolean result = true;
+ result = result && (hasVersion() == other.hasVersion());
+ if (hasVersion()) {
+ result = result && (getVersion()
+ == other.getVersion());
+ }
+ result = result && (hasRegionInfo() == other.hasRegionInfo());
+ if (hasRegionInfo()) {
+ result = result && getRegionInfo()
+ .equals(other.getRegionInfo());
+ }
+ result = result && getFamilyFilesList()
+ .equals(other.getFamilyFilesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasVersion()) {
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
+ hash = (53 * hash) + getVersion();
+ }
+ if (hasRegionInfo()) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfo().hashCode();
+ }
+ if (getFamilyFilesCount() > 0) {
+ hash = (37 * hash) + FAMILY_FILES_FIELD_NUMBER;
+ hash = (53 * hash) + getFamilyFilesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SnapshotRegionManifest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ * optional int32 version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional int32 version = 1;
+ */
+ public int getVersion() {
+ return version_;
+ }
+ /**
+ * optional int32 version = 1;
+ */
+ public Builder setVersion(int value) {
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 version = 1;
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // required .RegionInfo region_info = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public boolean hasRegionInfo() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_;
+ } else {
+ return regionInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ regionInfo_ = value;
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public Builder setRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+ regionInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
+ } else {
+ regionInfo_ = value;
+ }
+ onChanged();
+ } else {
+ regionInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getRegionInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return regionInfo_;
+ }
+ }
+ /**
+ * required .RegionInfo region_info = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ private java.util.List
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public java.util.List
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public int getFamilyFilesCount() {
+ if (familyFilesBuilder_ == null) {
+ return familyFiles_.size();
+ } else {
+ return familyFilesBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index) {
+ if (familyFilesBuilder_ == null) {
+ return familyFiles_.get(index);
+ } else {
+ return familyFilesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder setFamilyFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) {
+ if (familyFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFamilyFilesIsMutable();
+ familyFiles_.set(index, value);
+ onChanged();
+ } else {
+ familyFilesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder setFamilyFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) {
+ if (familyFilesBuilder_ == null) {
+ ensureFamilyFilesIsMutable();
+ familyFiles_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ familyFilesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder addFamilyFiles(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) {
+ if (familyFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFamilyFilesIsMutable();
+ familyFiles_.add(value);
+ onChanged();
+ } else {
+ familyFilesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder addFamilyFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) {
+ if (familyFilesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFamilyFilesIsMutable();
+ familyFiles_.add(index, value);
+ onChanged();
+ } else {
+ familyFilesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder addFamilyFiles(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) {
+ if (familyFilesBuilder_ == null) {
+ ensureFamilyFilesIsMutable();
+ familyFiles_.add(builderForValue.build());
+ onChanged();
+ } else {
+ familyFilesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder addFamilyFiles(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) {
+ if (familyFilesBuilder_ == null) {
+ ensureFamilyFilesIsMutable();
+ familyFiles_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ familyFilesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder addAllFamilyFiles(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles> values) {
+ if (familyFilesBuilder_ == null) {
+ ensureFamilyFilesIsMutable();
+ super.addAll(values, familyFiles_);
+ onChanged();
+ } else {
+ familyFilesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder clearFamilyFiles() {
+ if (familyFilesBuilder_ == null) {
+ familyFiles_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ familyFilesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public Builder removeFamilyFiles(int index) {
+ if (familyFilesBuilder_ == null) {
+ ensureFamilyFilesIsMutable();
+ familyFiles_.remove(index);
+ onChanged();
+ } else {
+ familyFilesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder getFamilyFilesBuilder(
+ int index) {
+ return getFamilyFilesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder(
+ int index) {
+ if (familyFilesBuilder_ == null) {
+ return familyFiles_.get(index); } else {
+ return familyFilesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder>
+ getFamilyFilesOrBuilderList() {
+ if (familyFilesBuilder_ != null) {
+ return familyFilesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(familyFiles_);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder addFamilyFilesBuilder() {
+ return getFamilyFilesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder addFamilyFilesBuilder(
+ int index) {
+ return getFamilyFilesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3;
+ */
+ public java.util.List
+ * required .TableSchema table_schema = 1;
+ */
+ boolean hasTableSchema();
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema();
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder();
+
+ // repeated .SnapshotRegionManifest region_manifests = 2;
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest>
+ getRegionManifestsList();
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index);
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ int getRegionManifestsCount();
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>
+ getRegionManifestsOrBuilderList();
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder(
+ int index);
+ }
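
As a reading aid for how the message types added in this patch nest, the fragment below is a minimal assembly sketch, again not part of the generated file: it wires StoreFile into FamilyFiles, FamilyFiles into SnapshotRegionManifest, and SnapshotRegionManifest into SnapshotDataManifest. The TableSchema and RegionInfo arguments are assumed to be built elsewhere with the existing HBaseProtos API, and the SnapshotDataManifest builder methods are assumed to follow the same generated naming pattern shown for the other messages.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;

    public class ManifestAssembly {
      // Assembles a SnapshotDataManifest for a single region and column family.
      static SnapshotProtos.SnapshotDataManifest buildManifest(
          HBaseProtos.TableSchema tableSchema, HBaseProtos.RegionInfo regionInfo) {
        SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile =
            SnapshotProtos.SnapshotRegionManifest.StoreFile.newBuilder()
                .setName("example-hfile")                      // required string name = 1
                .setFileSize(1024L)                            // optional uint64 file_size = 3
                .build();

        SnapshotProtos.SnapshotRegionManifest.FamilyFiles familyFiles =
            SnapshotProtos.SnapshotRegionManifest.FamilyFiles.newBuilder()
                .setFamilyName(ByteString.copyFromUtf8("cf"))  // required bytes family_name = 1
                .addStoreFiles(storeFile)                      // repeated store_files = 2
                .build();

        SnapshotProtos.SnapshotRegionManifest regionManifest =
            SnapshotProtos.SnapshotRegionManifest.newBuilder()
                .setRegionInfo(regionInfo)                     // required .RegionInfo region_info = 2
                .addFamilyFiles(familyFiles)                   // repeated family_files = 3
                .build();

        return SnapshotProtos.SnapshotDataManifest.newBuilder()
            .setTableSchema(tableSchema)                       // required .TableSchema table_schema = 1
            .addRegionManifests(regionManifest)                // repeated region_manifests = 2
            .build();
      }
    }
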
+ /**
+ * Protobuf type {@code SnapshotDataManifest}
+ */
+ public static final class SnapshotDataManifest extends
+ com.google.protobuf.GeneratedMessage
+ implements SnapshotDataManifestOrBuilder {
+ // Use SnapshotDataManifest.newBuilder() to construct.
+ private SnapshotDataManifest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SnapshotDataManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SnapshotDataManifest defaultInstance;
+ public static SnapshotDataManifest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SnapshotDataManifest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SnapshotDataManifest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = tableSchema_.toBuilder();
+ }
+ tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableSchema_);
+ tableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ regionManifests_ = new java.util.ArrayList
+ * required .TableSchema table_schema = 1;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ return tableSchema_;
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ return tableSchema_;
+ }
+
+ // repeated .SnapshotRegionManifest region_manifests = 2;
+ public static final int REGION_MANIFESTS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest> regionManifests_;
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest> getRegionManifestsList() {
+ return regionManifests_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>
+ getRegionManifestsOrBuilderList() {
+ return regionManifests_;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public int getRegionManifestsCount() {
+ return regionManifests_.size();
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index) {
+ return regionManifests_.get(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder(
+ int index) {
+ return regionManifests_.get(index);
+ }
+
+ private void initFields() {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionManifests_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTableSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionManifestsCount(); i++) {
+ if (!getRegionManifests(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, tableSchema_);
+ }
+ for (int i = 0; i < regionManifests_.size(); i++) {
+ output.writeMessage(2, regionManifests_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, tableSchema_);
+ }
+ for (int i = 0; i < regionManifests_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, regionManifests_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest) obj;
+
+ boolean result = true;
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionManifestsList()
+ .equals(other.getRegionManifestsList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionManifestsCount() > 0) {
+ hash = (37 * hash) + REGION_MANIFESTS_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionManifestsList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code SnapshotDataManifest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifestOrBuilder {
+
+ private int bitField0_;
+
+ // required .TableSchema table_schema = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .SnapshotRegionManifest region_manifests = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest> regionManifests_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionManifestsIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ regionManifests_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest>(regionManifests_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder> regionManifestsBuilder_;
+
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest> getRegionManifestsList() {
+ if (regionManifestsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionManifests_);
+ } else {
+ return regionManifestsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public int getRegionManifestsCount() {
+ if (regionManifestsBuilder_ == null) {
+ return regionManifests_.size();
+ } else {
+ return regionManifestsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index) {
+ if (regionManifestsBuilder_ == null) {
+ return regionManifests_.get(index);
+ } else {
+ return regionManifestsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder setRegionManifests(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) {
+ if (regionManifestsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionManifestsIsMutable();
+ regionManifests_.set(index, value);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder setRegionManifests(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) {
+ if (regionManifestsBuilder_ == null) {
+ ensureRegionManifestsIsMutable();
+ regionManifests_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionManifestsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder addRegionManifests(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) {
+ if (regionManifestsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionManifestsIsMutable();
+ regionManifests_.add(value);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder addRegionManifests(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) {
+ if (regionManifestsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionManifestsIsMutable();
+ regionManifests_.add(index, value);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder addRegionManifests(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) {
+ if (regionManifestsBuilder_ == null) {
+ ensureRegionManifestsIsMutable();
+ regionManifests_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionManifestsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder addRegionManifests(
+ int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) {
+ if (regionManifestsBuilder_ == null) {
+ ensureRegionManifestsIsMutable();
+ regionManifests_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionManifestsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder addAllRegionManifests(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest> values) {
+ if (regionManifestsBuilder_ == null) {
+ ensureRegionManifestsIsMutable();
+ super.addAll(values, regionManifests_);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder clearRegionManifests() {
+ if (regionManifestsBuilder_ == null) {
+ regionManifests_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public Builder removeRegionManifests(int index) {
+ if (regionManifestsBuilder_ == null) {
+ ensureRegionManifestsIsMutable();
+ regionManifests_.remove(index);
+ onChanged();
+ } else {
+ regionManifestsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder getRegionManifestsBuilder(
+ int index) {
+ return getRegionManifestsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder(
+ int index) {
+ if (regionManifestsBuilder_ == null) {
+ return regionManifests_.get(index); } else {
+ return regionManifestsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>
+ getRegionManifestsOrBuilderList() {
+ if (regionManifestsBuilder_ != null) {
+ return regionManifestsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionManifests_);
+ }
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder addRegionManifestsBuilder() {
+ return getRegionManifestsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder addRegionManifestsBuilder(
+ int index) {
+ return getRegionManifestsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance());
+ }
+ /**
+ * repeated .SnapshotRegionManifest region_manifests = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder>
+ getRegionManifestsBuilderList() {
+ return getRegionManifestsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>
+ getRegionManifestsFieldBuilder() {
+ if (regionManifestsBuilder_ == null) {
+ regionManifestsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>(
+ regionManifests_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ regionManifests_ = null;
+ }
+ return regionManifestsBuilder_;
+ }
+ }
+ }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
* Usage is similar to TableInputFormat, and
- * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
+ * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
* boolean, Path)}
* can be used to configure the job.
* {@code
@@ -101,12 +95,12 @@ import com.google.common.annotations.VisibleForTesting;
*
* HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from
* snapshot files and data files. HBase also enforces security because all the requests are handled
- * by the server layer, and the user cannot read from the data files directly.
- * To read from snapshot files directly from the file system, the user who is running the MR job
- * must have sufficient permissions to access snapshot and reference files.
- * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase
- * user or the user must have group or other priviledges in the filesystem (See HBASE-8369).
- * Note that, given other users access to read from snapshot/data files will completely circumvent
+ * by the server layer, and the user cannot read from the data files directly.
+ * To read from snapshot files directly from the file system, the user who is running the MR job
+ * must have sufficient permissions to access snapshot and reference files.
+ * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase
+ * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
+ * Note that, giving other users access to read from snapshot/data files will completely circumvent
* the access control enforced by HBase.
* @see TableSnapshotScanner
*/
@@ -119,22 +113,25 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable, Result> {
- TableSnapshotRegionSplit(String regionName, List<String> locations) {
- this.regionName = regionName;
+ TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo, List<String> locations) {
+ this.htd = htd;
+ this.regionInfo = regionInfo;
if (locations == null || locations.isEmpty()) {
this.locations = new String[0];
} else {
@@ -158,9 +155,8 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable, Result> {
List<String> locationsList = split.getLocationsList();
this.locations = locationsList.toArray(new String[locationsList.size()]);
}
}
@VisibleForTesting
- static class TableSnapshotRegionRecordReader extends
+ static class TableSnapshotRegionRecordReader extends
RecordReader<ImmutableBytesWritable, Result> {
private TableSnapshotRegionSplit split;
private Scan scan;
@@ -205,23 +202,13 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable, Result> {
- Set<String> snapshotRegionNames
- = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
- if (snapshotRegionNames == null) {
+ SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
+ List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+ if (regionManifests == null) {
throw new IllegalArgumentException("Snapshot seems empty");
}
// load table descriptor
- HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs,
- snapshotDir);
+ HTableDescriptor htd = manifest.getTableDescriptor();
Scan scan = TableMapReduceUtil.convertStringToScan(conf
.get(TableInputFormat.SCAN));
Path tableDir = new Path(conf.get(TABLE_DIR_KEY));
List<InputSplit> splits = new ArrayList<InputSplit>();
- for (String regionName : snapshotRegionNames) {
+ for (SnapshotRegionManifest regionManifest : regionManifests) {
// load region descriptor
- Path regionDir = new Path(snapshotDir, regionName);
- HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs,
- regionDir);
+ HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
hri.getStartKey(), hri.getEndKey())) {
@@ -329,7 +313,7 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable, Result> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
for (Pair<HRegionInfo, ServerName> region : regions) {
HRegionInfo regionInfo = region.getFirst();
if (regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent())) {
- if (!fs.exists(new Path(snapshotDir, regionInfo.getEncodedName()))) {
- LOG.info("Take disabled snapshot of offline region=" + regionInfo);
- snapshotDisabledRegion(regionInfo);
- }
+ LOG.info("Take disabled snapshot of offline region=" + regionInfo);
+ snapshotDisabledRegion(regionInfo);
}
}
} catch (InterruptedException e) {
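
For context on the TableSnapshotInputFormat changes earlier in this patch: the driver-side wiring is unchanged, so a job over a snapshot is still configured through the TableMapReduceUtil#initTableSnapshotMapperJob overload referenced in the class javadoc. The following is a minimal sketch and not part of the patch; the snapshot name, mapper, restore directory, and NullOutputFormat choice are illustrative assumptions.

// Sketch only (not part of this patch). Assumes a snapshot named "usertable-snap" exists.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class SnapshotScanJob {

  // Identity mapper: re-emits every row read from the snapshot files.
  public static class SnapshotMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-snapshot");
    job.setJarByClass(SnapshotScanJob.class);

    Scan scan = new Scan();                              // full scan over the snapshot
    Path restoreDir = new Path("/tmp/snapshot-restore"); // scratch dir where references are restored

    TableMapReduceUtil.initTableSnapshotMapperJob(
        "usertable-snap", scan, SnapshotMapper.class,
        ImmutableBytesWritable.class, Result.class, job,
        true,         // addDependencyJars
        restoreDir);

    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);    // no job output in this sketch
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
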
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index 9578c50..c3f6f75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -19,35 +19,28 @@ package org.apache.hadoop.hbase.master.snapshot;
import java.io.IOException;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSVisitor;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
/**
* General snapshot verification on the master.
@@ -110,14 +103,16 @@ public final class MasterSnapshotVerifier {
*/
public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
throws CorruptedSnapshotException, IOException {
+ SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs,
+ snapshotDir, snapshot);
// verify snapshot info matches
verifySnapshotDescription(snapshotDir);
// check that tableinfo is a valid table description
- verifyTableInfo(snapshotDir);
+ verifyTableInfo(manifest);
// check that each region is valid
- verifyRegions(snapshotDir);
+ verifyRegions(manifest);
}
/**
@@ -136,8 +131,16 @@ public final class MasterSnapshotVerifier {
* Check that the table descriptor for the snapshot is a valid table descriptor
* @param snapshotDir snapshot directory to check
*/
- private void verifyTableInfo(Path snapshotDir) throws IOException {
- FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+ private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
+ HTableDescriptor htd = manifest.getTableDescriptor();
+ if (htd == null) {
+ throw new CorruptedSnapshotException("Missing Table Descriptor", snapshot);
+ }
+
+ if (!htd.getNameAsString().equals(snapshot.getTable())) {
+ throw new CorruptedSnapshotException("Invalid Table Descriptor. Expected "
+ + snapshot.getTable() + " name, got " + htd.getNameAsString(), snapshot);
+ }
}
/**
@@ -145,34 +148,36 @@ public final class MasterSnapshotVerifier {
* @param snapshotDir snapshot directory to check
* @throws IOException if we can't reach hbase:meta or read the files from the FS
*/
- private void verifyRegions(Path snapshotDir) throws IOException {
+ private void verifyRegions(final SnapshotManifest manifest) throws IOException {
List<HRegionInfo> regions = MetaReader.getTableRegions(this.services.getCatalogTracker(),
tableName);
- Set<String> snapshotRegions = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
- if (snapshotRegions == null) {
+ Map<String, SnapshotRegionManifest> regionManifests = manifest.getRegionManifestsMap();
+ if (regionManifests == null) {
String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty";
LOG.error(msg);
throw new CorruptedSnapshotException(msg);
}
String errorMsg = "";
- if (snapshotRegions.size() != regions.size()) {
- errorMsg = "Regions moved during the snapshot '" +
+ if (regionManifests.size() != regions.size()) {
+ errorMsg = "Regions moved during the snapshot '" +
ClientSnapshotDescriptionUtils.toString(snapshot) + "'. expected=" +
- regions.size() + " snapshotted=" + snapshotRegions.size() + ".";
+ regions.size() + " snapshotted=" + regionManifests.size() + ".";
LOG.error(errorMsg);
}
for (HRegionInfo region : regions) {
- if (!snapshotRegions.contains(region.getEncodedName())) {
+ SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName());
+ if (regionManifest == null) {
// could happen due to a move or split race.
String mesg = " No snapshot region directory found for region:" + region;
if (errorMsg.isEmpty()) errorMsg = mesg;
LOG.error(mesg);
+ continue;
}
- verifyRegion(fs, snapshotDir, region);
+ verifyRegion(fs, manifest.getSnapshotDir(), region, regionManifest);
}
if (!errorMsg.isEmpty()) {
throw new CorruptedSnapshotException(errorMsg);
@@ -185,65 +190,24 @@ public final class MasterSnapshotVerifier {
* @param snapshotDir snapshot directory to check
* @param region the region to check
*/
- private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region)
- throws IOException {
- // make sure we have region in the snapshot
- Path regionDir = new Path(snapshotDir, region.getEncodedName());
-
- // make sure we have the region info in the snapshot
- Path regionInfo = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
- // make sure the file exists
- if (!fs.exists(regionInfo)) {
- throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
+ private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region,
+ final SnapshotRegionManifest manifest) throws IOException {
+ HRegionInfo manifestRegionInfo = HRegionInfo.convert(manifest.getRegionInfo());
+ if (!region.equals(manifestRegionInfo)) {
+ String msg = "Manifest region info " + manifestRegionInfo +
+ " doesn't match expected region:" + region;
+ throw new CorruptedSnapshotException(msg, snapshot);
}
- HRegionInfo found = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
- if (!region.equals(found)) {
- throw new CorruptedSnapshotException("Found region info (" + found
- + ") doesn't match expected region:" + region, snapshot);
- }
-
- // make sure we have the expected recovered edits files
- TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);
-
- // make sure we have all the expected store files
- SnapshotReferenceUtil.visitRegionStoreFiles(fs, regionDir, new FSVisitor.StoreFileVisitor() {
- public void storeFile(final String regionNameSuffix, final String family,
- final String hfileName) throws IOException {
- verifyStoreFile(snapshotDir, region, family, hfileName);
+ // make sure we have all the expected store files
+ SnapshotReferenceUtil.visitRegionStoreFiles(manifest,
+ new SnapshotReferenceUtil.StoreFileVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ SnapshotReferenceUtil.verifyStoreFile(services.getConfiguration(), fs, snapshotDir,
+ snapshot, region, family, storeFile);
}
});
}
-
- private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo,
- final String family, final String fileName) throws IOException {
- Path refPath = null;
- if (StoreFileInfo.isReference(fileName)) {
- // If is a reference file check if the parent file is present in the snapshot
- Path snapshotHFilePath = new Path(new Path(
- new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
- refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath);
- if (!fs.exists(refPath)) {
- throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
- }
- }
-
- Path linkPath;
- if (refPath != null && HFileLink.isHFileLink(refPath)) {
- linkPath = new Path(family, refPath.getName());
- } else if (HFileLink.isHFileLink(fileName)) {
- linkPath = new Path(family, fileName);
- } else {
- linkPath = new Path(family, HFileLink.createHFileLinkName(tableName,
- regionInfo.getEncodedName(), fileName));
- }
-
- // check if the linked file exists (in the archive, or in the table dir)
- HFileLink link = new HFileLink(services.getConfiguration(), linkPath);
- if (!link.exists(fs)) {
- throw new CorruptedSnapshotException("Can't find hfile: " + fileName
- + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath()
- + ") directory for the primary table.", snapshot);
- }
- }
}
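
As a side note on the verifier changes above: the same manifest APIs that MasterSnapshotVerifier now relies on can be used to inspect what a completed snapshot references. A rough sketch, not part of the patch, assuming Configuration conf, FileSystem fs, and the completed snapshot directory are already in hand:

// Sketch only (not part of this patch). Assumes conf, fs, and Path snapshotDir are in scope.
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);

HTableDescriptor htd = manifest.getTableDescriptor();
List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
int regionCount = (regionManifests == null) ? 0 : regionManifests.size();
System.out.println("snapshot=" + desc.getName() + " table=" + htd.getNameAsString()
    + " regions=" + regionCount);

// Count the store files referenced by the manifest, region by region, using the
// StoreFileVisitor callback shown in the MasterSnapshotVerifier hunk above.
final java.util.concurrent.atomic.AtomicInteger storeFiles =
    new java.util.concurrent.atomic.AtomicInteger(0);
if (regionManifests != null) {
  for (SnapshotRegionManifest regionManifest : regionManifests) {
    SnapshotReferenceUtil.visitRegionStoreFiles(regionManifest,
        new SnapshotReferenceUtil.StoreFileVisitor() {
          @Override
          public void storeFile(HRegionInfo regionInfo, String family,
              SnapshotRegionManifest.StoreFile storeFile) throws IOException {
            storeFiles.incrementAndGet();
          }
        });
  }
}
System.out.println("store files referenced: " + storeFiles.get());
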
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
index 2e970a1..2b97505 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
/**
* Handler to Restore a snapshot.
@@ -120,9 +121,11 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
// 2. Execute the on-disk Restore
LOG.debug("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+ SnapshotManifest manifest = SnapshotManifest.open(masterServices.getConfiguration(), fs,
+ snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
- masterServices.getConfiguration(), fs,
- snapshot, snapshotDir, hTableDescriptor, rootDir, monitor, status);
+ masterServices.getConfiguration(), fs, manifest,
+ this.hTableDescriptor, rootDir, monitor, status);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
// 3. Forces all the RegionStates to be offline
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index 0eac8be..bd9e59f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -174,19 +174,24 @@ public class SnapshotFileCache implements Stoppable {
// is an illegal access to the cache. Really we could do a mutex-guarded pointer swap on the
// cache, but that seems overkill at the moment and isn't necessarily a bottleneck.
public synchronized boolean contains(String fileName) throws IOException {
- if (this.cache.contains(fileName)) return true;
-
- refreshCache();
-
- // then check again
- return this.cache.contains(fileName);
+ boolean hasFile = this.cache.contains(fileName);
+ if (!hasFile) {
+ refreshCache();
+ // then check again
+ hasFile = this.cache.contains(fileName);
+ }
+ return hasFile;
}
private synchronized void refreshCache() throws IOException {
- // get the status of the snapshots directory and /.tmp
- FileStatus dirStatus, tempStatus;
+ long lastTimestamp = Long.MAX_VALUE;
+ boolean hasChanges = false;
+
+ // get the status of the snapshots directory and check if it has changes
try {
- dirStatus = fs.getFileStatus(snapshotDir);
+ FileStatus dirStatus = fs.getFileStatus(snapshotDir);
+ lastTimestamp = dirStatus.getModificationTime();
+ hasChanges |= (lastTimestamp >= lastModifiedTime);
} catch (FileNotFoundException e) {
if (this.cache.size() > 0) {
LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist");
@@ -194,16 +199,28 @@ public class SnapshotFileCache implements Stoppable {
return;
}
+ // get the status of the snapshots temporary directory and check if it has changes
+ // The top-level directory timestamp is not updated, so we have to check the inner-level.
try {
Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
- tempStatus = fs.getFileStatus(snapshotTmpDir);
+ FileStatus tempDirStatus = fs.getFileStatus(snapshotTmpDir);
+ lastTimestamp = Math.min(lastTimestamp, tempDirStatus.getModificationTime());
+ hasChanges |= (lastTimestamp >= lastModifiedTime);
+ if (!hasChanges) {
+ FileStatus[] tmpSnapshots = FSUtils.listStatus(fs, snapshotDir);
+ if (tmpSnapshots != null) {
+ for (FileStatus dirStatus: tmpSnapshots) {
+ lastTimestamp = Math.min(lastTimestamp, dirStatus.getModificationTime());
+ }
+ hasChanges |= (lastTimestamp >= lastModifiedTime);
+ }
+ }
} catch (FileNotFoundException e) {
- tempStatus = dirStatus;
+ // Nothing to do if the tmp dir is empty
}
// if the snapshot directory wasn't modified since we last check, we are done
- if (dirStatus.getModificationTime() <= lastModifiedTime &&
- tempStatus.getModificationTime() <= lastModifiedTime) {
+ if (!hasChanges) {
return;
}
@@ -213,8 +230,7 @@ public class SnapshotFileCache implements Stoppable {
// However, snapshot directories are only created once, so this isn't an issue.
// 1. update the modified time
- this.lastModifiedTime = Math.min(dirStatus.getModificationTime(),
- tempStatus.getModificationTime());
+ this.lastModifiedTime = lastTimestamp;
// 2.clear the cache
this.cache.clear();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
index e82ca16..562f682 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
@@ -66,7 +66,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
}
@Override
- public void setConf(Configuration conf) {
+ public void setConf(final Configuration conf) {
super.setConf(conf);
try {
long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
@@ -77,7 +77,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
"snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
public Collection<String> filesUnderSnapshot(final Path snapshotDir)
throws IOException {
- return SnapshotReferenceUtil.getHFileNames(fs, snapshotDir);
+ return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
}
});
} catch (IOException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 50afb42..27cd5c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -70,11 +70,11 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.snapshot.SnapshotExistsException;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.zookeeper.KeeperException;
@@ -540,9 +540,12 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
+ "' doesn't exist, can't take snapshot.", snapshot);
}
- // set the snapshot version, now that we are ready to take it
- snapshot = snapshot.toBuilder().setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION)
- .build();
+ // if not specified, set the snapshot format
+ if (!snapshot.hasVersion()) {
+ snapshot = snapshot.toBuilder()
+ .setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION)
+ .build();
+ }
// call pre coproc hook
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@@ -676,15 +679,16 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
// read snapshot information
SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
- HTableDescriptor snapshotTableDesc =
- FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+ SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs,
+ snapshotDir, fsSnapshot);
+ HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// stop tracking "abandoned" handlers
cleanupSentinels();
// Verify snapshot validity
- SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, snapshotDir, fsSnapshot);
+ SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation
if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 19c03f9..4d6182b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -48,14 +48,11 @@ import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask;
-import org.apache.hadoop.hbase.snapshot.ReferenceRegionHFilesTask;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.zookeeper.KeeperException;
@@ -88,6 +85,9 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
protected final TableLock tableLock;
protected final MonitoredTask status;
protected final TableName snapshotTable;
+ protected final SnapshotManifest snapshotManifest;
+
+ protected HTableDescriptor htd;
/**
* @param snapshot descriptor of the snapshot to take
@@ -107,6 +107,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
+ this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
this.tableLockManager = master.getTableLockManager();
this.tableLock = this.tableLockManager.writeLock(
@@ -136,7 +137,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
// case of exceptions
boolean success = false;
try {
- loadTableDescriptor(); // check that .tableinfo is present
+ this.htd = loadTableDescriptor(); // check that .tableinfo is present
success = true;
} finally {
if (!success) {
@@ -162,8 +163,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
// an external exception that gets captured here.
// write down the snapshot info in the working directory
- SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, this.fs);
- new TableInfoCopyTask(monitor, snapshot, fs, rootDir).call();
+ SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
+ snapshotManifest.addTableDescriptor(this.htd);
monitor.rethrowException();
List<Pair<HRegionInfo, ServerName>> regionsAndLocations =
@@ -184,16 +185,19 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
}
}
+ // flush the in-memory state, and write the single manifest
+ status.setStatus("Consolidate snapshot: " + snapshot.getName());
+ snapshotManifest.consolidate();
+
// verify the snapshot is valid
status.setStatus("Verifying snapshot: " + snapshot.getName());
verifier.verifySnapshot(this.workingDir, serverNames);
// complete the snapshot, atomically moving from tmp to .snapshot dir.
completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
- status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshotTable
- + " completed");
- LOG.info("Snapshot " + snapshot.getName() + " of table " + snapshotTable
- + " completed");
+ msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
+ status.markComplete(msg);
+ LOG.info(msg);
metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
} catch (Exception e) {
status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " +
@@ -204,8 +208,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
ForeignException ee = new ForeignException(reason, e);
monitor.receive(ee);
// need to mark this completed to close off and allow cleanup to happen.
- cancel("Failed to take snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot)
- + "' due to exception");
+ cancel(reason);
} finally {
LOG.debug("Launching cleanup of working dir:" + workingDir);
try {
@@ -262,26 +265,10 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
*/
protected void snapshotDisabledRegion(final HRegionInfo regionInfo)
throws IOException {
- // 2 copy the regionInfo files to the snapshot
- HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
- workingDir, regionInfo);
-
- // check for error for each region
- monitor.rethrowException();
-
- // 2 for each region, copy over its recovered.edits directory
- Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
- Path snapshotRegionDir = regionFs.getRegionDir();
- new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
- monitor.rethrowException();
- status.setStatus("Completed copying recovered edits for offline snapshot of table: "
- + snapshotTable);
-
- // 2 reference all the files in the region
- new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call();
+ snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
monitor.rethrowException();
- status.setStatus("Completed referencing HFiles for offline snapshot of table: " +
- snapshotTable);
+ status.setStatus("Completed referencing HFiles for offline region " + regionInfo.toString() +
+ " of table: " + snapshotTable);
}
@Override
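
To summarize the TakeSnapshotHandler rework just above: the per-region copy tasks (CopyRecoveredEditsTask, ReferenceRegionHFilesTask, TableInfoCopyTask) are replaced by calls into SnapshotManifest. A condensed sketch of the new flow, distilled from the hunks above rather than taken verbatim from the patch (conf, fs, rootDir, workingDir, snapshotDir, snapshot, monitor, htd, snapshotTable, regions, serverNames, verifier, and completeSnapshot are the handler's own fields and helpers):

// Condensed from the TakeSnapshotHandler hunks above; not literal patch code.
SnapshotManifest snapshotManifest =
    SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);

// 1. write the snapshot info and the table descriptor into the working dir
SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
snapshotManifest.addTableDescriptor(htd);

// 2. add one region manifest per region; offline regions are referenced directly from disk
for (HRegionInfo regionInfo : regions) {
  snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
  monitor.rethrowException();
}

// 3. merge the per-region manifests into the single SnapshotDataManifest
snapshotManifest.consolidate();

// 4. verify the result, then atomically move the working dir into place
verifier.verifySnapshot(workingDir, serverNames);
completeSnapshot(snapshotDir, workingDir, fs);
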
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java
index aa3dc78..8adc6db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.procedure;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
@InterfaceAudience.Private
@InterfaceStability.Evolving
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 90976e2..6aecaea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -127,6 +127,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.MutationReplay;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -193,7 +194,7 @@ public class HRegion implements HeapSize { // , Writable{
public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
"hbase.hregion.scan.loadColumnFamiliesOnDemand";
-
+
/**
* This is the global default value for durability. All tables/mutations not
* defining a durability or using USE_DEFAULT will default to this value.
@@ -472,7 +473,7 @@ public class HRegion implements HeapSize { // , Writable{
private RegionServerAccounting rsAccounting;
private List<Pair<Long, Long>> recentFlushes = new ArrayList<Pair<Long, Long>>();
private long flushCheckInterval;
- // flushPerChanges is to prevent too many changes in memstore
+ // flushPerChanges is to prevent too many changes in memstore
private long flushPerChanges;
private long blockingMemStoreSize;
final long threadWakeFrequency;
@@ -572,7 +573,7 @@ public class HRegion implements HeapSize { // , Writable{
throw new IllegalArgumentException(MEMSTORE_FLUSH_PER_CHANGES + " can not exceed "
+ MAX_FLUSH_PER_CHANGES);
}
-
+
this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration",
DEFAULT_ROWLOCK_WAIT_DURATION);
@@ -807,7 +808,7 @@ public class HRegion implements HeapSize { // , Writable{
for (Store store : this.stores.values()) {
try {
store.close();
- } catch (IOException e) {
+ } catch (IOException e) {
LOG.warn(e.getMessage());
}
}
@@ -1144,7 +1145,7 @@ public class HRegion implements HeapSize { // , Writable{
// so we do not lose data
throw new DroppedSnapshotException("Failed clearing memory after " +
actualFlushes + " attempts on region: " + Bytes.toStringBinary(getRegionName()));
- }
+ }
LOG.info("Running extra flush, " + actualFlushes +
" (carrying snapshot?) " + this);
}
@@ -2786,59 +2787,12 @@ public class HRegion implements HeapSize { // , Writable{
*/
public void addRegionToSnapshot(SnapshotDescription desc,
ForeignExceptionSnare exnSnare) throws IOException {
- // This should be "fast" since we don't rewrite store files but instead
- // back up the store files by creating a reference
- Path rootDir = FSUtils.getRootDir(this.rsServices.getConfiguration());
+ Path rootDir = FSUtils.getRootDir(conf);
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
- // 1. dump region meta info into the snapshot directory
- LOG.debug("Storing region-info for snapshot.");
- HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
- this.fs.getFileSystem(), snapshotDir, getRegionInfo());
-
- // 2. iterate through all the stores in the region
- LOG.debug("Creating references for hfiles");
-
- // This ensures that we have an atomic view of the directory as long as we have < ls limit
- // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
- // batches and may miss files being added/deleted. This could be more robust (iteratively
- // checking to see if we have all the files until we are sure), but the limit is currently 1000
- // files/batch, far more than the number of store files under a single column family.
- for (Store store : stores.values()) {
- // 2.1. build the snapshot reference directory for the store
- Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString());
- List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
- }
-
- // 2.2. iterate through all the store's files and create "references".
- int sz = storeFiles.size();
- for (int i = 0; i < sz; i++) {
- if (exnSnare != null) {
- exnSnare.rethrowException();
- }
- StoreFile storeFile = storeFiles.get(i);
- Path file = storeFile.getPath();
-
- LOG.debug("Creating reference for file (" + (i+1) + "/" + sz + ") : " + file);
- Path referenceFile = new Path(dstStoreDir, file.getName());
- boolean success = true;
- if (storeFile.isReference()) {
- // write the Reference object to the snapshot
- storeFile.getFileInfo().getReference().write(fs.getFileSystem(), referenceFile);
- } else {
- // create "reference" to this store file. It is intentionally an empty file -- all
- // necessary information is captured by its fs location and filename. This allows us to
- // only figure out what needs to be done via a single nn operation (instead of having to
- // open and read the files as well).
- success = fs.getFileSystem().createNewFile(referenceFile);
- }
- if (!success) {
- throw new IOException("Failed to create reference file:" + referenceFile);
- }
- }
- }
+ SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
+ snapshotDir, desc, exnSnare);
+ manifest.addRegion(this);
}
/**
@@ -4005,14 +3959,14 @@ public class HRegion implements HeapSize { // , Writable{
isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength());
// save that the row was empty before filters applied to it.
final boolean isEmptyRow = results.isEmpty();
-
+
// We have the part of the row necessary for filtering (all of it, usually).
// First filter with the filterRow(List).
FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED;
if (filter != null && filter.hasFilterRow()) {
ret = filter.filterRowCellsWithRet(results);
}
-
+
if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) {
results.clear();
boolean moreRows = nextRow(currentRow, offset, length);
@@ -4080,7 +4034,7 @@ public class HRegion implements HeapSize { // , Writable{
return filter != null && (!filter.hasFilterRow())
&& filter.filterRow();
}
-
+
private boolean filterRowKey(byte[] row, int offset, short length) throws IOException {
return filter != null
&& filter.filterRowKey(row, offset, length);
@@ -5743,7 +5697,7 @@ public class HRegion implements HeapSize { // , Writable{
* modifies data. It has to be called just before a try.
* #closeRegionOperation needs to be called in the try's finally block
* Acquires a read lock and checks if the region is closing or closed.
- * @throws IOException
+ * @throws IOException
*/
public void startRegionOperation() throws IOException {
startRegionOperation(Operation.ANY);
@@ -5751,7 +5705,7 @@ public class HRegion implements HeapSize { // , Writable{
/**
* @param op The operation is about to be taken on the region
- * @throws IOException
+ * @throws IOException
*/
protected void startRegionOperation(Operation op) throws IOException {
switch (op) {
@@ -5801,7 +5755,7 @@ public class HRegion implements HeapSize { // , Writable{
/**
* Closes the lock. This needs to be called in the finally block corresponding
* to the try block of #startRegionOperation
- * @throws IOException
+ * @throws IOException
*/
public void closeRegionOperation() throws IOException {
closeRegionOperation(Operation.ANY);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 4497475..1bc7edd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -77,7 +77,7 @@ public class HRegionFileSystem {
private final Configuration conf;
private final Path tableDir;
private final FileSystem fs;
-
+
/**
* In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the
* client level.
@@ -149,7 +149,7 @@ public class HRegionFileSystem {
* @param familyName Column Family Name
* @return {@link Path} to the directory of the specified family
*/
- Path getStoreDir(final String familyName) {
+ public Path getStoreDir(final String familyName) {
return new Path(this.getRegionDir(), familyName);
}
@@ -176,20 +176,31 @@ public class HRegionFileSystem {
return getStoreFiles(Bytes.toString(familyName));
}
+ public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+ return getStoreFiles(familyName, true);
+ }
+
/**
* Returns the store files available for the family.
* This methods performs the filtering based on the valid store files.
* @param familyName Column Family Name
* @return a set of {@link StoreFileInfo} for the specified family.
*/
- public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
+ public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
+ throws IOException {
Path familyDir = getStoreDir(familyName);
FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
- if (files == null) return null;
+ if (files == null) {
+ LOG.debug("No StoreFiles for: " + familyDir);
+ return null;
+ }
ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
for (FileStatus status: files) {
- if (!StoreFileInfo.isValid(status)) continue;
+ if (validate && !StoreFileInfo.isValid(status)) {
+ LOG.warn("Invalid StoreFile: " + status.getPath());
+ continue;
+ }
storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
}
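Editorial note: the new getStoreFiles(familyName, validate) overload above keeps invalid entries only when asked to skip validation. Below is a simplified, filesystem-free version of that filtering loop; isValid here is a placeholder predicate, not StoreFileInfo.isValid(FileStatus).

import java.util.ArrayList;
import java.util.List;

public class StoreFileListingSketch {
  // Placeholder validity check; the real code delegates to StoreFileInfo.isValid(FileStatus).
  static boolean isValid(String fileName) {
    return !fileName.startsWith(".") && !fileName.endsWith(".tmp");
  }

  // Keep every name when validate is false; otherwise drop the invalid ones, as the overload above does.
  static List<String> listStoreFiles(List<String> familyDirEntries, boolean validate) {
    List<String> storeFiles = new ArrayList<String>(familyDirEntries.size());
    for (String name : familyDirEntries) {
      if (validate && !isValid(name)) {
        continue; // skipped, mirroring the LOG.warn branch in the patch
      }
      storeFiles.add(name);
    }
    return storeFiles;
  }
}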
@@ -354,7 +365,7 @@ public class HRegionFileSystem {
Path storeDir = getStoreDir(familyName);
if(!fs.exists(storeDir) && !createDir(storeDir))
throw new IOException("Failed creating " + storeDir);
-
+
String name = buildPath.getName();
if (generateNewName) {
name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
@@ -556,16 +567,16 @@ public class HRegionFileSystem {
*/
Path splitStoreFile(final HRegionInfo hri, final String familyName,
final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
-
+
// Check whether the split row lies in the range of the store file
// If it is outside the range, return directly.
if (top) {
//check if larger than last key.
KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
- byte[] lastKey = f.createReader().getLastKey();
+ byte[] lastKey = f.createReader().getLastKey();
// If lastKey is null means storefile is empty.
if (lastKey == null) return null;
- if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
+ if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
return null;
}
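Editorial note: the block above returns early and skips creating a "top" reference when the split row sorts after the store file's last key. The standalone sketch below illustrates only that range check, using plain unsigned byte comparison in place of the KeyValue comparator.

public class SplitRangeCheckSketch {
  // Lexicographic, unsigned byte-array comparison (stand-in for the KeyValue comparator).
  static int compare(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  // For the "top" half: if the split key sorts after the file's last key,
  // the top reference would be empty, so the caller can skip creating it.
  static boolean topHalfWouldBeEmpty(byte[] splitKey, byte[] lastKey) {
    return lastKey == null || compare(splitKey, lastKey) > 0;
  }

  public static void main(String[] args) {
    byte[] split = "row-500".getBytes();
    byte[] last = "row-400".getBytes();
    System.out.println(topHalfWouldBeEmpty(split, last)); // true: skip the top reference
  }
}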
@@ -575,14 +586,14 @@ public class HRegionFileSystem {
byte[] firstKey = f.createReader().getFirstKey();
// If firstKey is null means storefile is empty.
if (firstKey == null) return null;
- if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
+ if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(),
splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
return null;
- }
+ }
}
-
+
f.getReader().close(true);
-
+
Path splitDir = new Path(getSplitsDir(hri), familyName);
// A reference to the bottom half of the hsf store file.
Reference r =
@@ -681,7 +692,7 @@ public class HRegionFileSystem {
* Commit a merged region, moving it from the merges temporary directory to
* the proper location in the filesystem.
* @param mergedRegionInfo merged region {@link HRegionInfo}
- * @throws IOException
+ * @throws IOException
*/
void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index af3517c..34e1d5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -222,7 +222,7 @@ public class StoreFile {
* @return the StoreFile object associated to this StoreFile.
* null if the StoreFile is not a reference.
*/
- StoreFileInfo getFileInfo() {
+ public StoreFileInfo getFileInfo() {
return this.fileInfo;
}
@@ -614,7 +614,7 @@ public class StoreFile {
if (comparator == null) {
comparator = KeyValue.COMPARATOR;
}
- return new Writer(fs, filePath,
+ return new Writer(fs, filePath,
conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext);
}
}
@@ -694,7 +694,7 @@ public class StoreFile {
/** Bytes per Checksum */
protected int bytesPerChecksum;
-
+
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
/* isTimeRangeTrackerSet keeps track if the timeRange has already been set
* When flushing a memstore, we set TimeRange and use this variable to
@@ -723,7 +723,7 @@ public class StoreFile {
final Configuration conf,
CacheConfig cacheConf,
final KVComparator comparator, BloomType bloomType, long maxKeys,
- InetSocketAddress[] favoredNodes, HFileContext fileContext)
+ InetSocketAddress[] favoredNodes, HFileContext fileContext)
throws IOException {
writer = HFile.getWriterFactory(conf, cacheConf)
.withPath(fs, path)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 49edadb..8952f49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -144,7 +144,7 @@ public class StoreFileInfo {
* @return the Reference object associated to this StoreFileInfo.
* null if the StoreFile is not a reference.
*/
- Reference getReference() {
+ public Reference getReference() {
return this.reference;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java
deleted file mode 100644
index 3dc2d99..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-import java.util.NavigableSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
-
-/**
- * Copy over each of the files in a region's recovered.edits directory to the region's snapshot
- * directory.
- *
- * This is a serial operation over each of the files in the recovered.edits directory and also
- * streams all the bytes to the client and then back to the filesystem, so the files being copied
- * should be small or it will (a) suck up a lot of bandwidth, and (b) take a long time.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class CopyRecoveredEditsTask extends SnapshotTask {
-
- private static final Log LOG = LogFactory.getLog(CopyRecoveredEditsTask.class);
- private final FileSystem fs;
- private final Path regiondir;
- private final Path outputDir;
-
- /**
- * @param snapshot Snapshot being taken
- * @param monitor error monitor for the snapshot
- * @param fs {@link FileSystem} where the snapshot is being taken
- * @param regionDir directory for the region to examine for edits
- * @param snapshotRegionDir directory for the region in the snapshot
- */
- public CopyRecoveredEditsTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor,
- FileSystem fs, Path regionDir, Path snapshotRegionDir) {
- super(snapshot, monitor);
- this.fs = fs;
- this.regiondir = regionDir;
- this.outputDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
- }
-
- @Override
- public Void call() throws IOException {
- NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(this.fs, regiondir);
- if (files == null || files.size() == 0) return null;
-
- // copy over each file.
- // this is really inefficient (could be trivially parallelized), but is
- // really simple to reason about.
- for (Path source : files) {
- // check to see if the file is zero length, in which case we can skip it
- FileStatus stat = fs.getFileStatus(source);
- if (stat.getLen() <= 0) continue;
-
- // its not zero length, so copy over the file
- Path out = new Path(outputDir, source.getName());
- LOG.debug("Copying " + source + " to " + out);
- FileUtil.copy(fs, source, fs, out, true, fs.getConf());
-
- // check for errors to the running operation after each file
- this.rethrowException();
- }
- return null;
- }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index d1a9828..d7cc0ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -47,18 +47,21 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.io.FileLink;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HLogLink;
import org.apache.hadoop.hbase.mapreduce.JobUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
@@ -99,7 +102,8 @@ public final class ExportSnapshot extends Configured implements Tool {
// Export Map-Reduce Counters, to keep track of the progress
public enum Counter { MISSING_FILES, COPY_FAILED, BYTES_EXPECTED, BYTES_COPIED, FILES_COPIED };
- private static class ExportMapper extends Mapper<Text, NullWritable, NullWritable, NullWritable> {
+ private static class ExportMapper extends Mapper<BytesWritable, NullWritable, NullWritable, NullWritable> {
final static int REPORT_SIZE = 1 * 1024 * 1024;
final static int BUFFER_SIZE = 64 * 1024;
@@ -155,35 +159,35 @@ public final class ExportSnapshot extends Configured implements Tool {
}
@Override
- public void map(Text key, NullWritable value, Context context)
+ public void map(BytesWritable key, NullWritable value, Context context)
throws InterruptedException, IOException {
- Path inputPath = new Path(key.toString());
- Path outputPath = getOutputPath(inputPath);
+ SnapshotFileInfo inputInfo = SnapshotFileInfo.parseFrom(key.copyBytes());
+ Path outputPath = getOutputPath(inputInfo);
- LOG.info("copy file input=" + inputPath + " output=" + outputPath);
- copyFile(context, inputPath, outputPath);
+ copyFile(context, inputInfo, outputPath);
}
/**
* Returns the location where the inputPath will be copied.
- * - hfiles are encoded as hfile links hfile-region-table
- * - logs are encoded as serverName/logName
*/
- private Path getOutputPath(final Path inputPath) throws IOException {
- Path path;
- if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) {
- String family = inputPath.getParent().getName();
- TableName table =
- HFileLink.getReferencedTableName(inputPath.getName());
- String region = HFileLink.getReferencedRegionName(inputPath.getName());
- String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
- path = new Path(FSUtils.getTableDir(new Path("./"), table),
- new Path(region, new Path(family, hfile)));
- } else if (isHLogLinkPath(inputPath)) {
- String logName = inputPath.getName();
- path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName);
- } else {
- path = inputPath;
+ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException {
+ Path path = null;
+ switch (inputInfo.getType()) {
+ case HFILE:
+ Path inputPath = new Path(inputInfo.getHfile());
+ String family = inputPath.getParent().getName();
+ TableName table = HFileLink.getReferencedTableName(inputPath.getName());
+ String region = HFileLink.getReferencedRegionName(inputPath.getName());
+ String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
+ path = new Path(FSUtils.getTableDir(new Path("./"), table),
+ new Path(region, new Path(family, hfile)));
+ break;
+ case WAL:
+ Path oldLogsDir = new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME);
+ path = new Path(oldLogsDir, inputInfo.getWalName());
+ break;
+ default:
+ throw new IOException("Invalid File Type: " + inputInfo.getType().toString());
}
return new Path(outputArchive, path);
}
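Editorial note: getOutputPath above now switches on the SnapshotFileInfo type instead of guessing the file kind from the path shape. The sketch below captures that dispatch with plain strings and a local enum; the layout strings are illustrative, not the exact directory names HBase uses.

public class OutputPathSketch {
  enum FileType { HFILE, WAL }

  // Minimal stand-in for the protobuf SnapshotFileInfo used above.
  static class FileRef {
    final FileType type;
    final String name;
    FileRef(FileType type, String name) { this.type = type; this.name = name; }
  }

  // Route each file type to its own output subtree, as the switch in the mapper does.
  static String outputPathFor(FileRef ref, String archiveRoot, String oldLogsDir) {
    switch (ref.type) {
      case HFILE:
        return archiveRoot + "/" + ref.name;
      case WAL:
        return oldLogsDir + "/" + ref.name;
      default:
        throw new IllegalArgumentException("Invalid file type: " + ref.type);
    }
  }
}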
@@ -191,7 +195,7 @@ public final class ExportSnapshot extends Configured implements Tool {
/*
* Used by TestExportSnapshot to simulate a failure
*/
- private void injectTestFailure(final Context context, final Path inputPath)
+ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
throws IOException {
if (testFailures) {
if (context.getConfiguration().getBoolean(CONF_TEST_RETRY, false)) {
@@ -203,37 +207,38 @@ public final class ExportSnapshot extends Configured implements Tool {
// retry, but at least we reduce the number of test failures due to
// this test exception from the same map task.
if (random.nextFloat() < 0.03) {
- throw new IOException("TEST RETRY FAILURE: Unable to copy input=" + inputPath
+ throw new IOException("TEST RETRY FAILURE: Unable to copy input=" + inputInfo
+ " time=" + System.currentTimeMillis());
}
} else {
context.getCounter(Counter.COPY_FAILED).increment(1);
- throw new IOException("TEST FAILURE: Unable to copy input=" + inputPath);
+ throw new IOException("TEST FAILURE: Unable to copy input=" + inputInfo);
}
}
}
- private void copyFile(final Context context, final Path inputPath, final Path outputPath)
- throws IOException {
- injectTestFailure(context, inputPath);
+ private void copyFile(final Context context, final SnapshotFileInfo inputInfo,
+ final Path outputPath) throws IOException {
+ injectTestFailure(context, inputInfo);
// Get the file information
- FileStatus inputStat = getSourceFileStatus(context, inputPath);
+ FileStatus inputStat = getSourceFileStatus(context, inputInfo);
// Verify if the output file exists and is the same that we want to copy
if (outputFs.exists(outputPath)) {
FileStatus outputStat = outputFs.getFileStatus(outputPath);
if (outputStat != null && sameFile(inputStat, outputStat)) {
- LOG.info("Skip copy " + inputPath + " to " + outputPath + ", same file.");
+ LOG.info("Skip copy " + inputStat.getPath() + " to " + outputPath + ", same file.");
return;
}
}
- InputStream in = openSourceFile(context, inputPath);
+ InputStream in = openSourceFile(context, inputInfo);
int bandwidthMB = context.getConfiguration().getInt(CONF_BANDWIDTH_MB, 100);
if (Integer.MAX_VALUE != bandwidthMB) {
in = new ThrottledInputStream(new BufferedInputStream(in), bandwidthMB * 1024 * 1024);
}
+
try {
context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());
@@ -241,7 +246,7 @@ public final class ExportSnapshot extends Configured implements Tool {
outputFs.mkdirs(outputPath.getParent());
FSDataOutputStream out = outputFs.create(outputPath, true);
try {
- copyData(context, inputPath, in, outputPath, out, inputStat.getLen());
+ copyData(context, inputStat.getPath(), in, outputPath, out, inputStat.getLen());
} finally {
out.close();
}
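Editorial note: copyFile above optionally wraps the source stream in a ThrottledInputStream when a bandwidth cap is configured. The class below is not HBase's ThrottledInputStream; it is a minimal JDK-only sketch of the same idea, sleeping whenever the observed rate exceeds the configured bytes-per-second limit.

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

public class SimpleThrottledInputStream extends FilterInputStream {
  private final long bytesPerSecond;
  private final long startTime = System.currentTimeMillis();
  private long bytesRead = 0;

  public SimpleThrottledInputStream(InputStream in, long bytesPerSecond) {
    super(in);
    this.bytesPerSecond = bytesPerSecond;
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    throttle();
    int n = super.read(b, off, len);
    if (n > 0) bytesRead += n;
    return n;
  }

  @Override
  public int read() throws IOException {
    throttle();
    int c = super.read();
    if (c >= 0) bytesRead++;
    return c;
  }

  private void throttle() throws IOException {
    long elapsedMs = Math.max(1, System.currentTimeMillis() - startTime);
    // Sleep while the observed rate exceeds the configured limit.
    while ((bytesRead * 1000L) / elapsedMs > bytesPerSecond) {
      try {
        Thread.sleep(50);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("interrupted while throttling", e);
      }
      elapsedMs = Math.max(1, System.currentTimeMillis() - startTime);
    }
  }
}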
@@ -275,7 +280,7 @@ public final class ExportSnapshot extends Configured implements Tool {
try {
if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
outputFs.setPermission(path, new FsPermission(filesMode));
- } else if (!stat.getPermission().equals(refStat.getPermission())) {
+ } else if (refStat != null && !stat.getPermission().equals(refStat.getPermission())) {
outputFs.setPermission(path, refStat.getPermission());
}
} catch (IOException e) {
@@ -283,8 +288,9 @@ public final class ExportSnapshot extends Configured implements Tool {
return false;
}
- String user = stringIsNotEmpty(filesUser) ? filesUser : refStat.getOwner();
- String group = stringIsNotEmpty(filesGroup) ? filesGroup : refStat.getGroup();
+ boolean hasRefStat = (refStat != null);
+ String user = stringIsNotEmpty(filesUser) || !hasRefStat ? filesUser : refStat.getOwner();
+ String group = stringIsNotEmpty(filesGroup) || !hasRefStat ? filesGroup : refStat.getGroup();
if (stringIsNotEmpty(user) || stringIsNotEmpty(group)) {
try {
if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) {
@@ -367,40 +373,53 @@ public final class ExportSnapshot extends Configured implements Tool {
* Throws an IOException if the communication with the inputFs fail or
* if the file is not found.
*/
- private FSDataInputStream openSourceFile(Context context, final Path path) throws IOException {
+ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo)
+ throws IOException {
try {
- if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) {
- return new HFileLink(inputRoot, inputArchive, path).open(inputFs);
- } else if (isHLogLinkPath(path)) {
- String serverName = path.getParent().getName();
- String logName = path.getName();
- return new HLogLink(inputRoot, serverName, logName).open(inputFs);
+ FileLink link = null;
+ switch (fileInfo.getType()) {
+ case HFILE:
+ Path inputPath = new Path(fileInfo.getHfile());
+ link = new HFileLink(inputRoot, inputArchive, inputPath);
+ break;
+ case WAL:
+ String serverName = fileInfo.getWalServer();
+ String logName = fileInfo.getWalName();
+ link = new HLogLink(inputRoot, serverName, logName);
+ break;
+ default:
+ throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
}
- return inputFs.open(path);
+ return link.open(inputFs);
} catch (IOException e) {
context.getCounter(Counter.MISSING_FILES).increment(1);
- LOG.error("Unable to open source file=" + path, e);
+ LOG.error("Unable to open source file=" + fileInfo.toString(), e);
throw e;
}
}
- private FileStatus getSourceFileStatus(Context context, final Path path) throws IOException {
+ private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo)
+ throws IOException {
try {
- if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) {
- HFileLink link = new HFileLink(inputRoot, inputArchive, path);
- return link.getFileStatus(inputFs);
- } else if (isHLogLinkPath(path)) {
- String serverName = path.getParent().getName();
- String logName = path.getName();
- return new HLogLink(inputRoot, serverName, logName).getFileStatus(inputFs);
+ FileLink link = null;
+ switch (fileInfo.getType()) {
+ case HFILE:
+ Path inputPath = new Path(fileInfo.getHfile());
+ link = new HFileLink(inputRoot, inputArchive, inputPath);
+ break;
+ case WAL:
+ link = new HLogLink(inputRoot, fileInfo.getWalServer(), fileInfo.getWalName());
+ break;
+ default:
+ throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
}
- return inputFs.getFileStatus(path);
+ return link.getFileStatus(inputFs);
} catch (FileNotFoundException e) {
context.getCounter(Counter.MISSING_FILES).increment(1);
- LOG.error("Unable to get the status for source file=" + path, e);
+ LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
throw e;
} catch (IOException e) {
- LOG.error("Unable to get the status for source file=" + path, e);
+ LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
throw e;
}
}
@@ -434,49 +453,54 @@ public final class ExportSnapshot extends Configured implements Tool {
return inChecksum.equals(outChecksum);
}
-
- /**
- * HLog files are encoded as serverName/logName
- * and since all the other files should be in /hbase/table/..path..
- * we can rely on the depth, for now.
- */
- private static boolean isHLogLinkPath(final Path path) {
- return path.depth() == 2;
- }
}
/**
* Extract the list of files (HFiles/HLogs) to copy using Map-Reduce.
* @return list of files referenced by the snapshot (pair of path and size)
*/
- private List<Pair<Path, Long>> getSnapshotFiles(final FileSystem fs, final Path snapshotDir)
- throws IOException {
+ private List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final FileSystem fs,
+ final Path snapshotDir) throws IOException {
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
- final List<Pair<Path, Long>> files = new ArrayList<Pair<Path, Long>>();
- final TableName table =
- TableName.valueOf(snapshotDesc.getTable());
+ final List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>();
+ final TableName table = TableName.valueOf(snapshotDesc.getTable());
final Configuration conf = getConf();
// Get snapshot files
- SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
- new SnapshotReferenceUtil.FileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- Path path = HFileLink.createPath(table, region, family, hfile);
- long size = new HFileLink(conf, path).getFileStatus(fs).getLen();
- files.add(new Pair<Path, Long>(path, size));
- }
-
- public void recoveredEdits (final String region, final String logfile)
- throws IOException {
- // copied with the snapshot references
+ SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
+ new SnapshotReferenceUtil.SnapshotVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ if (storeFile.hasReference()) {
+ // copied as part of the manifest
+ } else {
+ String region = regionInfo.getEncodedName();
+ String hfile = storeFile.getName();
+ Path path = HFileLink.createPath(table, region, family, hfile);
+
+ SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder()
+ .setType(SnapshotFileInfo.Type.HFILE)
+ .setHfile(path.toString())
+ .build();
+
+ long size = new HFileLink(conf, path).getFileStatus(fs).getLen();
+ files.add(new Pair<SnapshotFileInfo, Long>(fileInfo, size));
+ }
}
+ @Override
public void logFile (final String server, final String logfile)
throws IOException {
+ SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder()
+ .setType(SnapshotFileInfo.Type.WAL)
+ .setWalServer(server)
+ .setWalName(logfile)
+ .build();
+
long size = new HLogLink(conf, server, logfile).getFileStatus(fs).getLen();
- files.add(new Pair<Path, Long>(new Path(server, logfile), size));
+ files.add(new Pair<SnapshotFileInfo, Long>(fileInfo, size));
}
});
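Editorial note: getSnapshotFiles above walks the snapshot through a visitor and accumulates one (file, size) entry per callback. The HBase-free sketch below shows just that callback-and-collect shape; the Visitor interface and the hand-driven calls in main are illustrative, not the real SnapshotReferenceUtil API.

import java.util.ArrayList;
import java.util.List;

public class SnapshotFileCollectorSketch {
  // Callback shape, loosely modeled on the visitor used above; names here are illustrative.
  interface Visitor {
    void storeFile(String region, String family, String hfile, long size);
    void logFile(String server, String logName, long size);
  }

  // Collects every visited file as "name:size", the way the anonymous visitor above fills its list.
  static class Collector implements Visitor {
    final List<String> files = new ArrayList<String>();
    @Override
    public void storeFile(String region, String family, String hfile, long size) {
      files.add(region + "/" + family + "/" + hfile + ":" + size);
    }
    @Override
    public void logFile(String server, String logName, long size) {
      files.add(server + "/" + logName + ":" + size);
    }
  }

  public static void main(String[] args) {
    Collector collector = new Collector();
    // In the patch the walker is SnapshotReferenceUtil.visitReferencedFiles(); here we call it by hand.
    collector.storeFile("region-a", "cf", "hfile-1", 1024L);
    collector.logFile("server-1", "wal-0001", 2048L);
    System.out.println(collector.files);
  }
}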
@@ -491,34 +515,35 @@ public final class ExportSnapshot extends Configured implements Tool {
* and then each group fetches the biggest file available, iterating through groups
* alternating the direction.
*/
- static List<List<Path>> getBalancedSplits(final List<Pair<Path, Long>> files, int ngroups) {
+ static List<List<SnapshotFileInfo>> getBalancedSplits(
+ final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) {
// Sort files by size, from small to big
- Collections.sort(files, new Comparator<Pair<Path, Long>>() {
- public int compare(Pair<Path, Long> a, Pair<Path, Long> b) {
+ Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() {
+ public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) {
long r = a.getSecond() - b.getSecond();
return (r < 0) ? -1 : ((r > 0) ? 1 : 0);
}
});
// create balanced groups
- List<List<Path>> fileGroups = new LinkedList<List<Path>>();
+ List<List<SnapshotFileInfo>> fileGroups = new LinkedList<List<SnapshotFileInfo>>();
long[] sizeGroups = new long[ngroups];
int hi = files.size() - 1;
int lo = 0;
- List<Path> group;
+ List<SnapshotFileInfo> group;
int dir = 1;
int g = 0;
while (hi >= lo) {
if (g == fileGroups.size()) {
- group = new LinkedList<Path>();
+ group = new LinkedList<SnapshotFileInfo>();
fileGroups.add(group);
} else {
group = fileGroups.get(g);
}
- Pair<Path, Long> fileInfo = files.get(hi--);
+ Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--);
// add the hi one
sizeGroups[g] += fileInfo.getSecond();
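Editorial note: the comment above describes dealing the largest remaining file to the groups while sweeping back and forth across them. The standalone sketch below illustrates that balancing idea on raw sizes only; it is not the exact code in the patch (which works on Pair<SnapshotFileInfo, Long> and tracks per-group totals), just a simplified snake-order assignment, assuming ngroups >= 1.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BalancedSplitsSketch {
  // Distribute file sizes over ngroups in snake order: largest files first,
  // visiting groups 0..n-1 then n-1..0, so every group gets a mix of big and small files.
  static List<List<Long>> balance(List<Long> sizes, int ngroups) {
    List<Long> sorted = new ArrayList<Long>(sizes);
    Collections.sort(sorted, Collections.<Long>reverseOrder()); // big to small
    List<List<Long>> groups = new ArrayList<List<Long>>(ngroups);
    for (int i = 0; i < ngroups; i++) groups.add(new ArrayList<Long>());
    int g = 0, dir = 1;
    for (Long size : sorted) {
      groups.get(g).add(size);
      g += dir;
      if (g == ngroups || g < 0) { // bounce at the ends
        dir = -dir;
        g += dir;
      }
    }
    return groups;
  }

  public static void main(String[] args) {
    System.out.println(balance(Arrays.asList(90L, 70L, 40L, 30L, 20L, 10L), 2));
    // e.g. [[90, 30, 20], [70, 40, 10]] -> group sizes 140 vs 120
  }
}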
@@ -558,25 +583,25 @@ public final class ExportSnapshot extends Configured implements Tool {
* and the number of the files to copy.
*/
private static Path[] createInputFiles(final Configuration conf, final Path inputFolderPath,
- final List<Pair<Path, Long>> snapshotFiles, int mappers)
+ final List<Pair<SnapshotFileInfo, Long>> snapshotFiles, int mappers)
throws IOException, InterruptedException {
FileSystem fs = inputFolderPath.getFileSystem(conf);
LOG.debug("Input folder location: " + inputFolderPath);
- List<List<Path>> splits = getBalancedSplits(snapshotFiles, mappers);
+ List<List<SnapshotFileInfo>> splits = getBalancedSplits(snapshotFiles, mappers);
Path[] inputFiles = new Path[splits.size()];
- Text key = new Text();
+ BytesWritable key = new BytesWritable();
for (int i = 0; i < inputFiles.length; i++) {
- List<Path> files = splits.get(i);
+ List<SnapshotFileInfo> files = splits.get(i);
inputFiles[i] = new Path(inputFolderPath, String.format("export-%d.seq", i));
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inputFiles[i],
- Text.class, NullWritable.class);
+ BytesWritable.class, NullWritable.class);
LOG.debug("Input split: " + i);
try {
- for (Path file: files) {
- LOG.debug(file.toString());
- key.set(file.toString());
+ for (SnapshotFileInfo file: files) {
+ byte[] pbFileInfo = file.toByteArray();
+ key.set(pbFileInfo, 0, pbFileInfo.length);
writer.append(key, NullWritable.get());
}
} finally {
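Editorial note: createInputFiles above switches the job input from Text keys to BytesWritable keys so that serialized SnapshotFileInfo protobufs can be stored byte-for-byte. The sketch below shows the same length-prefixed write/read round trip with plain JDK streams, as a stand-in for the SequenceFile writer; the string payloads stand in for SnapshotFileInfo.toByteArray().

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class BinaryRecordSketch {
  // Write each record as <length><bytes>, the essence of storing serialized messages as binary keys.
  static byte[] writeRecords(List<byte[]> records) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);
    for (byte[] record : records) {
      out.writeInt(record.length);
      out.write(record);
    }
    out.flush();
    return buffer.toByteArray();
  }

  static List<byte[]> readRecords(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    List<byte[]> records = new ArrayList<byte[]>();
    while (in.available() > 0) {
      byte[] record = new byte[in.readInt()];
      in.readFully(record);
      records.add(record);
    }
    return records;
  }

  public static void main(String[] args) throws IOException {
    List<byte[]> in = new ArrayList<byte[]>();
    in.add("hfile-a".getBytes(StandardCharsets.UTF_8)); // stands in for a serialized SnapshotFileInfo
    in.add("wal-b".getBytes(StandardCharsets.UTF_8));
    byte[] packed = writeRecords(in);
    System.out.println(readRecords(packed).size()); // 2
  }
}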
@@ -591,7 +616,7 @@ public final class ExportSnapshot extends Configured implements Tool {
* Run Map-Reduce Job to perform the files copy.
*/
private void runCopyJob(final Path inputRoot, final Path outputRoot,
- final List<Pair<Path, Long>> snapshotFiles, final boolean verifyChecksum,
+ final List<Pair<SnapshotFileInfo, Long>> snapshotFiles, final boolean verifyChecksum,
final String filesUser, final String filesGroup, final int filesMode,
final int mappers, final int bandwidthMB)
throws IOException, InterruptedException, ClassNotFoundException {
@@ -704,7 +729,7 @@ public final class ExportSnapshot extends Configured implements Tool {
System.err.println("UNEXPECTED: " + cmd);
printUsageAndExit();
}
- } catch (Exception e) {
+ } catch (IOException e) {
printUsageAndExit();
}
}
@@ -761,7 +786,7 @@ public final class ExportSnapshot extends Configured implements Tool {
// Step 0 - Extract snapshot files to copy
LOG.info("Loading Snapshot hfile list");
- final List<Pair<Path, Long>> files = getSnapshotFiles(inputFs, snapshotDir);
+ final List<Pair<SnapshotFileInfo, Long>> files = getSnapshotFiles(inputFs, snapshotDir);
if (mappers == 0 && files.size() > 0) {
mappers = 1 + (files.size() / conf.getInt(CONF_MAP_GROUP, 10));
mappers = Math.min(mappers, files.size());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
deleted file mode 100644
index 60d48d9..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Reference all the hfiles in a region for a snapshot.
- *
- * Doesn't take into account if the hfiles are valid or not, just keeps track of what's in the
- * region's directory.
- */
-public class ReferenceRegionHFilesTask extends SnapshotTask {
-
- public static final Log LOG = LogFactory.getLog(ReferenceRegionHFilesTask.class);
- private final Path regiondir;
- private final FileSystem fs;
- private final PathFilter fileFilter;
- private final Path snapshotDir;
-
- /**
- * Reference all the files in the given region directory
- * @param snapshot snapshot for which to add references
- * @param monitor to check/send error
- * @param regionDir region directory to look for errors
- * @param fs {@link FileSystem} where the snapshot/region live
- * @param regionSnapshotDir directory in the snapshot to store region files
- */
- public ReferenceRegionHFilesTask(final SnapshotDescription snapshot,
- ForeignExceptionDispatcher monitor, Path regionDir, final FileSystem fs, Path regionSnapshotDir) {
- super(snapshot, monitor);
- this.regiondir = regionDir;
- this.fs = fs;
-
- this.fileFilter = new PathFilter() {
- @Override
- public boolean accept(Path path) {
- try {
- return fs.isFile(path);
- } catch (IOException e) {
- LOG.error("Failed to reach fs to check file:" + path + ", marking as not file");
- ReferenceRegionHFilesTask.this.snapshotFailure("Failed to reach fs to check file status",
- e);
- return false;
- }
- }
- };
- this.snapshotDir = regionSnapshotDir;
- }
-
- @Override
- public Void call() throws IOException {
- FileStatus[] families = FSUtils.listStatus(fs, regiondir, new FSUtils.FamilyDirFilter(fs));
-
- // if no families, then we are done again
- if (families == null || families.length == 0) {
- LOG.info("No families under region directory:" + regiondir
- + ", not attempting to add references.");
- return null;
- }
-
- // snapshot directories to store the hfile reference
- List<Path> snapshotFamilyDirs = TakeSnapshotUtils.getFamilySnapshotDirectories(snapshot,
- snapshotDir, families);
-
- LOG.debug("Add hfile references to snapshot directories:" + snapshotFamilyDirs);
- for (int i = 0; i < families.length; i++) {
- FileStatus family = families[i];
- Path familyDir = family.getPath();
- // get all the hfiles in the family
- FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir, fileFilter);
-
- // if no hfiles, then we are done with this family
- if (hfiles == null || hfiles.length == 0) {
- LOG.debug("Not hfiles found for family: " + familyDir + ", skipping.");
- continue;
- }
-
- // make the snapshot's family directory
- Path snapshotFamilyDir = snapshotFamilyDirs.get(i);
- fs.mkdirs(snapshotFamilyDir);
-
- // create a reference for each hfile
- for (FileStatus hfile : hfiles) {
- // references are 0-length files, relying on file name.
- Path referenceFile = new Path(snapshotFamilyDir, hfile.getPath().getName());
- LOG.debug("Creating reference for:" + hfile.getPath() + " at " + referenceFile);
- if (!fs.createNewFile(referenceFile)) {
- throw new IOException("Failed to create reference file:" + referenceFile);
- }
- }
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Finished referencing hfiles, current region state:");
- FSUtils.logFileSystemState(fs, regiondir, LOG);
- LOG.debug("and the snapshot directory:");
- FSUtils.logFileSystemState(fs, snapshotDir, LOG);
- }
- return null;
- }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java
deleted file mode 100644
index 9c987ab..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceServerWALsTask.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Reference all the WAL files under a server's WAL directory
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ReferenceServerWALsTask extends SnapshotTask {
- private static final Log LOG = LogFactory.getLog(ReferenceServerWALsTask.class);
- private final FileSystem fs;
- private final Configuration conf;
- private final String serverName;
- private Path logDir;
-
- /**
- * @param snapshot snapshot being run
- * @param failureListener listener to check for errors while running the operation and to
- * propagate errors found while running the task
- * @param logDir log directory for the server. Name of the directory is taken as the name of the
- * server
- * @param conf {@link Configuration} to extract filesystem information
- * @param fs filesystem where the log files are stored and should be referenced
- */
- public ReferenceServerWALsTask(SnapshotDescription snapshot,
- ForeignExceptionDispatcher failureListener, final Path logDir, final Configuration conf,
- final FileSystem fs) {
- super(snapshot, failureListener);
- this.fs = fs;
- this.conf = conf;
- this.serverName = logDir.getName();
- this.logDir = logDir;
- }
-
- /**
- * Create reference files (empty files with the same path and file name as original).
- * @throws IOException exception from hdfs or network problems
- * @throws ForeignException exception from an external procedure
- */
- @Override
- public Void call() throws IOException, ForeignException {
- // TODO switch to using a single file to reference all required WAL files
-
- // Iterate through each of the log files and add a reference to it.
- // assumes that all the files under the server's logs directory is a log
- FileStatus[] serverLogs = FSUtils.listStatus(fs, logDir, null);
- if (serverLogs == null) {
- LOG.debug("No logs for server directory:" + logDir + ", done referencing files.");
- return null;
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding references for WAL files:" + Arrays.toString(serverLogs));
- }
-
- for (FileStatus file : serverLogs) {
- this.rethrowException();
-
- // add the reference to the file. ex: hbase/.snapshots/.logs//
- Path rootDir = FSUtils.getRootDir(conf);
- Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(this.snapshot, rootDir);
- Path snapshotLogDir = TakeSnapshotUtils.getSnapshotHLogsDir(snapshotDir, serverName);
- // actually store the reference on disk (small file)
- Path ref = new Path(snapshotLogDir, file.getPath().getName());
- if (!fs.createNewFile(ref)) {
- if (!fs.exists(ref)) {
- throw new IOException("Couldn't create reference for:" + file.getPath());
- }
- }
- LOG.debug("Completed WAL referencing for: " + file.getPath() + " to " + ref);
- }
-
- LOG.debug("Successfully completed WAL referencing for ALL files");
- return null;
- }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 83dbaa1..8a63862 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
@@ -46,17 +47,18 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSVisitor;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.IOUtils;
@@ -115,9 +117,9 @@ public class RestoreSnapshotHelper {
private final ForeignExceptionDispatcher monitor;
private final MonitoredTask status;
+ private final SnapshotManifest snapshotManifest;
private final SnapshotDescription snapshotDesc;
private final TableName snapshotTable;
- private final Path snapshotDir;
private final HTableDescriptor tableDesc;
private final Path rootDir;
@@ -128,8 +130,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
- final SnapshotDescription snapshotDescription,
- final Path snapshotDir,
+ final SnapshotManifest manifest,
final HTableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
@@ -137,9 +138,9 @@ public class RestoreSnapshotHelper {
{
this.fs = fs;
this.conf = conf;
- this.snapshotDesc = snapshotDescription;
- this.snapshotTable = TableName.valueOf(snapshotDescription.getTable());
- this.snapshotDir = snapshotDir;
+ this.snapshotManifest = manifest;
+ this.snapshotDesc = manifest.getSnapshotDescription();
+ this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
this.tableDesc = tableDescriptor;
this.rootDir = rootDir;
this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
@@ -153,14 +154,19 @@ public class RestoreSnapshotHelper {
*/
public RestoreMetaChanges restoreHdfsRegions() throws IOException {
LOG.debug("starting restore");
- Set<String> snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
- if (snapshotRegionNames == null) {
+
+ Map<String, SnapshotRegionManifest> regionManifests = snapshotManifest.getRegionManifestsMap();
+ if (regionManifests == null) {
LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty");
return null;
}
RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap);
+ // Take a copy of the manifest.keySet() since we are going to modify
+ // this instance, by removing the regions already present in the restore dir.
+ Set<String> regionNames = new HashSet<String>(regionManifests.keySet());
+
// Identify which region are still available and which not.
// NOTE: we rely upon the region name as: "table name, start key, end key"
List<HRegionInfo> tableRegions = getTableRegions();
@@ -168,9 +174,9 @@ public class RestoreSnapshotHelper {
monitor.rethrowException();
for (HRegionInfo regionInfo: tableRegions) {
String regionName = regionInfo.getEncodedName();
- if (snapshotRegionNames.contains(regionName)) {
+ if (regionNames.contains(regionName)) {
LOG.info("region to restore: " + regionName);
- snapshotRegionNames.remove(regionName);
+ regionNames.remove(regionName);
metaChanges.addRegionToRestore(regionInfo);
} else {
LOG.info("region to remove: " + regionName);
@@ -181,7 +187,7 @@ public class RestoreSnapshotHelper {
// Restore regions using the snapshot data
monitor.rethrowException();
status.setStatus("Restoring table regions...");
- restoreHdfsRegions(metaChanges.getRegionsToRestore());
+ restoreHdfsRegions(regionManifests, metaChanges.getRegionsToRestore());
status.setStatus("Finished restoring all table regions.");
// Remove regions from the current table
@@ -192,30 +198,23 @@ public class RestoreSnapshotHelper {
}
// Regions to Add: present in the snapshot but not in the current table
- if (snapshotRegionNames.size() > 0) {
- List<HRegionInfo> regionsToAdd = new LinkedList<HRegionInfo>();
+ if (regionNames.size() > 0) {
+ List<HRegionInfo> regionsToAdd = new ArrayList<HRegionInfo>(regionNames.size());
monitor.rethrowException();
- for (String regionName: snapshotRegionNames) {
+ for (String regionName: regionNames) {
LOG.info("region to add: " + regionName);
- Path regionDir = new Path(snapshotDir, regionName);
- regionsToAdd.add(HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir));
+ regionsToAdd.add(HRegionInfo.convert(regionManifests.get(regionName).getRegionInfo()));
}
// Create new regions cloning from the snapshot
monitor.rethrowException();
status.setStatus("Cloning regions...");
- HRegionInfo[] clonedRegions = cloneHdfsRegions(regionsToAdd);
+ HRegionInfo[] clonedRegions = cloneHdfsRegions(regionManifests, regionsToAdd);
metaChanges.setNewRegions(clonedRegions);
status.setStatus("Finished cloning regions.");
}
- // Restore WALs
- monitor.rethrowException();
- status.setStatus("Restoring WALs to table...");
- restoreWALs();
- status.setStatus("Finished restoring WALs to table.");
-
return metaChanges;
}
@@ -357,19 +356,34 @@ public class RestoreSnapshotHelper {
/**
* Restore specified regions by restoring content to the snapshot state.
*/
- private void restoreHdfsRegions(final List<HRegionInfo> regions) throws IOException {
+ private void restoreHdfsRegions(final Map<String, SnapshotRegionManifest> regionManifests,
+ final List<HRegionInfo> regions) throws IOException {
if (regions == null || regions.size() == 0) return;
- for (HRegionInfo hri: regions) restoreRegion(hri);
+ for (HRegionInfo hri: regions) {
+ restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
+ }
+ }
+
+ private Map<String, List<SnapshotRegionManifest.StoreFile>> getRegionHFileReferences(
+ final SnapshotRegionManifest manifest) {
+ Map<String, List<SnapshotRegionManifest.StoreFile>> familyMap =
+ new HashMap<String, List<SnapshotRegionManifest.StoreFile>>(manifest.getFamilyFilesCount());
+ for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) {
+ familyMap.put(familyFiles.getFamilyName().toStringUtf8(),
+ new ArrayList<SnapshotRegionManifest.StoreFile>(familyFiles.getStoreFilesList()));
+ }
+ return familyMap;
}
/**
* Restore region by removing files not in the snapshot
* and adding the missing ones from the snapshot.
*/
- private void restoreRegion(HRegionInfo regionInfo) throws IOException {
- Path snapshotRegionDir = new Path(snapshotDir, regionInfo.getEncodedName());
- Map<String, List<String>> snapshotFiles =
- SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir);
+ private void restoreRegion(final HRegionInfo regionInfo,
+ final SnapshotRegionManifest regionManifest) throws IOException {
+ Map<String, List<SnapshotRegionManifest.StoreFile>> snapshotFiles =
+ getRegionHFileReferences(regionManifest);
+
Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
String tableName = tableDesc.getTableName().getNameAsString();
@@ -377,32 +391,34 @@ public class RestoreSnapshotHelper {
for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
byte[] family = Bytes.toBytes(familyDir.getName());
Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
- List<String> snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName());
+ List<SnapshotRegionManifest.StoreFile> snapshotFamilyFiles =
+ snapshotFiles.remove(familyDir.getName());
if (snapshotFamilyFiles != null) {
- List<String> hfilesToAdd = new LinkedList<String>();
- for (String hfileName: snapshotFamilyFiles) {
- if (familyFiles.contains(hfileName)) {
+ List<SnapshotRegionManifest.StoreFile> hfilesToAdd =
+ new ArrayList<SnapshotRegionManifest.StoreFile>();
+ for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) {
+ if (familyFiles.contains(storeFile.getName())) {
// HFile already present
- familyFiles.remove(hfileName);
+ familyFiles.remove(storeFile.getName());
} else {
// HFile missing
- hfilesToAdd.add(hfileName);
+ hfilesToAdd.add(storeFile);
}
}
// Remove hfiles not present in the snapshot
for (String hfileName: familyFiles) {
Path hfile = new Path(familyDir, hfileName);
- LOG.trace("Removing hfile=" + hfile +
+ LOG.trace("Removing hfile=" + hfileName +
" from region=" + regionInfo.getEncodedName() + " table=" + tableName);
HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
}
// Restore Missing files
- for (String hfileName: hfilesToAdd) {
- LOG.trace("Adding HFileLink " + hfileName +
+ for (SnapshotRegionManifest.StoreFile storeFile: hfilesToAdd) {
+ LOG.debug("Adding HFileLink " + storeFile.getName() +
" to region=" + regionInfo.getEncodedName() + " table=" + tableName);
- restoreStoreFile(familyDir, regionInfo, hfileName);
+ restoreStoreFile(familyDir, regionInfo, storeFile);
}
} else {
// Family doesn't exists in the snapshot
@@ -414,15 +430,16 @@ public class RestoreSnapshotHelper {
}
// Add families not present in the table
- for (Map.Entry<String, List<String>> familyEntry: snapshotFiles.entrySet()) {
+ for (Map.Entry<String, List<SnapshotRegionManifest.StoreFile>> familyEntry:
+ snapshotFiles.entrySet()) {
Path familyDir = new Path(regionDir, familyEntry.getKey());
if (!fs.mkdirs(familyDir)) {
throw new IOException("Unable to create familyDir=" + familyDir);
}
- for (String hfileName: familyEntry.getValue()) {
- LOG.trace("Adding HFileLink " + hfileName + " to table=" + tableName);
- restoreStoreFile(familyDir, regionInfo, hfileName);
+ for (SnapshotRegionManifest.StoreFile storeFile: familyEntry.getValue()) {
+ LOG.trace("Adding HFileLink " + storeFile.getName() + " to table=" + tableName);
+ restoreStoreFile(familyDir, regionInfo, storeFile);
}
}
}
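Editorial note: restoreRegion above reconciles the files currently on disk with the ones listed in the snapshot manifest: files present in both are kept, files only on disk are archived, and files only in the snapshot are re-linked. The set arithmetic behind that, stripped of HBase types, looks roughly like this.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RestoreReconcileSketch {
  static class Plan {
    final List<String> toAdd = new ArrayList<String>();    // in the snapshot, missing on disk
    final List<String> toRemove = new ArrayList<String>(); // on disk, not in the snapshot
  }

  static Plan reconcile(Set<String> currentFiles, List<String> snapshotFiles) {
    Plan plan = new Plan();
    Set<String> remaining = new HashSet<String>(currentFiles);
    for (String name : snapshotFiles) {
      if (!remaining.remove(name)) {
        plan.toAdd.add(name); // missing: restoreStoreFile() would create a link or reference
      }
      // already present: keep it, nothing to do
    }
    plan.toRemove.addAll(remaining); // leftovers would be handed to the archiver
    return plan;
  }
}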
@@ -448,7 +465,8 @@ public class RestoreSnapshotHelper {
* Clone specified regions. For each region create a new region
* and create a HFileLink for each hfile.
*/
- private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
+ private HRegionInfo[] cloneHdfsRegions(final Map<String, SnapshotRegionManifest> regionManifests,
+ final List<HRegionInfo> regions) throws IOException {
if (regions == null || regions.size() == 0) return null;
final Map<String, HRegionInfo> snapshotRegions =
@@ -476,7 +494,8 @@ public class RestoreSnapshotHelper {
tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
@Override
public void fillRegion(final HRegion region) throws IOException {
- cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
+ HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
+ cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName()));
}
});
@@ -494,21 +513,17 @@ public class RestoreSnapshotHelper {
* @param region {@link HRegion} cloned
* @param snapshotRegionInfo
*/
- private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo)
- throws IOException {
- final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
+ private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo,
+ final SnapshotRegionManifest manifest) throws IOException {
final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
final String tableName = tableDesc.getTableName().getNameAsString();
- SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir,
- new FSVisitor.StoreFileVisitor() {
- @Override
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- LOG.info("Adding HFileLink " + hfile + " to table=" + tableName);
- Path familyDir = new Path(regionDir, family);
- restoreStoreFile(familyDir, snapshotRegionInfo, hfile);
- }
- });
+ for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) {
+ Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8());
+ for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) {
+ LOG.info("Adding HFileLink " + storeFile.getName() + " to table=" + tableName);
+ restoreStoreFile(familyDir, snapshotRegionInfo, storeFile);
+ }
+ }
}
/**
@@ -524,11 +539,12 @@ public class RestoreSnapshotHelper {
* @param hfileName store file name (can be a Reference, HFileLink or simple HFile)
*/
private void restoreStoreFile(final Path familyDir, final HRegionInfo regionInfo,
- final String hfileName) throws IOException {
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ String hfileName = storeFile.getName();
if (HFileLink.isHFileLink(hfileName)) {
HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName);
} else if (StoreFileInfo.isReference(hfileName)) {
- restoreReferenceFile(familyDir, regionInfo, hfileName);
+ restoreReferenceFile(familyDir, regionInfo, storeFile);
} else {
HFileLink.create(conf, fs, familyDir, regionInfo, hfileName);
}
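Editorial note: restoreStoreFile above picks one of three strategies from the store file name (HFileLink, Reference, or plain HFile). The sketch below shows only that dispatch shape; the boolean flags are placeholders for the real HFileLink.isHFileLink() and StoreFileInfo.isReference() checks, and the returned strings merely describe each branch.

public class StoreFileDispatchSketch {
  enum Kind { LINK, REFERENCE, PLAIN }

  // Placeholder classification; the real checks inspect the file name format.
  static Kind classify(boolean looksLikeLink, boolean looksLikeReference) {
    if (looksLikeLink) return Kind.LINK;
    if (looksLikeReference) return Kind.REFERENCE;
    return Kind.PLAIN;
  }

  static String restore(String hfileName, boolean looksLikeLink, boolean looksLikeReference) {
    switch (classify(looksLikeLink, looksLikeReference)) {
      case LINK:      return "re-create the link for " + hfileName;
      case REFERENCE: return "rebuild the reference file for " + hfileName;
      default:        return "create a new HFileLink pointing at " + hfileName;
    }
  }
}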
@@ -553,7 +569,9 @@ public class RestoreSnapshotHelper {
* @param hfileName reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
- final String hfileName) throws IOException {
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ String hfileName = storeFile.getName();
+
// Extract the referred information (hfile name and parent region)
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(
snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()),
@@ -577,16 +595,21 @@ public class RestoreSnapshotHelper {
Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
// Create the new reference
- InputStream in;
- if (linkPath != null) {
- in = new HFileLink(conf, linkPath).open(fs);
+ if (storeFile.hasReference()) {
+ Reference reference = Reference.convert(storeFile.getReference());
+ reference.write(fs, outPath);
} else {
- linkPath = new Path(new Path(HRegion.getRegionDir(snapshotDir, regionInfo.getEncodedName()),
- familyDir.getName()), hfileName);
- in = fs.open(linkPath);
+ InputStream in;
+ if (linkPath != null) {
+ in = new HFileLink(conf, linkPath).open(fs);
+ } else {
+ linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(),
+ regionInfo.getEncodedName()), familyDir.getName()), hfileName);
+ in = fs.open(linkPath);
+ }
+ OutputStream out = fs.create(outPath);
+ IOUtils.copyBytes(in, out, conf);
}
- OutputStream out = fs.create(outPath);
- IOUtils.copyBytes(in, out, conf);
// Add the daughter region to the map
String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
@@ -619,43 +642,6 @@ public class RestoreSnapshotHelper {
}
/**
- * Restore snapshot WALs.
- *
- * Global Snapshot keep a reference to region servers logs present during the snapshot.
- * (/hbase/.snapshot/snapshotName/.logs/hostName/logName)
- *
- * Since each log contains different tables data, logs must be split to
- * extract the table that we are interested in.
- */
- private void restoreWALs() throws IOException {
- final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
- snapshotTable, regionsMap);
- // TODO: use executors to parallelize splitting
- // TODO: once split, we do not need to split again for other restores
- try {
- // Recover.Edits
- SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir,
- new FSVisitor.RecoveredEditsVisitor() {
- @Override
- public void recoveredEdits (final String region, final String logfile) throws IOException {
- Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
- logSplitter.splitRecoveredEdit(path);
- }
- });
-
- // Region Server Logs
- SnapshotReferenceUtil.visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
- @Override
- public void logFile (final String server, final String logfile) throws IOException {
- logSplitter.splitLog(server, logfile);
- }
- });
- } finally {
- logSplitter.close();
- }
- }
-
- /**
* @return the set of the regions contained in the table
*/
private List<HRegionInfo> getTableRegions() throws IOException {
@@ -720,16 +706,14 @@ public class RestoreSnapshotHelper {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-
- //load table descriptor
- HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
MonitoredTask status = TaskMonitor.get().createStatus(
"Restoring snapshot '" + snapshotName + "' to directory " + restoreDir);
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
- RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, snapshotDesc,
- snapshotDir, htd, restoreDir, monitor, status);
+ RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
+ manifest, manifest.getTableDescriptor(), restoreDir, monitor, status);
helper.restoreHdfsRegions(); // TODO: parallelize.
if (LOG.isDebugEnabled()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 34d3224..203f6de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
import java.util.Collections;
-import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -32,6 +31,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -91,7 +91,7 @@ public class SnapshotDescriptionUtils {
* Version of the fs layout for a snapshot. Future snapshots may have different file layouts,
* which we may need to read in differently.
*/
- public static final int SNAPSHOT_LAYOUT_VERSION = 0;
+ public static final int SNAPSHOT_LAYOUT_VERSION = SnapshotManifestV2.DESCRIPTOR_VERSION;
// snapshot directory constants
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
index 08a444b..63cdbc4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -43,8 +43,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HLogLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
/**
* Tool for dumping snapshot information.
@@ -184,10 +184,10 @@ public final class SnapshotInfo extends Configured implements Tool {
* @param hfile store file name
* @return the store file information
*/
- FileInfo addStoreFile(final String region, final String family, final String hfile)
- throws IOException {
- TableName table = snapshotTable;
- HFileLink link = HFileLink.create(conf, table, region, family, hfile);
+ FileInfo addStoreFile(final HRegionInfo region, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ HFileLink link = HFileLink.create(conf, snapshotTable, region.getEncodedName(),
+ family, storeFile.getName());
boolean inArchive = false;
long size = -1;
try {
@@ -207,22 +207,6 @@ public final class SnapshotInfo extends Configured implements Tool {
}
/**
- * Add the specified recovered.edits file to the stats
- * @param region region encoded name
- * @param logfile log file name
- * @return the recovered.edits information
- */
- FileInfo addRecoveredEdits(final String region, final String logfile) throws IOException {
- Path rootDir = FSUtils.getRootDir(conf);
- Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
- Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
- long size = fs.getFileStatus(path).getLen();
- logSize += size;
- logsCount++;
- return new FileInfo(true, size);
- }
-
- /**
* Add the specified log file to the stats
* @param server server name
* @param logfile log file name
@@ -245,9 +229,7 @@ public final class SnapshotInfo extends Configured implements Tool {
private FileSystem fs;
private Path rootDir;
- private HTableDescriptor snapshotTableDesc;
- private SnapshotDescription snapshotDesc;
- private Path snapshotDir;
+ private SnapshotManifest snapshotManifest;
@Override
public int run(String[] args) throws IOException, InterruptedException {
@@ -309,14 +291,14 @@ public final class SnapshotInfo extends Configured implements Tool {
* @return false if snapshot is not found
*/
private boolean loadSnapshotInfo(final String snapshotName) throws IOException {
- snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+ Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
if (!fs.exists(snapshotDir)) {
LOG.warn("Snapshot '" + snapshotName + "' not found in: " + snapshotDir);
return false;
}
- snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
- snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
+ SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+ snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc);
return true;
}
@@ -324,12 +306,13 @@ public final class SnapshotInfo extends Configured implements Tool {
* Dump the {@link SnapshotDescription}
*/
private void printInfo() {
+ SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
System.out.println("Snapshot Info");
System.out.println("----------------------------------------");
System.out.println(" Name: " + snapshotDesc.getName());
System.out.println(" Type: " + snapshotDesc.getType());
- System.out.println(" Table: " + snapshotTableDesc.getTableName().getNameAsString());
+ System.out.println(" Table: " + snapshotDesc.getTable());
System.out.println(" Format: " + snapshotDesc.getVersion());
System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime())));
System.out.println();
@@ -341,7 +324,7 @@ public final class SnapshotInfo extends Configured implements Tool {
private void printSchema() {
System.out.println("Table Descriptor");
System.out.println("----------------------------------------");
- System.out.println(snapshotTableDesc.toString());
+ System.out.println(snapshotManifest.getTableDescriptor().toString());
System.out.println();
}
@@ -356,32 +339,26 @@ public final class SnapshotInfo extends Configured implements Tool {
}
// Collect information about hfiles and logs in the snapshot
- final String table = snapshotTableDesc.getTableName().getNameAsString();
- final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, this.snapshotDesc);
- SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
- new SnapshotReferenceUtil.FileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- SnapshotStats.FileInfo info = stats.addStoreFile(region, family, hfile);
-
+ final SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
+ final String table = snapshotDesc.getTable();
+ final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, snapshotDesc);
+ SnapshotReferenceUtil.visitReferencedFiles(getConf(), fs,
+ snapshotManifest.getSnapshotDir(), snapshotDesc, new SnapshotReferenceUtil.SnapshotVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ if (storeFile.hasReference()) return;
+
+ SnapshotStats.FileInfo info = stats.addStoreFile(regionInfo, family, storeFile);
if (showFiles) {
System.out.printf("%8s %s/%s/%s/%s %s%n",
(info.isMissing() ? "-" : StringUtils.humanReadableInt(info.getSize())),
- table, region, family, hfile,
+ table, regionInfo.getEncodedName(), family, storeFile.getName(),
(info.inArchive() ? "(archive)" : info.isMissing() ? "(NOT FOUND)" : ""));
}
}
- public void recoveredEdits (final String region, final String logfile)
- throws IOException {
- SnapshotStats.FileInfo info = stats.addRecoveredEdits(region, logfile);
-
- if (showFiles) {
- System.out.printf("%8s recovered.edits %s on region %s%n",
- StringUtils.humanReadableInt(info.getSize()), logfile, region);
- }
- }
-
+ @Override
public void logFile (final String server, final String logfile)
throws IOException {
SnapshotStats.FileInfo info = stats.addLogFile(server, logfile);
@@ -444,17 +421,17 @@ public final class SnapshotInfo extends Configured implements Tool {
FileSystem fs = FileSystem.get(conf);
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
final SnapshotStats stats = new SnapshotStats(conf, fs, snapshot);
- SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
- new SnapshotReferenceUtil.FileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- stats.addStoreFile(region, family, hfile);
- }
-
- public void recoveredEdits (final String region, final String logfile) throws IOException {
- stats.addRecoveredEdits(region, logfile);
+ SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshot,
+ new SnapshotReferenceUtil.SnapshotVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ if (!storeFile.hasReference()) {
+ stats.addStoreFile(regionInfo, family, storeFile);
+ }
}
+ @Override
public void logFile (final String server, final String logfile) throws IOException {
stats.addLogFile(server, logfile);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java
deleted file mode 100644
index 3443c5f..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.io.HLogLink;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * If the snapshot has references to one or more log files,
- * those must be split (each log contains multiple tables and regions)
- * and must be placed in the region/recovered.edits folder.
- * (recovered.edits files will be played on region startup)
- *
- * In case of Restore: the log can just be split in the recovered.edits folder.
- * In case of Clone: each entry in the log must be modified to use the new region name.
- * (region names are encoded with: tableName, startKey, regionIdTimeStamp)
- *
- * We can't use the normal split code, because the HLogKey contains the
- * table name and the region name, and in case of "clone from snapshot"
- * region name and table name will be different and must be replaced in
- * the recovered.edits.
- */
-@InterfaceAudience.Private
-class SnapshotLogSplitter implements Closeable {
- static final Log LOG = LogFactory.getLog(SnapshotLogSplitter.class);
-
- private final class LogWriter implements Closeable {
- private HLog.Writer writer;
- private Path logFile;
- private long seqId;
-
- public LogWriter(final Configuration conf, final FileSystem fs,
- final Path logDir, long seqId) throws IOException {
- logFile = new Path(logDir, logFileName(seqId, true));
- this.writer = HLogFactory.createRecoveredEditsWriter(fs, logFile, conf);
- this.seqId = seqId;
- }
-
- public void close() throws IOException {
- writer.close();
-
- Path finalFile = new Path(logFile.getParent(), logFileName(seqId, false));
- LOG.debug("LogWriter tmpLogFile=" + logFile + " -> logFile=" + finalFile);
- fs.rename(logFile, finalFile);
- }
-
- public void append(final HLog.Entry entry) throws IOException {
- writer.append(entry);
- if (seqId < entry.getKey().getLogSeqNum()) {
- seqId = entry.getKey().getLogSeqNum();
- }
- }
-
- private String logFileName(long seqId, boolean temp) {
- String fileName = String.format("%019d", seqId);
- if (temp) fileName += HLog.RECOVERED_LOG_TMPFILE_SUFFIX;
- return fileName;
- }
- }
-
- private final Map<byte[], LogWriter> regionLogWriters =
- new TreeMap<byte[], LogWriter>(Bytes.BYTES_COMPARATOR);
-
- private final Map<byte[], byte[]> regionsMap;
- private final Configuration conf;
- private final TableName snapshotTableName;
- private final TableName tableName;
- private final Path tableDir;
- private final FileSystem fs;
-
- /**
- * @params tableName snapshot table name
- * @params regionsMap maps original region names to the new ones.
- */
- public SnapshotLogSplitter(final Configuration conf, final FileSystem fs,
- final Path tableDir, final TableName snapshotTableName,
- final Map<byte[], byte[]> regionsMap) {
- this.regionsMap = regionsMap;
- this.snapshotTableName = snapshotTableName;
- this.tableName = FSUtils.getTableName(tableDir);
- this.tableDir = tableDir;
- this.conf = conf;
- this.fs = fs;
- }
-
- public void close() throws IOException {
- for (LogWriter writer: regionLogWriters.values()) {
- writer.close();
- }
- }
-
- public void splitLog(final String serverName, final String logfile) throws IOException {
- LOG.debug("Restore log=" + logfile + " server=" + serverName +
- " for snapshotTable=" + snapshotTableName +
- " to table=" + tableName);
- splitLog(new HLogLink(conf, serverName, logfile).getAvailablePath(fs));
- }
-
- public void splitRecoveredEdit(final Path editPath) throws IOException {
- LOG.debug("Restore recover.edits=" + editPath +
- " for snapshotTable=" + snapshotTableName +
- " to table=" + tableName);
- splitLog(editPath);
- }
-
- /**
- * Split the snapshot HLog reference into regions recovered.edits.
- *
- * The HLogKey contains the table name and the region name,
- * and they must be changed to the restored table names.
- *
- * @param logPath Snapshot HLog reference path
- */
- public void splitLog(final Path logPath) throws IOException {
- HLog.Reader log = HLogFactory.createReader(fs, logPath, conf);
- try {
- HLog.Entry entry;
- LogWriter writer = null;
- byte[] regionName = null;
- byte[] newRegionName = null;
- while ((entry = log.next()) != null) {
- HLogKey key = entry.getKey();
-
- // We're interested only in the snapshot table that we're restoring
- if (!key.getTablename().equals(snapshotTableName)) continue;
-
- // Writer for region.
- if (!Bytes.equals(regionName, key.getEncodedRegionName())) {
- regionName = key.getEncodedRegionName().clone();
-
- // Get the new region name in case of clone, or use the original one
- newRegionName = regionsMap.get(regionName);
- if (newRegionName == null) newRegionName = regionName;
-
- writer = getOrCreateWriter(newRegionName, key.getLogSeqNum());
- LOG.debug("+ regionName=" + Bytes.toString(regionName));
- }
-
- // Append Entry
- key = new HLogKey(newRegionName, tableName, key.getLogSeqNum(), key.getWriteTime(),
- key.getClusterIds(), key.getNonceGroup(), key.getNonce());
- writer.append(new HLog.Entry(key, entry.getEdit()));
- }
- } catch (IOException e) {
- LOG.warn("Something wrong during the log split", e);
- } finally {
- log.close();
- }
- }
-
- /**
- * Create a LogWriter for specified region if not already created.
- */
- private LogWriter getOrCreateWriter(final byte[] regionName, long seqId) throws IOException {
- LogWriter writer = regionLogWriters.get(regionName);
- if (writer == null) {
- Path regionDir = HRegion.getRegionDir(tableDir, Bytes.toString(regionName));
- Path dir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
- fs.mkdirs(dir);
-
- writer = new LogWriter(conf, fs, dir, seqId);
- regionLogWriters.put(regionName, writer);
- }
- return(writer);
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
new file mode 100644
index 0000000..621b835
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -0,0 +1,468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.Threads;
+
+/**
+ * Utility class to help read/write the Snapshot Manifest.
+ *
+ * The snapshot format is transparent to the users of this class;
+ * once the snapshot is written, it is never modified.
+ * On open() the snapshot is loaded into the current in-memory format.
+ */
+@InterfaceAudience.Private
+public class SnapshotManifest {
+ private static final Log LOG = LogFactory.getLog(SnapshotManifest.class);
+
+ private static final String DATA_MANIFEST_NAME = "data.manifest";
+
+ private List<SnapshotRegionManifest> regionManifests;
+ private SnapshotDescription desc;
+ private HTableDescriptor htd;
+
+ private final ForeignExceptionSnare monitor;
+ private final Configuration conf;
+ private final Path workingDir;
+ private final FileSystem fs;
+
+ private SnapshotManifest(final Configuration conf, final FileSystem fs,
+ final Path workingDir, final SnapshotDescription desc,
+ final ForeignExceptionSnare monitor) {
+ this.monitor = monitor;
+ this.desc = desc;
+ this.workingDir = workingDir;
+ this.conf = conf;
+ this.fs = fs;
+ }
+
+ /**
+ * Return a SnapshotManifest instance, used for writing a snapshot.
+ *
+ * There are two usage patterns:
+ * - The Master will create a manifest, add the descriptor, offline regions
+ * and consolidate the snapshot by writing all the pending data to disk.
+ * manifest = SnapshotManifest.create(...)
+ * manifest.addRegion(tableDir, hri)
+ * manifest.consolidate()
+ * - The RegionServer will create a single region manifest
+ * manifest = SnapshotManifest.create(...)
+ * manifest.addRegion(region)
+ */
+ public static SnapshotManifest create(final Configuration conf, final FileSystem fs,
+ final Path workingDir, final SnapshotDescription desc,
+ final ForeignExceptionSnare monitor) {
+ return new SnapshotManifest(conf, fs, workingDir, desc, monitor);
+ }
+
+ /**
+ * Return a SnapshotManifest instance with the information already loaded in-memory.
+ * SnapshotManifest manifest = SnapshotManifest.open(...)
+ * HTableDescriptor htd = manifest.getTableDescriptor()
+ * for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests())
+ * hri = regionManifest.getRegionInfo()
+ * for (regionManifest.getFamilyFiles())
+ * ...
+ */
+ public static SnapshotManifest open(final Configuration conf, final FileSystem fs,
+ final Path workingDir, final SnapshotDescription desc) throws IOException {
+ SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null);
+ manifest.load();
+ return manifest;
+ }
+
+
+ /**
+ * Add the table descriptor to the snapshot manifest
+ */
+ public void addTableDescriptor(final HTableDescriptor htd) throws IOException {
+ this.htd = htd;
+ }
+
+ interface RegionVisitor<TRegion, TFamily> {
+ TRegion regionOpen(final HRegionInfo regionInfo) throws IOException;
+ void regionClose(final TRegion region) throws IOException;
+
+ TFamily familyOpen(final TRegion region, final byte[] familyName) throws IOException;
+ void familyClose(final TRegion region, final TFamily family) throws IOException;
+
+ void storeFile(final TRegion region, final TFamily family, final StoreFileInfo storeFile)
+ throws IOException;
+ }
+
+ private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException {
+ switch (getSnapshotFormat(desc)) {
+ case SnapshotManifestV1.DESCRIPTOR_VERSION:
+ return new SnapshotManifestV1.ManifestBuilder(conf, fs, workingDir);
+ case SnapshotManifestV2.DESCRIPTOR_VERSION:
+ return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir);
+ default:
+ throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc);
+ }
+ }
+
+ /**
+ * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
+ * This is used by the "online snapshot" when the table is enabled.
+ */
+ public void addRegion(final HRegion region) throws IOException {
+ // 0. Get the ManifestBuilder/RegionVisitor
+ RegionVisitor visitor = createRegionVisitor(desc);
+
+ // 1. dump region meta info into the snapshot directory
+ LOG.debug("Storing '" + region + "' region-info for snapshot.");
+ Object regionData = visitor.regionOpen(region.getRegionInfo());
+ monitor.rethrowException();
+
+ // 2. iterate through all the stores in the region
+ LOG.debug("Creating references for hfiles");
+
+ for (Store store : region.getStores().values()) {
+ // 2.1. build the snapshot reference for the store
+ Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
+ monitor.rethrowException();
+
+ List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+ }
+
+ // 2.2. iterate through all the store's files and create "references".
+ for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
+ StoreFile storeFile = storeFiles.get(i);
+ monitor.rethrowException();
+
+ // create "reference" to this store file.
+ LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
+ visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
+ }
+ visitor.familyClose(regionData, familyData);
+ }
+ visitor.regionClose(regionData);
+ }
+
+ /**
+ * Creates a 'manifest' for the specified region, by reading directly from the disk.
+ * This is used by the "offline snapshot" when the table is disabled.
+ */
+ public void addRegion(final Path tableDir, final HRegionInfo regionInfo) throws IOException {
+ // 0. Get the ManifestBuilder/RegionVisitor
+ RegionVisitor visitor = createRegionVisitor(desc);
+
+ // Open the RegionFS
+ HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
+ tableDir, regionInfo, true);
+ monitor.rethrowException();
+
+ // 1. dump region meta info into the snapshot directory
+ LOG.debug("Storing region-info for snapshot.");
+ Object regionData = visitor.regionOpen(regionInfo);
+ monitor.rethrowException();
+
+ // 2. iterate through all the stores in the region
+ LOG.debug("Creating references for hfiles");
+
+ // This ensures that we have an atomic view of the directory as long as we have < ls limit
+ // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
+ // batches and may miss files being added/deleted. This could be more robust (iteratively
+ // checking to see if we have all the files until we are sure), but the limit is currently 1000
+ // files/batch, far more than the number of store files under a single column family.
+ Collection<String> familyNames = regionFs.getFamilies();
+ if (familyNames != null) {
+ for (String familyName: familyNames) {
+ Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
+ monitor.rethrowException();
+
+ Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
+ if (storeFiles == null) {
+ LOG.debug("No files under family: " + familyName);
+ continue;
+ }
+
+ // 2.1. build the snapshot reference for the store
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+ }
+
+ // 2.2. iterate through all the store's files and create "references".
+ int i = 0;
+ int sz = storeFiles.size();
+ for (StoreFileInfo storeFile: storeFiles) {
+ monitor.rethrowException();
+
+ // create "reference" to this store file.
+ LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath());
+ visitor.storeFile(regionData, familyData, storeFile);
+ }
+ visitor.familyClose(regionData, familyData);
+ }
+ }
+ visitor.regionClose(regionData);
+ }
+
+ /**
+ * Load the information in the SnapshotManifest. Called by SnapshotManifest.open()
+ *
+ * If the format is v2 and there is no data-manifest, it means that we are loading an
+ * in-progress snapshot. Since we support rolling upgrades, we look for both the v1 and
+ * v2 region formats.
+ */
+ private void load() throws IOException {
+ switch (getSnapshotFormat(desc)) {
+ case SnapshotManifestV1.DESCRIPTOR_VERSION: {
+ this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
+ ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
+ try {
+ this.regionManifests =
+ SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+ } finally {
+ tpool.shutdown();
+ }
+ break;
+ }
+ case SnapshotManifestV2.DESCRIPTOR_VERSION: {
+ SnapshotDataManifest dataManifest = readDataManifest();
+ if (dataManifest != null) {
+ htd = HTableDescriptor.convert(dataManifest.getTableSchema());
+ regionManifests = dataManifest.getRegionManifestsList();
+ } else {
+ // Compatibility, load the v1 regions
+ // This happens only when the snapshot is in-progress and the cache wants to refresh.
+ List<SnapshotRegionManifest> v1Regions, v2Regions;
+ ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
+ try {
+ v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+ v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+ } finally {
+ tpool.shutdown();
+ }
+ if (v1Regions != null && v2Regions != null) {
+ regionManifests =
+ new ArrayList<SnapshotRegionManifest>(v1Regions.size() + v2Regions.size());
+ regionManifests.addAll(v1Regions);
+ regionManifests.addAll(v2Regions);
+ } else if (v1Regions != null) {
+ regionManifests = v1Regions;
+ } else /* if (v2Regions != null) */ {
+ regionManifests = v2Regions;
+ }
+ }
+ break;
+ }
+ default:
+ throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc);
+ }
+ }
+
+ /**
+ * Get the current snapshot working dir
+ */
+ public Path getSnapshotDir() {
+ return this.workingDir;
+ }
+
+ /**
+ * Get the SnapshotDescription
+ */
+ public SnapshotDescription getSnapshotDescription() {
+ return this.desc;
+ }
+
+ /**
+ * Get the table descriptor from the Snapshot
+ */
+ public HTableDescriptor getTableDescriptor() {
+ return this.htd;
+ }
+
+ /**
+ * Get all the Region Manifests from the snapshot
+ */
+ public List<SnapshotRegionManifest> getRegionManifests() {
+ return this.regionManifests;
+ }
+
+ /**
+ * Get all the Region Manifests from the snapshot.
+ * This is a helper that returns a map keyed by the region encoded name.
+ */
+ public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
+ if (regionManifests == null || regionManifests.size() == 0) return null;
+
+ HashMap<String, SnapshotRegionManifest> regionsMap =
+ new HashMap<String, SnapshotRegionManifest>(regionManifests.size());
+ for (SnapshotRegionManifest manifest: regionManifests) {
+ String regionName = getRegionNameFromManifest(manifest);
+ regionsMap.put(regionName, manifest);
+ }
+ return regionsMap;
+ }
+
+ public void consolidate() throws IOException {
+ if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
+ Path rootDir = FSUtils.getRootDir(conf);
+ LOG.info("Using old Snapshot Format");
+ // write a copy of descriptor to the snapshot directory
+ new FSTableDescriptors(fs, rootDir)
+ .createTableDescriptorForTableDirectory(workingDir, htd, false);
+ } else {
+ LOG.debug("Convert to Single Snapshot Manifest");
+ convertToV2SingleManifest();
+ }
+ }
+
+ /*
+ * In case of rolling-upgrade, we try to read all the formats and build
+ * the snapshot with the latest format.
+ */
+ private void convertToV2SingleManifest() throws IOException {
+ // Try to load v1 and v2 regions
+ List<SnapshotRegionManifest> v1Regions, v2Regions;
+ ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
+ try {
+ v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+ v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc);
+ } finally {
+ tpool.shutdown();
+ }
+
+ SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
+ dataManifestBuilder.setTableSchema(htd.convert());
+
+ if (v1Regions != null && v1Regions.size() > 0) {
+ dataManifestBuilder.addAllRegionManifests(v1Regions);
+ }
+ if (v2Regions != null && v2Regions.size() > 0) {
+ dataManifestBuilder.addAllRegionManifests(v2Regions);
+ }
+
+ // Write the v2 Data Manifest.
+ // Once the data-manifest is written, the snapshot can be considered complete.
+ // Currently snapshots are written in a "temporary" directory and later
+ // moved to the "complated" snapshot directory.
+ SnapshotDataManifest dataManifest = dataManifestBuilder.build();
+ writeDataManifest(dataManifest);
+ this.regionManifests = dataManifest.getRegionManifestsList();
+
+ // Remove the region manifests. Everything is now in the data-manifest.
+ // The delete operation is "relaxed", unless we get an exception we keep going.
+ // The extra files in the snapshot directory will not give any problem,
+ // since they have the same content as the data manifest, and even by re-reading
+ // them we will get the same information.
+ if (v1Regions != null && v1Regions.size() > 0) {
+ for (SnapshotRegionManifest regionManifest: v1Regions) {
+ SnapshotManifestV1.deleteRegionManifest(fs, workingDir, regionManifest);
+ }
+ }
+ if (v2Regions != null && v2Regions.size() > 0) {
+ for (SnapshotRegionManifest regionManifest: v2Regions) {
+ SnapshotManifestV2.deleteRegionManifest(fs, workingDir, regionManifest);
+ }
+ }
+ }
+
+ /*
+ * Write the SnapshotDataManifest file
+ */
+ private void writeDataManifest(final SnapshotDataManifest manifest)
+ throws IOException {
+ FSDataOutputStream stream = fs.create(new Path(workingDir, DATA_MANIFEST_NAME));
+ try {
+ manifest.writeTo(stream);
+ } finally {
+ stream.close();
+ }
+ }
+
+ /*
+ * Read the SnapshotDataManifest file
+ */
+ private SnapshotDataManifest readDataManifest() throws IOException {
+ FSDataInputStream in = null;
+ try {
+ in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME));
+ return SnapshotDataManifest.parseFrom(in);
+ } catch (FileNotFoundException e) {
+ return null;
+ } finally {
+ if (in != null) in.close();
+ }
+ }
+
+ private ThreadPoolExecutor createExecutor(final String name) {
+ return createExecutor(conf, name);
+ }
+
+ static ThreadPoolExecutor createExecutor(final Configuration conf, final String name) {
+ int maxThreads = conf.getInt("hbase.snapshot.thread.pool.max", 4);
+ return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
+ Threads.getNamedThreadFactory(name));
+ }
+
+ /**
+ * Extract the region encoded name from the region manifest
+ */
+ static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) {
+ byte[] regionName = HRegionInfo.createRegionName(
+ ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()),
+ manifest.getRegionInfo().getStartKey().toByteArray(),
+ manifest.getRegionInfo().getRegionId(), true);
+ return HRegionInfo.encodeRegionName(regionName);
+ }
+
+ /*
+ * Return the snapshot format
+ */
+ private static int getSnapshotFormat(final SnapshotDescription desc) {
+ return desc.hasVersion() ? desc.getVersion() : SnapshotManifestV1.DESCRIPTOR_VERSION;
+ }
+}
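To make the two usage patterns in the class javadoc concrete, here is a hedged sketch of the Master-side (offline) write path; workingDir, snapshotDesc, monitor, htd, tableDir and the region list are assumed to be available from the snapshot procedure and are not defined by this patch:

    SnapshotManifest manifest = SnapshotManifest.create(conf, fs, workingDir, snapshotDesc, monitor);
    manifest.addTableDescriptor(htd);
    for (HRegionInfo hri : regions) {
      // For a v2 snapshot this writes one region manifest file per region.
      manifest.addRegion(tableDir, hri);
    }
    // Merges the table schema and all region manifests into the single data.manifest file
    // (or, for v1, just copies the table descriptor into the snapshot directory).
    manifest.consolidate();

On the read side, SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc) loads everything back and exposes it through getTableDescriptor(), getRegionManifests() and getRegionManifestsMap().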
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
new file mode 100644
index 0000000..0da0367
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import com.google.protobuf.HBaseZeroCopyByteString;
+
+/**
+ * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}.
+ *
+ * Snapshot v1 layout format
+ * - Each region in the table is represented by a directory with the .hregioninfo file
+ * /snapshotName/regionName/.hregioninfo
+ * - Each file present in the table is represented by an empty file
+ * /snapshotName/regionName/familyName/fileName
+ */
+@InterfaceAudience.Private
+public class SnapshotManifestV1 {
+ private static final Log LOG = LogFactory.getLog(SnapshotManifestV1.class);
+
+ public static final int DESCRIPTOR_VERSION = 0;
+
+ private SnapshotManifestV1() {
+ }
+
+ static class ManifestBuilder implements SnapshotManifest.RegionVisitor<
+ HRegionFileSystem, Path> {
+ private final Configuration conf;
+ private final Path snapshotDir;
+ private final FileSystem fs;
+
+ public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+ this.snapshotDir = snapshotDir;
+ this.conf = conf;
+ this.fs = fs;
+ }
+
+ public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
+ HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
+ fs, snapshotDir, regionInfo);
+ return snapshotRegionFs;
+ }
+
+ public void regionClose(final HRegionFileSystem region) {
+ }
+
+ public Path familyOpen(final HRegionFileSystem snapshotRegionFs, final byte[] familyName) {
+ Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName));
+ return familyDir;
+ }
+
+ public void familyClose(final HRegionFileSystem region, final Path family) {
+ }
+
+ public void storeFile(final HRegionFileSystem region, final Path familyDir,
+ final StoreFileInfo storeFile) throws IOException {
+ Path referenceFile = new Path(familyDir, storeFile.getPath().getName());
+ boolean success = true;
+ if (storeFile.isReference()) {
+ // write the Reference object to the snapshot
+ storeFile.getReference().write(fs, referenceFile);
+ } else {
+ // create "reference" to this store file. It is intentionally an empty file -- all
+ // necessary information is captured by its fs location and filename. This allows us to
+ // only figure out what needs to be done via a single nn operation (instead of having to
+ // open and read the files as well).
+ success = fs.createNewFile(referenceFile);
+ }
+ if (!success) {
+ throw new IOException("Failed to create reference file:" + referenceFile);
+ }
+ }
+ }
+
+ static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
+ final Executor executor, final FileSystem fs, final Path snapshotDir,
+ final SnapshotDescription desc) throws IOException {
+ FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
+ if (regions == null) {
+ LOG.info("No regions under directory:" + snapshotDir);
+ return null;
+ }
+
+ final ExecutorCompletionService<SnapshotRegionManifest> completionService =
+ new ExecutorCompletionService<SnapshotRegionManifest>(executor);
+ for (final FileStatus region: regions) {
+ completionService.submit(new Callable<SnapshotRegionManifest>() {
+ @Override
+ public SnapshotRegionManifest call() throws IOException {
+ HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
+ return buildManifestFromDisk(conf, fs, snapshotDir, hri);
+ }
+ });
+ }
+
+ ArrayList<SnapshotRegionManifest> regionsManifest =
+ new ArrayList<SnapshotRegionManifest>(regions.length);
+ try {
+ for (int i = 0; i < regions.length; ++i) {
+ regionsManifest.add(completionService.take().get());
+ }
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException(e.getMessage());
+ } catch (ExecutionException e) {
+ IOException ex = new IOException();
+ ex.initCause(e.getCause());
+ throw ex;
+ }
+ return regionsManifest;
+ }
+
+ static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir,
+ final SnapshotRegionManifest manifest) throws IOException {
+ String regionName = SnapshotManifest.getRegionNameFromManifest(manifest);
+ fs.delete(new Path(snapshotDir, regionName), true);
+ }
+
+ static SnapshotRegionManifest buildManifestFromDisk (final Configuration conf,
+ final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
+ HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
+ tableDir, regionInfo, true);
+ SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
+
+ // 1. dump region meta info into the snapshot directory
+ LOG.debug("Storing region-info for snapshot.");
+ manifest.setRegionInfo(HRegionInfo.convert(regionInfo));
+
+ // 2. iterate through all the stores in the region
+ LOG.debug("Creating references for hfiles");
+
+ // This ensures that we have an atomic view of the directory as long as we have < ls limit
+ // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
+ // batches and may miss files being added/deleted. This could be more robust (iteratively
+ // checking to see if we have all the files until we are sure), but the limit is currently 1000
+ // files/batch, far more than the number of store files under a single column family.
+ Collection<String> familyNames = regionFs.getFamilies();
+ if (familyNames != null) {
+ for (String familyName: familyNames) {
+ Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName, false);
+ if (storeFiles == null) {
+ LOG.debug("No files under family: " + familyName);
+ continue;
+ }
+
+ // 2.1. build the snapshot reference for the store
+ SnapshotRegionManifest.FamilyFiles.Builder family =
+ SnapshotRegionManifest.FamilyFiles.newBuilder();
+ family.setFamilyName(HBaseZeroCopyByteString.wrap(Bytes.toBytes(familyName)));
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+ }
+
+ // 2.2. iterate through all the store's files and create "references".
+ int i = 0;
+ int sz = storeFiles.size();
+ for (StoreFileInfo storeFile: storeFiles) {
+ // create "reference" to this store file.
+ LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath());
+ SnapshotRegionManifest.StoreFile.Builder sfManifest =
+ SnapshotRegionManifest.StoreFile.newBuilder();
+ sfManifest.setName(storeFile.getPath().getName());
+ family.addStoreFiles(sfManifest.build());
+ }
+ manifest.addFamilyFiles(family.build());
+ }
+ }
+ return manifest.build();
+ }
+}
\ No newline at end of file
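In the v1 layout the reference is just an empty file whose path mirrors the original store file, so the real hfile is resolved through HFileLink at read time. A hedged sketch, assuming table, regionEncodedName, family and hfileName are the path components of one such empty file (these names are illustrative):

    HFileLink link = HFileLink.create(conf, table, regionEncodedName, family, hfileName);
    // Resolves to the live table location or, if the file was compacted away, the archive.
    FileStatus status = link.getFileStatus(fs);
    long size = status.getLen();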
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
new file mode 100644
index 0000000..2f446a5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import com.google.protobuf.HBaseZeroCopyByteString;
+
+/**
+ * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}.
+ *
+ * Snapshot v2 layout format
+ * - A single manifest file containing all the region information
+ * - In the online-snapshot case each region will write a "region manifest"
+ * /snapshotName/region-manifest.regionName
+ */
+@InterfaceAudience.Private
+public class SnapshotManifestV2 {
+ private static final Log LOG = LogFactory.getLog(SnapshotManifestV2.class);
+
+ public static final int DESCRIPTOR_VERSION = 2;
+
+ private static final String SNAPSHOT_MANIFEST_PREFIX = "region-manifest.";
+
+ static class ManifestBuilder implements SnapshotManifest.RegionVisitor<
+ SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> {
+ private final Configuration conf;
+ private final Path snapshotDir;
+ private final FileSystem fs;
+
+ public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
+ this.snapshotDir = snapshotDir;
+ this.conf = conf;
+ this.fs = fs;
+ }
+
+ public SnapshotRegionManifest.Builder regionOpen(final HRegionInfo regionInfo) {
+ SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();
+ manifest.setRegionInfo(HRegionInfo.convert(regionInfo));
+ return manifest;
+ }
+
+ public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException {
+ SnapshotRegionManifest manifest = region.build();
+ FSDataOutputStream stream = fs.create(getRegionManifestPath(snapshotDir, manifest));
+ try {
+ manifest.writeTo(stream);
+ } finally {
+ stream.close();
+ }
+ }
+
+ public SnapshotRegionManifest.FamilyFiles.Builder familyOpen(
+ final SnapshotRegionManifest.Builder region, final byte[] familyName) {
+ SnapshotRegionManifest.FamilyFiles.Builder family =
+ SnapshotRegionManifest.FamilyFiles.newBuilder();
+ family.setFamilyName(HBaseZeroCopyByteString.wrap(familyName));
+ return family;
+ }
+
+ public void familyClose(final SnapshotRegionManifest.Builder region,
+ final SnapshotRegionManifest.FamilyFiles.Builder family) {
+ region.addFamilyFiles(family.build());
+ }
+
+ public void storeFile(final SnapshotRegionManifest.Builder region,
+ final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) {
+ SnapshotRegionManifest.StoreFile.Builder sfManifest =
+ SnapshotRegionManifest.StoreFile.newBuilder();
+ sfManifest.setName(storeFile.getPath().getName());
+ if (storeFile.isReference()) {
+ sfManifest.setReference(storeFile.getReference().convert());
+ }
+ sfManifest.setFileSize(storeFile.getFileStatus().getLen());
+ family.addStoreFiles(sfManifest.build());
+ }
+ }
+
+ static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
+ final Executor executor, final FileSystem fs, final Path snapshotDir,
+ final SnapshotDescription desc) throws IOException {
+ FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
+ @Override
+ public boolean accept(Path path) {
+ return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX);
+ }
+ });
+
+ if (manifestFiles == null || manifestFiles.length == 0) return null;
+
+ final ExecutorCompletionService<SnapshotRegionManifest> completionService =
+ new ExecutorCompletionService<SnapshotRegionManifest>(executor);
+ for (final FileStatus st: manifestFiles) {
+ completionService.submit(new Callable<SnapshotRegionManifest>() {
+ @Override
+ public SnapshotRegionManifest call() throws IOException {
+ FSDataInputStream stream = fs.open(st.getPath());
+ try {
+ return SnapshotRegionManifest.parseFrom(stream);
+ } finally {
+ stream.close();
+ }
+ }
+ });
+ }
+
+ ArrayList<SnapshotRegionManifest> regionsManifest =
+ new ArrayList<SnapshotRegionManifest>(manifestFiles.length);
+ try {
+ for (int i = 0; i < manifestFiles.length; ++i) {
+ regionsManifest.add(completionService.take().get());
+ }
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException(e.getMessage());
+ } catch (ExecutionException e) {
+ IOException ex = new IOException();
+ ex.initCause(e.getCause());
+ throw ex;
+ }
+ return regionsManifest;
+ }
+
+ static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir,
+ final SnapshotRegionManifest manifest) throws IOException {
+ fs.delete(getRegionManifestPath(snapshotDir, manifest), true);
+ }
+
+ private static Path getRegionManifestPath(final Path snapshotDir,
+ final SnapshotRegionManifest manifest) {
+ String regionName = SnapshotManifest.getRegionNameFromManifest(manifest);
+ return new Path(snapshotDir, SNAPSHOT_MANIFEST_PREFIX + regionName);
+ }
+}
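For the v2 format, each per-region protobuf file can also be parsed on its own, which is essentially what loadRegionManifests() does for each entry. A hedged sketch, assuming fs, snapshotDir and encodedRegionName are available (illustrative names, not part of the patch):

    Path manifestPath = new Path(snapshotDir, "region-manifest." + encodedRegionName);
    FSDataInputStream in = fs.open(manifestPath);
    try {
      SnapshotRegionManifest regionManifest = SnapshotRegionManifest.parseFrom(in);
      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
      for (SnapshotRegionManifest.FamilyFiles ff : regionManifest.getFamilyFilesList()) {
        System.out.println(hri.getEncodedName() + "/" + ff.getFamilyName().toStringUtf8()
            + ": " + ff.getStoreFilesList().size() + " store files");
      }
    } finally {
      in.close();
    }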
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
index dc977bb..f9b0638 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
@@ -19,24 +19,28 @@
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
-import java.io.FileNotFoundException;
+import java.io.InterruptedIOException;
import java.util.HashSet;
-import java.util.LinkedList;
import java.util.List;
-import java.util.Map;
import java.util.Set;
-import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.FSVisitor;
/**
@@ -44,8 +48,15 @@ import org.apache.hadoop.hbase.util.FSVisitor;
*/
@InterfaceAudience.Private
public final class SnapshotReferenceUtil {
- public interface FileVisitor extends FSVisitor.StoreFileVisitor,
- FSVisitor.RecoveredEditsVisitor, FSVisitor.LogFileVisitor {
+ public static final Log LOG = LogFactory.getLog(SnapshotReferenceUtil.class);
+
+ public interface StoreFileVisitor {
+ void storeFile(final HRegionInfo regionInfo, final String familyName,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException;
+ }
+
+ public interface SnapshotVisitor extends StoreFileVisitor,
+ FSVisitor.LogFileVisitor {
}
private SnapshotReferenceUtil() {
@@ -64,80 +75,79 @@ public final class SnapshotReferenceUtil {
}
/**
- * Get the snapshotted recovered.edits dir for the specified region.
- *
- * @param snapshotDir directory where the specific snapshot is stored
- * @param regionName name of the region
- * @return path to the recovered.edits directory for the specified region files.
- */
- public static Path getRecoveredEditsDir(Path snapshotDir, String regionName) {
- return HLogUtil.getRegionDirRecoveredEditsDir(new Path(snapshotDir, regionName));
- }
-
- /**
- * Get the snapshot recovered.edits file
+ * Iterate over the snapshot store files, restored.edits and logs
*
- * @param snapshotDir directory where the specific snapshot is stored
- * @param regionName name of the region
- * @param logfile name of the edit file
- * @return full path of the log file for the specified region files.
+ * @param conf The current {@link Configuration} instance.
+ * @param fs {@link FileSystem}
+ * @param snapshotDir {@link Path} to the Snapshot directory
+ * @param visitor callback object to get the referenced files
+ * @throws IOException if an error occurred while scanning the directory
*/
- public static Path getRecoveredEdits(Path snapshotDir, String regionName, String logfile) {
- return new Path(getRecoveredEditsDir(snapshotDir, regionName), logfile);
+ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir, final SnapshotVisitor visitor)
+ throws IOException {
+ SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+ visitReferencedFiles(conf, fs, snapshotDir, desc, visitor);
}
/**
* Iterate over the snapshot store files, restored.edits and logs
*
+ * @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
+ * @param desc the {@link SnapshotDescription} of the snapshot to verify
* @param visitor callback object to get the referenced files
* @throws IOException if an error occurred while scanning the directory
*/
- public static void visitReferencedFiles(final FileSystem fs, final Path snapshotDir,
- final FileVisitor visitor) throws IOException {
- visitTableStoreFiles(fs, snapshotDir, visitor);
- visitRecoveredEdits(fs, snapshotDir, visitor);
+ public static void visitReferencedFiles(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor)
+ throws IOException {
+ visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor);
visitLogFiles(fs, snapshotDir, visitor);
}
/**
* Iterate over the snapshot store files
*
+ * @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
+ * @param desc the {@link SnapshotDescription} of the snapshot to verify
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
- public static void visitTableStoreFiles(final FileSystem fs, final Path snapshotDir,
- final FSVisitor.StoreFileVisitor visitor) throws IOException {
- FSVisitor.visitTableStoreFiles(fs, snapshotDir, visitor);
+ public static void visitTableStoreFiles(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor)
+ throws IOException {
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
+ List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+ if (regionManifests == null || regionManifests.size() == 0) {
+ LOG.debug("No manifest files present: " + snapshotDir);
+ return;
+ }
+
+ for (SnapshotRegionManifest regionManifest: regionManifests) {
+ visitRegionStoreFiles(regionManifest, visitor);
+ }
}
/**
* Iterate over the snapshot store files in the specified region
*
- * @param fs {@link FileSystem}
- * @param regionDir {@link Path} to the Snapshot region directory
+ * @param manifest snapshot manifest to inspect
* @param visitor callback object to get the store files
* @throws IOException if an error occurred while scanning the directory
*/
- public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
- final FSVisitor.StoreFileVisitor visitor) throws IOException {
- FSVisitor.visitRegionStoreFiles(fs, regionDir, visitor);
- }
-
- /**
- * Iterate over the snapshot recovered.edits
- *
- * @param fs {@link FileSystem}
- * @param snapshotDir {@link Path} to the Snapshot directory
- * @param visitor callback object to get the recovered.edits files
- * @throws IOException if an error occurred while scanning the directory
- */
- public static void visitRecoveredEdits(final FileSystem fs, final Path snapshotDir,
- final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
- FSVisitor.visitTableRecoveredEdits(fs, snapshotDir, visitor);
+ public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest,
+ final StoreFileVisitor visitor) throws IOException {
+ HRegionInfo regionInfo = HRegionInfo.convert(manifest.getRegionInfo());
+ for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) {
+ String familyName = familyFiles.getFamilyName().toStringUtf8();
+ for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) {
+ visitor.storeFile(regionInfo, familyName, storeFile);
+ }
+ }
}
/**
@@ -165,85 +175,151 @@ public final class SnapshotReferenceUtil {
*/
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException {
- final TableName table = TableName.valueOf(snapshotDesc.getTable());
- visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- HFileLink link = HFileLink.create(conf, table, region, family, hfile);
- try {
- link.getFileStatus(fs);
- } catch (FileNotFoundException e) {
- throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e);
- }
- }
- });
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
+ verifySnapshot(conf, fs, manifest);
}
/**
- * Returns the set of region names available in the snapshot.
+ * Verify the validity of the snapshot
*
+ * @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
- * @param snapshotDir {@link Path} to the Snapshot directory
+ * @param manifest snapshot manifest to inspect
+ * @throws CorruptedSnapshotException if the snapshot is corrupted
* @throws IOException if an error occurred while scanning the directory
- * @return the set of the regions contained in the snapshot
*/
- public static Set<String> getSnapshotRegionNames(final FileSystem fs, final Path snapshotDir)
- throws IOException {
- FileStatus[] regionDirs = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
- if (regionDirs == null) return null;
+ public static void verifySnapshot(final Configuration conf, final FileSystem fs,
+ final SnapshotManifest manifest) throws IOException {
+ final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
+ final Path snapshotDir = manifest.getSnapshotDir();
- Set<String> regions = new HashSet<String>();
- for (FileStatus regionDir: regionDirs) {
- regions.add(regionDir.getPath().getName());
+ List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
+ if (regionManifests == null || regionManifests.size() == 0) {
+ LOG.debug("No manifest files present: " + snapshotDir);
+ return;
+ }
+
+ ExecutorService exec = SnapshotManifest.createExecutor(conf, "VerifySnapshot");
+ final ExecutorCompletionService<Void> completionService =
+ new ExecutorCompletionService<Void>(exec);
+ try {
+ for (final SnapshotRegionManifest regionManifest: regionManifests) {
+ completionService.submit(new Callable<Void>() {
+ @Override
+ public Void call() throws IOException {
+ visitRegionStoreFiles(regionManifest, new StoreFileVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile);
+ }
+ });
+ return null;
+ }
+ });
+ }
+ try {
+ for (int i = 0; i < regionManifests.size(); ++i) {
+ completionService.take().get();
+ }
+ } catch (InterruptedException e) {
+ throw new InterruptedIOException(e.getMessage());
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof CorruptedSnapshotException) {
+ throw new CorruptedSnapshotException(e.getCause().getMessage(), snapshotDesc);
+ } else {
+ IOException ex = new IOException();
+ ex.initCause(e.getCause());
+ throw ex;
+ }
+ }
+ } finally {
+ exec.shutdown();
}
- return regions;
}
/**
- * Get the list of hfiles for the specified snapshot region.
- * NOTE: The current implementation keeps one empty file per HFile in the region.
- * The file name matches the one in the original table, and by reconstructing
- * the path you can quickly jump to the referenced file.
+ * Verify the validity of the snapshot store file
*
+ * @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
- * @param snapshotRegionDir {@link Path} to the Snapshot region directory
- * @return Map of hfiles per family, the key is the family name and values are hfile names
+ * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
+ * @param snapshot the {@link SnapshotDescription} of the snapshot to verify
+ * @param regionInfo {@link HRegionInfo} of the region that contains the store file
+ * @param family family that contains the store file
+ * @param storeFile the store file to verify
+ * @throws CorruptedSnapshotException if the snapshot is corrupted
* @throws IOException if an error occurred while scanning the directory
*/
- public static Map<String, List<String>> getRegionHFileReferences(final FileSystem fs,
- final Path snapshotRegionDir) throws IOException {
- final Map<String, List<String>> familyFiles = new TreeMap<String, List<String>>();
+ public static void verifyStoreFile(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir, final SnapshotDescription snapshot, final HRegionInfo regionInfo,
+ final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ String fileName = storeFile.getName();
- visitRegionStoreFiles(fs, snapshotRegionDir,
- new FSVisitor.StoreFileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
- List<String> hfiles = familyFiles.get(family);
- if (hfiles == null) {
- hfiles = new LinkedList<String>();
- familyFiles.put(family, hfiles);
- }
- hfiles.add(hfile);
- }
- });
+ Path refPath = null;
+ if (StoreFileInfo.isReference(fileName)) {
+ // If it is a reference file, check if the parent file is present in the snapshot
+ Path snapshotHFilePath = new Path(new Path(
+ new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
+ refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath);
+ if (!fs.exists(refPath)) {
+ throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
+ }
+ }
+
+ Path linkPath;
+ if (refPath != null && HFileLink.isHFileLink(refPath)) {
+ linkPath = new Path(family, refPath.getName());
+ } else if (HFileLink.isHFileLink(fileName)) {
+ linkPath = new Path(family, fileName);
+ } else {
+ linkPath = new Path(family, HFileLink.createHFileLinkName(
+ TableName.valueOf(snapshot.getTable()), regionInfo.getEncodedName(), fileName));
+ }
- return familyFiles;
+ // check if the linked file exists (in the archive, or in the table dir)
+ HFileLink link = new HFileLink(conf, linkPath);
+ if (!link.exists(fs)) {
+ throw new CorruptedSnapshotException("Can't find hfile: " + fileName
+ + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath()
+ + ") directory for the primary table.", snapshot);
+ }
+ }
+
+ /**
+ * Returns the store file names in the snapshot.
+ *
+ * @param conf The current {@link Configuration} instance.
+ * @param fs {@link FileSystem}
+ * @param snapshotDir {@link Path} to the Snapshot directory
+ * @throws IOException if an error occurred while scanning the directory
+ * @return the names of hfiles in the specified snapshot
+ */
+ public static Set<String> getHFileNames(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir) throws IOException {
+ SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
+ return getHFileNames(conf, fs, snapshotDir, desc);
}
/**
* Returns the store file names in the snapshot.
*
+ * @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory
+ * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to inspect
* @throws IOException if an error occurred while scanning the directory
* @return the names of hfiles in the specified snapshot
*/
- public static Set<String> getHFileNames(final FileSystem fs, final Path snapshotDir)
+ private static Set<String> getHFileNames(final Configuration conf, final FileSystem fs,
+ final Path snapshotDir, final SnapshotDescription snapshotDesc)
throws IOException {
final Set<String> names = new HashSet<String>();
- visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
- public void storeFile (final String region, final String family, final String hfile)
- throws IOException {
+ visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() {
+ @Override
+ public void storeFile(final HRegionInfo regionInfo, final String family,
+ final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+ String hfile = storeFile.getName();
if (HFileLink.isHFileLink(hfile)) {
names.add(HFileLink.getReferencedHFileName(hfile));
} else {
@@ -266,6 +342,7 @@ public final class SnapshotReferenceUtil {
throws IOException {
final Set<String> names = new HashSet<String>();
visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
+ @Override
public void logFile (final String server, final String logfile) throws IOException {
names.add(logfile);
}
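For reference, a minimal usage sketch of the manifest-based SnapshotReferenceUtil API introduced above. The class name, the snapshot name "example_snapshot", and the assumption that visitTableStoreFiles and the nested StoreFileVisitor interface are publicly accessible are illustrative only; the sketch relies solely on the signatures visible in this patch.

// Hypothetical caller of the reworked SnapshotReferenceUtil (not part of the patch).
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.FSUtils;

public class SnapshotVerifyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    // Locate the completed snapshot ("example_snapshot" is a placeholder) and read its descriptor.
    Path snapshotDir =
        SnapshotDescriptionUtils.getCompletedSnapshotDir("example_snapshot", rootDir);
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

    // Throws CorruptedSnapshotException if a referenced store file is missing.
    SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, desc);

    // Walk the store files recorded in the region manifests instead of scanning region dirs.
    SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, desc,
        new SnapshotReferenceUtil.StoreFileVisitor() {
          @Override
          public void storeFile(HRegionInfo regionInfo, String family,
              SnapshotRegionManifest.StoreFile storeFile) throws IOException {
            System.out.println(regionInfo.getEncodedName() + "/" + family
                + "/" + storeFile.getName());
          }
        });
  }
}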
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java
deleted file mode 100644
index ede2d85..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotTask.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-
-/**
- * General snapshot operation taken on a regionserver
- */
-@InterfaceAudience.Private
-public abstract class SnapshotTask implements ForeignExceptionSnare, Callable<Void> {
-
- protected final SnapshotDescription snapshot;
- protected final ForeignExceptionDispatcher errorMonitor;
-
- /**
- * @param snapshot Description of the snapshot we are going to operate on
- * @param monitor listener interested in failures to the snapshot caused by this operation
- */
- public SnapshotTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor) {
- assert monitor != null : "ForeignExceptionDispatcher must not be null!";
- assert snapshot != null : "SnapshotDescription must not be null!";
- this.snapshot = snapshot;
- this.errorMonitor = monitor;
- }
-
- public void snapshotFailure(String message, Exception e) {
- ForeignException ee = new ForeignException(message, e);
- errorMonitor.receive(ee);
- }
-
- @Override
- public void rethrowException() throws ForeignException {
- this.errorMonitor.rethrowException();
- }
-
- @Override
- public boolean hasException() {
- return this.errorMonitor.hasException();
- }
-
- @Override
- public ForeignException getException() {
- return this.errorMonitor.getException();
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java
deleted file mode 100644
index ec50b71..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-
-/**
- * Copy the table info into the snapshot directory
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class TableInfoCopyTask extends SnapshotTask {
-
- public static final Log LOG = LogFactory.getLog(TableInfoCopyTask.class);
- private final FileSystem fs;
- private final Path rootDir;
-
- /**
- * Copy the table info for the given table into the snapshot
- * @param monitor listen for errors while running the snapshot
- * @param snapshot snapshot for which we are copying the table info
- * @param fs {@link FileSystem} where the tableinfo is stored (and where the copy will be written)
- * @param rootDir root of the {@link FileSystem} where the tableinfo is stored
- */
- public TableInfoCopyTask(ForeignExceptionDispatcher monitor,
- SnapshotDescription snapshot, FileSystem fs, Path rootDir) {
- super(snapshot, monitor);
- this.rootDir = rootDir;
- this.fs = fs;
- }
-
- @Override
- public Void call() throws Exception {
- LOG.debug("Running table info copy.");
- this.rethrowException();
- LOG.debug("Attempting to copy table info for snapshot:"
- + ClientSnapshotDescriptionUtils.toString(this.snapshot));
- // get the HTable descriptor
- HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
- TableName.valueOf(this.snapshot.getTable()));
- this.rethrowException();
- // write a copy of descriptor to the snapshot directory
- Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
- new FSTableDescriptors(fs, rootDir)
- .createTableDescriptorForTableDirectory(snapshotDir, orig, false);
- LOG.debug("Finished copying tableinfo.");
- return null;
- }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java
deleted file mode 100644
index fdc1834..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
-import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-
-/**
- * Utilities for useful when taking a snapshot
- */
-public class TakeSnapshotUtils {
-
- private static final Log LOG = LogFactory.getLog(TakeSnapshotUtils.class);
-
- private TakeSnapshotUtils() {
- // private constructor for util class
- }
-
- /**
- * Get the per-region snapshot description location.
- *
- * Under the per-snapshot directory, specific files per-region are kept in a similar layout as per
- * the current directory layout.
- * @param desc description of the snapshot
- * @param rootDir root directory for the hbase installation
- * @param regionName encoded name of the region (see {@link HRegionInfo#encodeRegionName(byte[])})
- * @return path to the per-region directory for the snapshot
- */
- public static Path getRegionSnapshotDirectory(SnapshotDescription desc, Path rootDir,
- String regionName) {
- Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
- return HRegion.getRegionDir(snapshotDir, regionName);
- }
-
- /**
- * Get the snapshot directory for each family to be added to the the snapshot
- * @param snapshot description of the snapshot being take
- * @param snapshotRegionDir directory in the snapshot where the region directory information
- * should be stored
- * @param families families to be added (can be null)
- * @return paths to the snapshot directory for each family, in the same order as the families
- * passed in
- */
- public static List<Path> getFamilySnapshotDirectories(SnapshotDescription snapshot,
- Path snapshotRegionDir, FileStatus[] families) {
- if (families == null || families.length == 0) return Collections.emptyList();
-
- List<Path> familyDirs = new ArrayList<Path>(families.length);
- for (FileStatus family : families) {
- // build the reference directory name
- familyDirs.add(new Path(snapshotRegionDir, family.getPath().getName()));
- }
- return familyDirs;
- }
-
- /**
- * Create a snapshot timer for the master which notifies the monitor when an error occurs
- * @param snapshot snapshot to monitor
- * @param conf configuration to use when getting the max snapshot life
- * @param monitor monitor to notify when the snapshot life expires
- * @return the timer to use update to signal the start and end of the snapshot
- */
- public static TimeoutExceptionInjector getMasterTimerAndBindToMonitor(SnapshotDescription snapshot,
- Configuration conf, ForeignExceptionListener monitor) {
- long maxTime = SnapshotDescriptionUtils.getMaxMasterTimeout(conf, snapshot.getType(),
- SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);
- return new TimeoutExceptionInjector(monitor, maxTime);
- }
-
- /**
- * Verify that all the expected logs got referenced
- * @param fs filesystem where the logs live
- * @param logsDir original logs directory
- * @param serverNames names of the servers that involved in the snapshot
- * @param snapshot description of the snapshot being taken
- * @param snapshotLogDir directory for logs in the snapshot
- * @throws IOException
- */
- public static void verifyAllLogsGotReferenced(FileSystem fs, Path logsDir,
- Set<String> serverNames, SnapshotDescription snapshot, Path snapshotLogDir)
- throws IOException {
- assertTrue(snapshot, "Logs directory doesn't exist in snapshot", fs.exists(logsDir));
- // for each of the server log dirs, make sure it matches the main directory
- Multimap<String, String> snapshotLogs = getMapOfServersAndLogs(fs, snapshotLogDir, serverNames);
- Multimap<String, String> realLogs = getMapOfServersAndLogs(fs, logsDir, serverNames);
- if (realLogs != null) {
- assertNotNull(snapshot, "No server logs added to snapshot", snapshotLogs);
- } else {
- assertNull(snapshot, "Snapshotted server logs that don't exist", snapshotLogs);
- }
-
- // check the number of servers
- Set<Entry<String, Collection<String>>> serverEntries = realLogs.asMap().entrySet();
- Set<Entry<String, Collection<String>>> snapshotEntries = snapshotLogs.asMap().entrySet();
- assertEquals(snapshot, "Not the same number of snapshot and original server logs directories",
- serverEntries.size(), snapshotEntries.size());
-
- // verify we snapshotted each of the log files
- for (Entry<String, Collection<String>> serverLogs : serverEntries) {
- // if the server is not the snapshot, skip checking its logs
- if (!serverNames.contains(serverLogs.getKey())) continue;
- Collection<String> snapshotServerLogs = snapshotLogs.get(serverLogs.getKey());
- assertNotNull(snapshot, "Snapshots missing logs for server:" + serverLogs.getKey(),
- snapshotServerLogs);
-
- // check each of the log files
- assertEquals(snapshot,
- "Didn't reference all the log files for server:" + serverLogs.getKey(), serverLogs
- .getValue().size(), snapshotServerLogs.size());
- for (String log : serverLogs.getValue()) {
- assertTrue(snapshot, "Snapshot logs didn't include " + log,
- snapshotServerLogs.contains(log));
- }
- }
- }
-
- /**
- * Verify one of a snapshot's region's recovered.edits, has been at the surface (file names,
- * length), match the original directory.
- * @param fs filesystem on which the snapshot had been taken
- * @param rootDir full path to the root hbase directory
- * @param regionInfo info for the region
- * @param snapshot description of the snapshot that was taken
- * @throws IOException if there is an unexpected error talking to the filesystem
- */
- public static void verifyRecoveredEdits(FileSystem fs, Path rootDir, HRegionInfo regionInfo,
- SnapshotDescription snapshot) throws IOException {
- Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
- Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
- Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(snapshot, rootDir,
- regionInfo.getEncodedName());
- Path snapshotEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
-
- FileStatus[] edits = FSUtils.listStatus(fs, editsDir);
- FileStatus[] snapshotEdits = FSUtils.listStatus(fs, snapshotEditsDir);
- if (edits == null) {
- assertNull(snapshot, "Snapshot has edits but table doesn't", snapshotEdits);
- return;
- }
-
- assertNotNull(snapshot, "Table has edits, but snapshot doesn't", snapshotEdits);
-
- // check each of the files
- assertEquals(snapshot, "Not same number of edits in snapshot as table", edits.length,
- snapshotEdits.length);
-
- // make sure we have a file with the same name as the original
- // it would be really expensive to verify the content matches the original
- for (FileStatus edit : edits) {
- for (FileStatus sEdit : snapshotEdits) {
- if (sEdit.getPath().equals(edit.getPath())) {
- assertEquals(snapshot, "Snapshot file" + sEdit.getPath()
- + " length not equal to the original: " + edit.getPath(), edit.getLen(),
- sEdit.getLen());
- break;
- }
- }
- assertTrue(snapshot, "No edit in snapshot with name:" + edit.getPath(), false);
- }
- }
-
- private static void assertNull(SnapshotDescription snapshot, String msg, Object isNull)
- throws CorruptedSnapshotException {
- if (isNull != null) {
- throw new CorruptedSnapshotException(msg + ", Expected " + isNull + " to be null.", snapshot);
- }
- }
-
- private static void assertNotNull(SnapshotDescription snapshot, String msg, Object notNull)
- throws CorruptedSnapshotException {
- if (notNull == null) {
- throw new CorruptedSnapshotException(msg + ", Expected object to not be null, but was null.",
- snapshot);
- }
- }
-
- private static void assertTrue(SnapshotDescription snapshot, String msg, boolean isTrue)
- throws CorruptedSnapshotException {
- if (!isTrue) {
- throw new CorruptedSnapshotException(msg + ", Expected true, but was false", snapshot);
- }
- }
-
- /**
- * Assert that the expect matches the gotten amount
- * @param msg message to add the to exception
- * @param expected
- * @param gotten
- * @throws CorruptedSnapshotException thrown if the two elements don't match
- */
- private static void assertEquals(SnapshotDescription snapshot, String msg, int expected,
- int gotten) throws CorruptedSnapshotException {
- if (expected != gotten) {
- throw new CorruptedSnapshotException(msg + ". Expected:" + expected + ", got:" + gotten,
- snapshot);
- }
- }
-
- /**
- * Assert that the expect matches the gotten amount
- * @param msg message to add the to exception
- * @param expected
- * @param gotten
- * @throws CorruptedSnapshotException thrown if the two elements don't match
- */
- private static void assertEquals(SnapshotDescription snapshot, String msg, long expected,
- long gotten) throws CorruptedSnapshotException {
- if (expected != gotten) {
- throw new CorruptedSnapshotException(msg + ". Expected:" + expected + ", got:" + gotten,
- snapshot);
- }
- }
-
- /**
- * @param logdir
- * @param toInclude list of servers to include. If empty or null, returns all servers
- * @return maps of servers to all their log files. If there is no log directory, returns
- * null
- */
- private static Multimap<String, String> getMapOfServersAndLogs(FileSystem fs, Path logdir,
- Collection<String> toInclude) throws IOException {
- // create a path filter based on the passed directories to include
- PathFilter filter = toInclude == null || toInclude.size() == 0 ? null
- : new MatchesDirectoryNames(toInclude);
-
- // get all the expected directories
- FileStatus[] serverLogDirs = FSUtils.listStatus(fs, logdir, filter);
- if (serverLogDirs == null) return null;
-
- // map those into a multimap of servername -> [log files]
- Multimap<String, String> map = HashMultimap.create();
- for (FileStatus server : serverLogDirs) {
- FileStatus[] serverLogs = FSUtils.listStatus(fs, server.getPath(), null);
- if (serverLogs == null) continue;
- for (FileStatus log : serverLogs) {
- map.put(server.getPath().getName(), log.getPath().getName());
- }
- }
- return map;
- }
-
- /**
- * Path filter that only accepts paths where that have a {@link Path#getName()} that is contained
- * in the specified collection.
- */
- private static class MatchesDirectoryNames implements PathFilter {
-
- Collection<String> paths;
-
- public MatchesDirectoryNames(Collection<String> dirNames) {
- this.paths = dirNames;
- }
-
- @Override
- public boolean accept(Path path) {
- return paths.contains(path.getName());
- }
- }
-
- /**
- * Get the log directory for a specific snapshot
- * @param snapshotDir directory where the specific snapshot will be store
- * @param serverName name of the parent regionserver for the log files
- * @return path to the log home directory for the archive files.
- */
- public static Path getSnapshotHLogsDir(Path snapshotDir, String serverName) {
- return new Path(snapshotDir, HLogUtil.getHLogDirectoryName(serverName));
- }
-}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
index 75729b6..c771144 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java
@@ -150,7 +150,7 @@ public final class FSVisitor {
final FSVisitor.RecoveredEditsVisitor visitor) throws IOException {
FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
if (regions == null) {
- LOG.info("No regions under directory:" + tableDir);
+ LOG.info("No recoveredEdits regions under directory:" + tableDir);
return;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index ef53ad2..75f1c10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -20,9 +20,7 @@ package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import java.util.HashSet;
import java.util.List;
-import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -39,9 +37,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -182,15 +180,6 @@ public class TestSnapshotFromClient {
HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
UTIL.loadTable(table, TEST_FAM, false);
- // get the name of all the regionservers hosting the snapshotted table
- Set<String> snapshotServers = new HashSet<String>();
- List<RegionServerThread> servers = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
- for (RegionServerThread server : servers) {
- if (server.getRegionServer().getOnlineRegions(TABLE_NAME).size() > 0) {
- snapshotServers.add(server.getRegionServer().getServerName().toString());
- }
- }
-
LOG.debug("FS state before disable:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
@@ -203,8 +192,16 @@ public class TestSnapshotFromClient {
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the disabled table
- byte[] snapshot = Bytes.toBytes("offlineTableSnapshot");
- admin.snapshot(snapshot, TABLE_NAME);
+ final String SNAPSHOT_NAME = "offlineTableSnapshot";
+ byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
+
+ SnapshotDescription desc = SnapshotDescription.newBuilder()
+ .setType(SnapshotDescription.Type.DISABLED)
+ .setTable(STRING_TABLE_NAME)
+ .setName(SNAPSHOT_NAME)
+ .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
+ .build();
+ admin.snapshot(desc);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
@@ -219,7 +216,7 @@ public class TestSnapshotFromClient {
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
- admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), snapshotServers);
+ admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
@@ -263,15 +260,6 @@ public class TestSnapshotFromClient {
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
- // get the name of all the regionservers hosting the snapshotted table
- Set<String> snapshotServers = new HashSet<String>();
- List<RegionServerThread> servers = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
- for (RegionServerThread server : servers) {
- if (server.getRegionServer().getOnlineRegions(TABLE_NAME).size() > 0) {
- snapshotServers.add(server.getRegionServer().getServerName().toString());
- }
- }
-
LOG.debug("FS state before disable:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
@@ -299,8 +287,8 @@ public class TestSnapshotFromClient {
List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
List<byte[]> nonEmptyCfs = Lists.newArrayList();
- SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs, rootDir,
- admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), snapshotServers);
+ SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs,
+ rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index 277a458..8b2dd1d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -29,13 +29,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
@@ -94,7 +94,7 @@ public class TestTableSnapshotScanner {
HTable table = new HTable(util.getConfiguration(), tableName);
util.loadTable(table, FAMILIES);
- Path rootDir = new Path(util.getConfiguration().get(HConstants.HBASE_DIR));
+ Path rootDir = FSUtils.getRootDir(util.getConfiguration());
FileSystem fs = rootDir.getFileSystem(util.getConfiguration());
SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
@@ -124,7 +124,7 @@ public class TestTableSnapshotScanner {
testScanner(UTIL, "testWithMultiRegion", 20, true);
}
- private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
+ private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
boolean shutdownCluster) throws Exception {
setupCluster();
TableName tableName = TableName.valueOf("testScanner");
@@ -138,7 +138,7 @@ public class TestTableSnapshotScanner {
Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
Scan scan = new Scan(bbb, yyy); // limit the scan
- TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
+ TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
snapshotName, scan);
verifyScanner(scanner, bbb, yyy);
@@ -155,7 +155,7 @@ public class TestTableSnapshotScanner {
private void verifyScanner(ResultScanner scanner, byte[] startRow, byte[] stopRow)
throws IOException, InterruptedException {
- HBaseTestingUtility.SeenRowTracker rowTracker =
+ HBaseTestingUtility.SeenRowTracker rowTracker =
new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
while (true) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index e02d504..9f2d390 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
@@ -44,12 +43,12 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
@@ -59,7 +58,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.junit.After;
import org.junit.Assert;
-import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -125,17 +123,17 @@ public class TestTableSnapshotInputFormat {
Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));
blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2);
- Assert.assertEquals(Lists.newArrayList("h1", "h2"),
+ Assert.assertEquals(Lists.newArrayList("h1", "h2"),
tsif.getBestLocations(conf, blockDistribution));
blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3);
- Assert.assertEquals(Lists.newArrayList("h2", "h1"),
+ Assert.assertEquals(Lists.newArrayList("h2", "h1"),
tsif.getBestLocations(conf, blockDistribution));
blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6);
blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9);
- Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"),
+ Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"),
tsif.getBestLocations(conf, blockDistribution));
}
@@ -156,7 +154,7 @@ public class TestTableSnapshotInputFormat {
public static class TestTableSnapshotReducer
extends Reducer<ImmutableBytesWritable, NullWritable, NullWritable, NullWritable> {
- HBaseTestingUtility.SeenRowTracker rowTracker =
+ HBaseTestingUtility.SeenRowTracker rowTracker =
new HBaseTestingUtility.SeenRowTracker(bbb, yyy);
@Override
protected void reduce(ImmutableBytesWritable key, Iterable<NullWritable> values,
@@ -191,7 +189,7 @@ public class TestTableSnapshotInputFormat {
HTable table = new HTable(util.getConfiguration(), tableName);
util.loadTable(table, FAMILIES);
- Path rootDir = new Path(util.getConfiguration().get(HConstants.HBASE_DIR));
+ Path rootDir = FSUtils.getRootDir(util.getConfiguration());
FileSystem fs = rootDir.getFileSystem(util.getConfiguration());
SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
@@ -250,7 +248,7 @@ public class TestTableSnapshotInputFormat {
testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 8);
}
- public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
+ public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
int numRegions, int expectedNumSplits) throws Exception {
setupCluster();
TableName tableName = TableName.valueOf("testWithMockedMapReduce");
@@ -282,7 +280,7 @@ public class TestTableSnapshotInputFormat {
Assert.assertEquals(expectedNumSplits, splits.size());
- HBaseTestingUtility.SeenRowTracker rowTracker =
+ HBaseTestingUtility.SeenRowTracker rowTracker =
new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
for (int i = 0; i < splits.size(); i++) {
@@ -293,7 +291,7 @@ public class TestTableSnapshotInputFormat {
// validate record reader
TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
- RecordReader<ImmutableBytesWritable, Result> rr =
+ RecordReader<ImmutableBytesWritable, Result> rr =
tsif.createRecordReader(split, taskAttemptContext);
rr.initialize(split, taskAttemptContext);
@@ -311,7 +309,7 @@ public class TestTableSnapshotInputFormat {
rowTracker.validate();
}
- public static void verifyRowFromMap(ImmutableBytesWritable key, Result result)
+ public static void verifyRowFromMap(ImmutableBytesWritable key, Result result)
throws IOException {
byte[] row = key.get();
CellScanner scanner = result.cellScanner();
@@ -363,7 +361,7 @@ public class TestTableSnapshotInputFormat {
// this is also called by the IntegrationTestTableSnapshotInputFormat
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
- String snapshotName, Path tableDir, int numRegions, int expectedNumSplits,
+ String snapshotName, Path tableDir, int numRegions, int expectedNumSplits,
boolean shutdownCluster) throws Exception {
//create the table and snapshot
@@ -379,7 +377,7 @@ public class TestTableSnapshotInputFormat {
Scan scan = new Scan(bbb, yyy); // limit the scan
job.setJarByClass(util.getClass());
- TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
+ TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
TestTableSnapshotInputFormat.class);
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
index e62b788..f1c5adc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
@@ -26,6 +26,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Bytes;
@@ -330,19 +332,20 @@ public class TestSnapshotFromMaster {
// get the snapshot files for the table
Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
- Path[] snapshotHFiles = SnapshotTestingUtils.listHFiles(fs, snapshotTable);
+ Set<String> snapshotHFiles = SnapshotReferenceUtil.getHFileNames(
+ UTIL.getConfiguration(), fs, snapshotTable);
// check that the files in the archive contain the ones that we need for the snapshot
LOG.debug("Have snapshot hfiles:");
- for (Path file : snapshotHFiles) {
- LOG.debug(file);
+ for (String fileName : snapshotHFiles) {
+ LOG.debug(fileName);
}
// get the archived files for the table
Collection<String> files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME);
// and make sure that there is a proper subset
- for (Path file : snapshotHFiles) {
- assertTrue("Archived hfiles " + files + " is missing snapshot file:" + file,
- files.contains(file.getName()));
+ for (String fileName : snapshotHFiles) {
+ assertTrue("Archived hfiles " + files + " is missing snapshot file:" + fileName,
+ files.contains(fileName));
}
// delete the existing snapshot
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
index 409f697..e0b3782 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
@@ -22,7 +22,9 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Collection;
+import java.util.ArrayList;
import java.util.HashSet;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -31,9 +33,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
@@ -49,6 +52,7 @@ public class TestSnapshotFileCache {
private static final Log LOG = LogFactory.getLog(TestSnapshotFileCache.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static long sequenceId = 0;
private static FileSystem fs;
private static Path rootDir;
@@ -72,86 +76,17 @@ public class TestSnapshotFileCache {
}
@Test(timeout = 10000000)
- public void testLoadAndDelete() throws Exception {
+ public void testLoadAndDelete() throws IOException {
// don't refresh the cache unless we tell it to
long period = Long.MAX_VALUE;
- Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
"test-snapshot-file-cache-refresh", new SnapshotFiles());
- Path snapshot = new Path(snapshotDir, "snapshot");
- Path region = new Path(snapshot, "7e91021");
- Path family = new Path(region, "fam");
- Path file1 = new Path(family, "file1");
- Path file2 = new Path(family, "file2");
-
- // create two hfiles under the snapshot
- fs.createNewFile(file1);
- fs.createNewFile(file2);
-
- FSUtils.logFileSystemState(fs, rootDir, LOG);
-
- // then make sure the cache finds them
- assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
- assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
- String not = "file-shouldn't-be-found";
- assertFalse("Cache found '" + not + "', but it shouldn't have.", cache.contains(not));
-
- // make sure we get a little bit of separation in the modification times
- // its okay if we sleep a little longer (b/c of GC pause), as long as we sleep a little
- Thread.sleep(10);
-
- LOG.debug("Deleting snapshot.");
- // then delete the snapshot and make sure that we can still find the files
- if (!fs.delete(snapshot, true)) {
- throw new IOException("Couldn't delete " + snapshot + " for an unknown reason.");
- }
- FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-
- LOG.debug("Checking to see if file is deleted.");
- assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
- assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
-
- // then trigger a refresh
- cache.triggerCacheRefreshForTesting();
- // and not it shouldn't find those files
- assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
- cache.contains(file1.getName()));
- assertFalse("Cache found '" + file2 + "', but it shouldn't have.",
- cache.contains(file2.getName()));
-
- fs.delete(snapshotDir, true);
- }
-
- @Test
- public void testLoadsTmpDir() throws Exception {
- // don't refresh the cache unless we tell it to
- long period = Long.MAX_VALUE;
- Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
- SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
- "test-snapshot-file-cache-refresh", new SnapshotFiles());
-
- // create a file in a 'completed' snapshot
- Path snapshot = new Path(snapshotDir, "snapshot");
- Path region = new Path(snapshot, "7e91021");
- Path family = new Path(region, "fam");
- Path file1 = new Path(family, "file1");
- fs.createNewFile(file1);
-
- // create an 'in progress' snapshot
- SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
- snapshot = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
- region = new Path(snapshot, "7e91021");
- family = new Path(region, "fam");
- Path file2 = new Path(family, "file2");
- fs.createNewFile(file2);
+ createAndTestSnapshotV1(cache, "snapshot1a", false, true);
+ createAndTestSnapshotV1(cache, "snapshot1b", true, true);
- FSUtils.logFileSystemState(fs, rootDir, LOG);
-
- // then make sure the cache finds both files
- assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
- assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
+ createAndTestSnapshotV2(cache, "snapshot2a", false, true);
+ createAndTestSnapshotV2(cache, "snapshot2b", true, true);
}
@Test
@@ -168,14 +103,14 @@ public class TestSnapshotFileCache {
});
// create a file in a 'completed' snapshot
- Path snapshot = new Path(snapshotDir, "snapshot");
- Path region = new Path(snapshot, "7e91021");
- Path family = new Path(region, "fam");
- Path file1 = new Path(family, "file1");
+ SnapshotDescription desc = SnapshotDescription.newBuilder().setName("snapshot").build();
+ Path snapshot = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+ SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshot, fs);
+ Path file1 = new Path(new Path(new Path(snapshot, "7e91021"), "fam"), "file1");
fs.createNewFile(file1);
// and another file in the logs directory
- Path logs = TakeSnapshotUtils.getSnapshotHLogsDir(snapshot, "server");
+ Path logs = getSnapshotHLogsDir(snapshot, "server");
Path log = new Path(logs, "me.hbase.com%2C58939%2C1350424310315.1350424315552");
fs.createNewFile(log);
@@ -187,65 +122,117 @@ public class TestSnapshotFileCache {
assertTrue("Cache didn't find:" + log, cache.contains(log.getName()));
}
+ /**
+ * Get the log directory for a specific snapshot
+ * @param snapshotDir directory where the specific snapshot will be stored
+ * @param serverName name of the parent regionserver for the log files
+ * @return path to the log home directory for the archive files.
+ */
+ public static Path getSnapshotHLogsDir(Path snapshotDir, String serverName) {
+ return new Path(snapshotDir, HLogUtil.getHLogDirectoryName(serverName));
+ }
+
@Test
public void testReloadModifiedDirectory() throws IOException {
// don't refresh the cache unless we tell it to
long period = Long.MAX_VALUE;
- Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
"test-snapshot-file-cache-refresh", new SnapshotFiles());
- Path snapshot = new Path(snapshotDir, "snapshot");
- Path region = new Path(snapshot, "7e91021");
- Path family = new Path(region, "fam");
- Path file1 = new Path(family, "file1");
- Path file2 = new Path(family, "file2");
-
- // create two hfiles under the snapshot
- fs.createNewFile(file1);
- fs.createNewFile(file2);
-
- FSUtils.logFileSystemState(fs, rootDir, LOG);
-
- assertTrue("Cache didn't find " + file1, cache.contains(file1.getName()));
-
+ createAndTestSnapshotV1(cache, "snapshot1", false, true);
// now delete the snapshot and add a file with a different name
- fs.delete(snapshot, true);
- Path file3 = new Path(family, "new_file");
- fs.createNewFile(file3);
+ createAndTestSnapshotV1(cache, "snapshot1", false, false);
- FSUtils.logFileSystemState(fs, rootDir, LOG);
- assertTrue("Cache didn't find new file:" + file3, cache.contains(file3.getName()));
+ createAndTestSnapshotV2(cache, "snapshot2", false, true);
+ // now delete the snapshot and add a file with a different name
+ createAndTestSnapshotV2(cache, "snapshot2", false, false);
}
@Test
public void testSnapshotTempDirReload() throws IOException {
long period = Long.MAX_VALUE;
// This doesn't refresh cache until we invoke it explicitly
- Path snapshotDir = new Path(SnapshotDescriptionUtils.getSnapshotsDir(rootDir),
- SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
"test-snapshot-file-cache-refresh", new SnapshotFiles());
- // Add a new snapshot
- Path snapshot1 = new Path(snapshotDir, "snapshot1");
- Path file1 = new Path(new Path(new Path(snapshot1, "7e91021"), "fam"), "file1");
- fs.createNewFile(file1);
- assertTrue(cache.contains(file1.getName()));
+ // Add a new non-tmp snapshot
+ createAndTestSnapshotV1(cache, "snapshot0v1", false, false);
+ createAndTestSnapshotV1(cache, "snapshot0v2", false, false);
+
+ // Add a new tmp snapshot
+ createAndTestSnapshotV2(cache, "snapshot1", true, false);
- // Add another snapshot
- Path snapshot2 = new Path(snapshotDir, "snapshot2");
- Path file2 = new Path(new Path(new Path(snapshot2, "7e91021"), "fam2"), "file2");
- fs.createNewFile(file2);
- assertTrue(cache.contains(file2.getName()));
+ // Add another tmp snapshot
+ createAndTestSnapshotV2(cache, "snapshot2", true, false);
}
class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
Collection<String> files = new HashSet<String>();
files.addAll(SnapshotReferenceUtil.getHLogNames(fs, snapshotDir));
- files.addAll(SnapshotReferenceUtil.getHFileNames(fs, snapshotDir));
+ files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir));
return files;
}
};
+
+ private void createAndTestSnapshotV1(final SnapshotFileCache cache, final String name,
+ final boolean tmp, final boolean removeOnExit) throws IOException {
+ SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+ SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1(name);
+ createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+ }
+
+ private void createAndTestSnapshotV2(final SnapshotFileCache cache, final String name,
+ final boolean tmp, final boolean removeOnExit) throws IOException {
+ SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+ SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(name);
+ createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+ }
+
+ private void createAndTestSnapshot(final SnapshotFileCache cache,
+ final SnapshotMock.SnapshotBuilder builder,
+ final boolean tmp, final boolean removeOnExit) throws IOException {
+ List<String> files = new ArrayList<String>();
+ for (int i = 0; i < 3; ++i) {
+ for (Path filePath: builder.addRegion()) {
+ String fileName = filePath.getName();
+ if (tmp) {
+ // We should be able to find all the files while the snapshot creation is in-progress
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+ assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+ }
+ files.add(fileName);
+ }
+ }
+
+ // Finalize the snapshot
+ if (!tmp) {
+ builder.commit();
+ }
+
+ // Make sure that all files are still present
+ for (String fileName: files) {
+ assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+ }
+
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+ if (removeOnExit) {
+ LOG.debug("Deleting snapshot.");
+ fs.delete(builder.getSnapshotsDir(), true);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ // The files should be in cache until next refresh
+ for (String fileName: files) {
+ assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+ }
+
+ // then trigger a refresh
+ cache.triggerCacheRefreshForTesting();
+ // and now it shouldn't find those files
+ for (String fileName: files) {
+ assertFalse("Cache found '" + fileName + "', but it shouldn't have.",
+ cache.contains(fileName));
+ }
+ }
+ }
}
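For readers skimming the test changes, a condensed sketch of the SnapshotMock flow that createAndTestSnapshot() drives above. It reuses the test's fs, rootDir, and UTIL fields plus an already-constructed SnapshotFileCache named cache, and assumes only the builder calls visible in this patch (createSnapshotV2, addRegion, commit); the snapshot name is a placeholder.

// Hypothetical test fragment, assuming the surrounding test's fs/rootDir/UTIL and 'cache'.
SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("example");

// Each addRegion() call writes one region's reference files for the in-progress
// snapshot and returns their paths.
List<String> fileNames = new ArrayList<String>();
for (Path filePath : builder.addRegion()) {
  fileNames.add(filePath.getName());
}

// commit() finalizes the snapshot (the in-progress/tmp case in the test skips it).
builder.commit();

// The cache should now report every reference file of the completed snapshot.
for (String fileName : fileNames) {
  assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
}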
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 02d3cda..c68a567 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -22,14 +22,18 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
+import java.util.Arrays;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -43,11 +47,13 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -56,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSVisitor;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.Assert;
@@ -144,14 +151,12 @@ public class SnapshotTestingUtils {
*/
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
- byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs,
- boolean requireLogs, Path logsDir, Set<String> snapshotServers)
+ byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
throws IOException {
ArrayList<byte[]> nonEmptyTestFamilies = new ArrayList<byte[]>(1);
nonEmptyTestFamilies.add(testFamily);
confirmSnapshotValid(snapshotDescriptor, tableName,
- nonEmptyTestFamilies, null, rootDir, admin, fs, requireLogs,
- logsDir, snapshotServers);
+ nonEmptyTestFamilies, null, rootDir, admin, fs);
}
/**
@@ -159,14 +164,12 @@ public class SnapshotTestingUtils {
*/
public static void confirmEmptySnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
- byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs,
- boolean requireLogs, Path logsDir, Set<String> snapshotServers)
+ byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs)
throws IOException {
ArrayList<byte[]> emptyTestFamilies = new ArrayList<byte[]>(1);
emptyTestFamilies.add(testFamily);
confirmSnapshotValid(snapshotDescriptor, tableName,
- null, emptyTestFamilies, rootDir, admin, fs, requireLogs,
- logsDir, snapshotServers);
+ null, emptyTestFamilies, rootDir, admin, fs);
}
/**
@@ -178,45 +181,31 @@ public class SnapshotTestingUtils {
public static void confirmSnapshotValid(
SnapshotDescription snapshotDescriptor, TableName tableName,
List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies,
- Path rootDir, HBaseAdmin admin, FileSystem fs, boolean requireLogs,
- Path logsDir, Set<String> snapshotServers) throws IOException {
+ Path rootDir, HBaseAdmin admin, FileSystem fs) throws IOException {
+ final Configuration conf = admin.getConfiguration();
+
// check snapshot dir
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
snapshotDescriptor, rootDir);
assertTrue(fs.exists(snapshotDir));
- // check snapshot info
- Path snapshotinfo = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
- assertTrue(fs.exists(snapshotinfo));
-
- // check the logs dir
- if (requireLogs) {
- TakeSnapshotUtils.verifyAllLogsGotReferenced(fs, logsDir,
- snapshotServers, snapshotDescriptor, new Path(snapshotDir,
- HConstants.HREGION_LOGDIR_NAME));
- }
-
- // check the table info
- HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, tableName);
- HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
- assertEquals(desc, snapshotDesc);
+ SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
// Extract regions and families with store files
- final Set