diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java index 1457fe8..85e4816 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java @@ -720,36 +720,50 @@ public final class MapReduceProtos { public interface TableSnapshotRegionSplitOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional .RegionSpecifier region = 1; + // optional .TableSchema table = 1; /** - * optional .RegionSpecifier region = 1; + * optional .TableSchema table = 1; + */ + boolean hasTable(); + /** + * optional .TableSchema table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable(); + /** + * optional .TableSchema table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder(); + + // optional .RegionInfo region = 2; + /** + * optional .RegionInfo region = 2; */ boolean hasRegion(); /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); - // repeated string locations = 2; + // repeated string locations = 3; /** - * repeated string locations = 2; + * repeated string locations = 3; */ java.util.List getLocationsList(); /** - * repeated string locations = 2; + * repeated string locations = 3; */ int getLocationsCount(); /** - * repeated string locations = 2; + * repeated string locations = 3; */ java.lang.String getLocations(int index); /** - * repeated string locations = 2; + * repeated string locations = 3; */ com.google.protobuf.ByteString getLocationsBytes(int index); @@ -806,22 +820,35 @@ public final class MapReduceProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = region_.toBuilder(); } - region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry); + region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(region_); region_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; break; } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) 
{ + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { locations_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000002; + mutable_bitField0_ |= 0x00000004; } locations_.add(input.readBytes()); break; @@ -834,7 +861,7 @@ public final class MapReduceProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { locations_ = new com.google.protobuf.UnmodifiableLazyStringList(locations_); } this.unknownFields = unknownFields.build(); @@ -869,52 +896,74 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .RegionSpecifier region = 1; - public static final int REGION_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_; + // optional .TableSchema table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_; /** - * optional .RegionSpecifier region = 1; + * optional .TableSchema table = 1; */ - public boolean hasRegion() { + public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .RegionSpecifier region = 1; + * optional .TableSchema table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { + return table_; + } + /** + * optional .TableSchema table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { + return table_; + } + + // optional .RegionInfo region = 2; + public static final int REGION_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_; + /** + * optional .RegionInfo region = 2; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .RegionInfo region = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { return region_; } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { return region_; } - // repeated string locations = 2; - public static final int LOCATIONS_FIELD_NUMBER = 2; + // repeated string locations = 3; + public static final int LOCATIONS_FIELD_NUMBER = 3; private com.google.protobuf.LazyStringList locations_; /** - * repeated string locations = 2; + * repeated string locations = 3; */ public java.util.List getLocationsList() { return locations_; } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public int getLocationsCount() { return locations_.size(); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public java.lang.String getLocations(int index) { return locations_.get(index); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public com.google.protobuf.ByteString getLocationsBytes(int index) { @@ -922,7 +971,8 @@ public final class MapReduceProtos { } private void initFields() { - region_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; @@ -930,6 +980,12 @@ public final class MapReduceProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (hasTable()) { + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } if (hasRegion()) { if (!getRegion().isInitialized()) { memoizedIsInitialized = 0; @@ -944,10 +1000,13 @@ public final class MapReduceProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, region_); + output.writeMessage(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, region_); } for (int i = 0; i < locations_.size(); i++) { - output.writeBytes(2, locations_.getByteString(i)); + output.writeBytes(3, locations_.getByteString(i)); } getUnknownFields().writeTo(output); } @@ -960,7 +1019,11 @@ public final class MapReduceProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, region_); + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, region_); } { int dataSize = 0; @@ -994,6 +1057,11 @@ public final class MapReduceProtos { org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other = (org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit) obj; boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } result = result && (hasRegion() == other.hasRegion()); if (hasRegion()) { result = result && getRegion() @@ -1014,6 +1082,10 @@ public final class MapReduceProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } if (hasRegion()) { hash = (37 * hash) + REGION_FIELD_NUMBER; hash = (53 * hash) + getRegion().hashCode(); @@ -1123,6 +1195,7 @@ public final class MapReduceProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); getRegionFieldBuilder(); } } @@ -1132,14 +1205,20 @@ public final class MapReduceProtos { public Builder clear() { super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = 
(bitField0_ & ~0x00000004); return this; } @@ -1171,15 +1250,23 @@ public final class MapReduceProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } if (regionBuilder_ == null) { result.region_ = region_; } else { result.region_ = regionBuilder_.build(); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { locations_ = new com.google.protobuf.UnmodifiableLazyStringList( locations_); - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } result.locations_ = locations_; result.bitField0_ = to_bitField0_; @@ -1198,13 +1285,16 @@ public final class MapReduceProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } if (other.hasRegion()) { mergeRegion(other.getRegion()); } if (!other.locations_.isEmpty()) { if (locations_.isEmpty()) { locations_ = other.locations_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } else { ensureLocationsIsMutable(); locations_.addAll(other.locations_); @@ -1216,6 +1306,12 @@ public final class MapReduceProtos { } public final boolean isInitialized() { + if (hasTable()) { + if (!getTable().isInitialized()) { + + return false; + } + } if (hasRegion()) { if (!getRegion().isInitialized()) { @@ -1244,20 +1340,137 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .RegionSpecifier region = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + // optional .TableSchema table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableBuilder_; /** - * optional .RegionSpecifier region = 1; + * optional .TableSchema table = 1; */ - public boolean hasRegion() { + public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .RegionSpecifier region = 1; + * optional .TableSchema table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * optional .TableSchema table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .TableSchema table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .TableSchema table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .TableSchema table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .TableSchema table = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * optional .TableSchema table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // optional .RegionInfo region = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; + /** + * optional .RegionInfo region = 2; + */ + public boolean hasRegion() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .RegionInfo region = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { if (regionBuilder_ == null) { return region_; } else { @@ -1265,9 +1478,9 @@ public final class MapReduceProtos { } } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -1277,32 +1490,32 @@ public final class MapReduceProtos { } else { regionBuilder_.setMessage(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ public Builder setRegion( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionBuilder_ == null) { region_ = builderForValue.build(); onChanged(); } else { regionBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { region_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); } else { region_ = value; } @@ -1310,34 +1523,34 @@ public final class MapReduceProtos { } else { regionBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ public Builder clearRegion() { if (regionBuilder_ == null) { - region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); onChanged(); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() { - bitField0_ |= 0x00000001; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { + bitField0_ |= 0x00000002; onChanged(); return getRegionFieldBuilder().getBuilder(); } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() { + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { if (regionBuilder_ != null) { return regionBuilder_.getMessageOrBuilder(); } else { @@ -1345,14 +1558,14 @@ public final class MapReduceProtos { } } /** - * optional .RegionSpecifier region = 1; + * optional .RegionInfo region = 2; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionFieldBuilder() { if (regionBuilder_ == null) { regionBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( region_, getParentForChildren(), isClean()); @@ -1361,42 +1574,42 @@ public final class MapReduceProtos { return regionBuilder_; } - // repeated string locations = 2; + // repeated string locations = 3; private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureLocationsIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { locations_ = new com.google.protobuf.LazyStringArrayList(locations_); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; } } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public java.util.List getLocationsList() { return java.util.Collections.unmodifiableList(locations_); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public int getLocationsCount() { return locations_.size(); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public java.lang.String getLocations(int index) { return locations_.get(index); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public com.google.protobuf.ByteString getLocationsBytes(int index) { return locations_.getByteString(index); } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public Builder setLocations( int index, java.lang.String value) { @@ -1409,7 +1622,7 @@ public final class MapReduceProtos { return this; } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public Builder addLocations( java.lang.String value) { @@ -1422,7 +1635,7 @@ public final class MapReduceProtos { return this; } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public Builder addAllLocations( java.lang.Iterable values) { @@ -1432,16 +1645,16 @@ public final class MapReduceProtos { return this; } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public Builder clearLocations() { locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); 
onChanged(); return this; } /** - * repeated string locations = 2; + * repeated string locations = 3; */ public Builder addLocationsBytes( com.google.protobuf.ByteString value) { @@ -1485,11 +1698,12 @@ public final class MapReduceProtos { static { java.lang.String[] descriptorData = { "\n\017MapReduce.proto\032\013HBase.proto\".\n\013ScanMe" + - "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"O" + - "\n\030TableSnapshotRegionSplit\022 \n\006region\030\001 \001" + - "(\0132\020.RegionSpecifier\022\021\n\tlocations\030\002 \003(\tB" + - "B\n*org.apache.hadoop.hbase.protobuf.gene" + - "ratedB\017MapReduceProtosH\001\240\001\001" + "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"g" + + "\n\030TableSnapshotRegionSplit\022\033\n\005table\030\001 \001(" + + "\0132\014.TableSchema\022\033\n\006region\030\002 \001(\0132\013.Region" + + "Info\022\021\n\tlocations\030\003 \003(\tBB\n*org.apache.ha" + + "doop.hbase.protobuf.generatedB\017MapReduce" + + "ProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -1507,7 +1721,7 @@ public final class MapReduceProtos { internal_static_TableSnapshotRegionSplit_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSnapshotRegionSplit_descriptor, - new java.lang.String[] { "Region", "Locations", }); + new java.lang.String[] { "Table", "Region", "Locations", }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java new file mode 100644 index 0000000..8dbb5ad --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java @@ -0,0 +1,4787 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: Snapshot.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class SnapshotProtos { + private SnapshotProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface SnapshotFileInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .SnapshotFileInfo.Type type = 1; + /** + * required .SnapshotFileInfo.Type type = 1; + */ + boolean hasType(); + /** + * required .SnapshotFileInfo.Type type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType(); + + // optional string hfile = 3; + /** + * optional string hfile = 3; + */ + boolean hasHfile(); + /** + * optional string hfile = 3; + */ + java.lang.String getHfile(); + /** + * optional string hfile = 3; + */ + com.google.protobuf.ByteString + getHfileBytes(); + + // optional string wal_server = 4; + /** + * optional string wal_server = 4; + */ + boolean hasWalServer(); + /** + * optional string wal_server = 4; + */ + java.lang.String getWalServer(); + /** + * optional string wal_server = 4; + */ + com.google.protobuf.ByteString + getWalServerBytes(); + + // optional string wal_name = 5; + /** + * optional string wal_name = 5; + */ + boolean hasWalName(); + /** + * optional string wal_name = 5; + */ + java.lang.String getWalName(); + /** + * optional string wal_name = 5; + */ + com.google.protobuf.ByteString + getWalNameBytes(); + } + /** + * Protobuf type {@code SnapshotFileInfo} + */ + public static final class SnapshotFileInfo extends + com.google.protobuf.GeneratedMessage + implements SnapshotFileInfoOrBuilder { + // Use SnapshotFileInfo.newBuilder() to construct. + private SnapshotFileInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotFileInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotFileInfo defaultInstance; + public static SnapshotFileInfo getDefaultInstance() { + return defaultInstance; + } + + public SnapshotFileInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotFileInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type value = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000002; + hfile_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000004; + walServer_ = 
input.readBytes(); + break; + } + case 42: { + bitField0_ |= 0x00000008; + walName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotFileInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotFileInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code SnapshotFileInfo.Type} + */ + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + /** + * HFILE = 1; + */ + HFILE(0, 1), + /** + * WAL = 2; + */ + WAL(1, 2), + ; + + /** + * HFILE = 1; + */ + public static final int HFILE_VALUE = 1; + /** + * WAL = 2; + */ + public static final int WAL_VALUE = 2; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 1: return HFILE; + case 2: return WAL; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:SnapshotFileInfo.Type) + } + + private int bitField0_; + // required 
.SnapshotFileInfo.Type type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type type_; + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType() { + return type_; + } + + // optional string hfile = 3; + public static final int HFILE_FIELD_NUMBER = 3; + private java.lang.Object hfile_; + /** + * optional string hfile = 3; + */ + public boolean hasHfile() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string hfile = 3; + */ + public java.lang.String getHfile() { + java.lang.Object ref = hfile_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + hfile_ = s; + } + return s; + } + } + /** + * optional string hfile = 3; + */ + public com.google.protobuf.ByteString + getHfileBytes() { + java.lang.Object ref = hfile_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hfile_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string wal_server = 4; + public static final int WAL_SERVER_FIELD_NUMBER = 4; + private java.lang.Object walServer_; + /** + * optional string wal_server = 4; + */ + public boolean hasWalServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string wal_server = 4; + */ + public java.lang.String getWalServer() { + java.lang.Object ref = walServer_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + walServer_ = s; + } + return s; + } + } + /** + * optional string wal_server = 4; + */ + public com.google.protobuf.ByteString + getWalServerBytes() { + java.lang.Object ref = walServer_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + walServer_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string wal_name = 5; + public static final int WAL_NAME_FIELD_NUMBER = 5; + private java.lang.Object walName_; + /** + * optional string wal_name = 5; + */ + public boolean hasWalName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string wal_name = 5; + */ + public java.lang.String getWalName() { + java.lang.Object ref = walName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + walName_ = s; + } + return s; + } + } + /** + * optional string wal_name = 5; + */ + public com.google.protobuf.ByteString + getWalNameBytes() { + java.lang.Object ref = walName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + walName_ = b; + return b; + } 
else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE; + hfile_ = ""; + walServer_ = ""; + walName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getHfileBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(4, getWalServerBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(5, getWalNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getHfileBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getWalServerBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getWalNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasHfile() == other.hasHfile()); + if (hasHfile()) { + result = result && getHfile() + .equals(other.getHfile()); + } + result = result && (hasWalServer() == other.hasWalServer()); + if (hasWalServer()) { + result = result && getWalServer() + .equals(other.getWalServer()); + } + result = result && (hasWalName() == other.hasWalName()); + if (hasWalName()) { + result = result && getWalName() + .equals(other.getWalName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + 
TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasHfile()) { + hash = (37 * hash) + HFILE_FIELD_NUMBER; + hash = (53 * hash) + getHfile().hashCode(); + } + if (hasWalServer()) { + hash = (37 * hash) + WAL_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getWalServer().hashCode(); + } + if (hasWalName()) { + hash = (37 * hash) + WAL_NAME_FIELD_NUMBER; + hash = (53 * hash) + getWalName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SnapshotFileInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE; + bitField0_ = (bitField0_ & ~0x00000001); + hfile_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + walServer_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + walName_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotFileInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo build() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.hfile_ = hfile_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.walServer_ = walServer_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.walName_ = walName_; + result.bitField0_ = 
to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasHfile()) { + bitField0_ |= 0x00000002; + hfile_ = other.hfile_; + onChanged(); + } + if (other.hasWalServer()) { + bitField0_ |= 0x00000004; + walServer_ = other.walServer_; + onChanged(); + } + if (other.hasWalName()) { + bitField0_ |= 0x00000008; + walName_ = other.walName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .SnapshotFileInfo.Type type = 1; + private org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE; + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type getType() { + return type_; + } + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .SnapshotFileInfo.Type type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo.Type.HFILE; + onChanged(); + return this; + } + + // optional string hfile = 3; + private java.lang.Object hfile_ = ""; + /** + * optional string hfile = 3; + */ + public boolean hasHfile() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string hfile = 3; + */ + public java.lang.String getHfile() { + java.lang.Object ref = hfile_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hfile_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string hfile = 3; + */ + public 
com.google.protobuf.ByteString + getHfileBytes() { + java.lang.Object ref = hfile_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hfile_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string hfile = 3; + */ + public Builder setHfile( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + hfile_ = value; + onChanged(); + return this; + } + /** + * optional string hfile = 3; + */ + public Builder clearHfile() { + bitField0_ = (bitField0_ & ~0x00000002); + hfile_ = getDefaultInstance().getHfile(); + onChanged(); + return this; + } + /** + * optional string hfile = 3; + */ + public Builder setHfileBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + hfile_ = value; + onChanged(); + return this; + } + + // optional string wal_server = 4; + private java.lang.Object walServer_ = ""; + /** + * optional string wal_server = 4; + */ + public boolean hasWalServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string wal_server = 4; + */ + public java.lang.String getWalServer() { + java.lang.Object ref = walServer_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + walServer_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string wal_server = 4; + */ + public com.google.protobuf.ByteString + getWalServerBytes() { + java.lang.Object ref = walServer_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + walServer_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string wal_server = 4; + */ + public Builder setWalServer( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + walServer_ = value; + onChanged(); + return this; + } + /** + * optional string wal_server = 4; + */ + public Builder clearWalServer() { + bitField0_ = (bitField0_ & ~0x00000004); + walServer_ = getDefaultInstance().getWalServer(); + onChanged(); + return this; + } + /** + * optional string wal_server = 4; + */ + public Builder setWalServerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + walServer_ = value; + onChanged(); + return this; + } + + // optional string wal_name = 5; + private java.lang.Object walName_ = ""; + /** + * optional string wal_name = 5; + */ + public boolean hasWalName() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string wal_name = 5; + */ + public java.lang.String getWalName() { + java.lang.Object ref = walName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + walName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string wal_name = 5; + */ + public com.google.protobuf.ByteString + getWalNameBytes() { + java.lang.Object ref = walName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + walName_ = b; + return b; + } else { + 
return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string wal_name = 5; + */ + public Builder setWalName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + walName_ = value; + onChanged(); + return this; + } + /** + * optional string wal_name = 5; + */ + public Builder clearWalName() { + bitField0_ = (bitField0_ & ~0x00000008); + walName_ = getDefaultInstance().getWalName(); + onChanged(); + return this; + } + /** + * optional string wal_name = 5; + */ + public Builder setWalNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + walName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SnapshotFileInfo) + } + + static { + defaultInstance = new SnapshotFileInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SnapshotFileInfo) + } + + public interface SnapshotRegionManifestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional int32 version = 1; + /** + * optional int32 version = 1; + */ + boolean hasVersion(); + /** + * optional int32 version = 1; + */ + int getVersion(); + + // required .RegionInfo region_info = 2; + /** + * required .RegionInfo region_info = 2; + */ + boolean hasRegionInfo(); + /** + * required .RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + java.util.List + getFamilyFilesList(); + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index); + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + int getFamilyFilesCount(); + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + java.util.List + getFamilyFilesOrBuilderList(); + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder( + int index); + } + /** + * Protobuf type {@code SnapshotRegionManifest} + */ + public static final class SnapshotRegionManifest extends + com.google.protobuf.GeneratedMessage + implements SnapshotRegionManifestOrBuilder { + // Use SnapshotRegionManifest.newBuilder() to construct. 
+ private SnapshotRegionManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotRegionManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotRegionManifest defaultInstance; + public static SnapshotRegionManifest getDefaultInstance() { + return defaultInstance; + } + + public SnapshotRegionManifest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotRegionManifest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + version_ = input.readInt32(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + familyFiles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + familyFiles_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + familyFiles_ = java.util.Collections.unmodifiableList(familyFiles_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotRegionManifest 
parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotRegionManifest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public interface StoreFileOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // optional .Reference reference = 2; + /** + * optional .Reference reference = 2; + */ + boolean hasReference(); + /** + * optional .Reference reference = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference(); + /** + * optional .Reference reference = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder(); + + // optional uint64 file_size = 3; + /** + * optional uint64 file_size = 3; + * + *
+       * TODO: Add checksums or other fields to verify the file
+       * 
+ */ + boolean hasFileSize(); + /** + * optional uint64 file_size = 3; + * + *
+       * TODO: Add checksums or other fields to verify the file
+       * 
+ */ + long getFileSize(); + } + /** + * Protobuf type {@code SnapshotRegionManifest.StoreFile} + */ + public static final class StoreFile extends + com.google.protobuf.GeneratedMessage + implements StoreFileOrBuilder { + // Use StoreFile.newBuilder() to construct. + private StoreFile(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreFile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreFile defaultInstance; + public static StoreFile getDefaultInstance() { + return defaultInstance; + } + + public StoreFile getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreFile( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = reference_.toBuilder(); + } + reference_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(reference_); + reference_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + fileSize_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreFile parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new StoreFile(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .Reference reference = 2; + public static final int REFERENCE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference reference_; + /** + * optional .Reference reference = 2; + */ + public boolean hasReference() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .Reference reference = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference() { + return reference_; + } + /** + * optional .Reference reference = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder() { + return reference_; + } + + // optional uint64 file_size = 3; + public static final int FILE_SIZE_FIELD_NUMBER = 3; + private long fileSize_; + /** + * optional uint64 file_size = 3; + * + *
+       * TODO: Add checksums or other fields to verify the file
+       * 
+ */ + public boolean hasFileSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 file_size = 3; + * + *
+       * TODO: Add checksums or other fields to verify the file
+       * 
+ */ + public long getFileSize() { + return fileSize_; + } + + private void initFields() { + name_ = ""; + reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance(); + fileSize_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (hasReference()) { + if (!getReference().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, reference_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, fileSize_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, reference_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, fileSize_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasReference() == other.hasReference()); + if (hasReference()) { + result = result && getReference() + .equals(other.getReference()); + } + result = result && (hasFileSize() == other.hasFileSize()); + if (hasFileSize()) { + result = result && (getFileSize() + == other.getFileSize()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (hasReference()) { + hash = (37 * hash) + REFERENCE_FIELD_NUMBER; + hash = (53 * hash) + getReference().hashCode(); + } + if (hasFileSize()) { + hash = (37 * hash) + FILE_SIZE_FIELD_NUMBER; + hash = (53 * 
hash) + hashLong(getFileSize()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SnapshotRegionManifest.StoreFile} + */ + public static final class Builder 
extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getReferenceFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (referenceBuilder_ == null) { + reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance(); + } else { + referenceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + fileSize_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_StoreFile_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile build() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (referenceBuilder_ == null) { + result.reference_ = reference_; + } else { + result.reference_ = referenceBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.fileSize_ = 
fileSize_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasReference()) { + mergeReference(other.getReference()); + } + if (other.hasFileSize()) { + setFileSize(other.getFileSize()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (hasReference()) { + if (!getReference().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // optional .Reference reference = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder> referenceBuilder_; + /** + * optional .Reference reference = 2; + */ + public boolean hasReference() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .Reference reference = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference getReference() { + if (referenceBuilder_ == null) { + return reference_; + } else { + return referenceBuilder_.getMessage(); + } + } + /** + * optional .Reference reference = 2; + */ + public Builder setReference(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference value) { + if (referenceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + reference_ = value; + onChanged(); + } else { + referenceBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .Reference reference = 2; + */ + public Builder setReference( + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder builderForValue) { + if (referenceBuilder_ == null) { + reference_ = builderForValue.build(); + onChanged(); + } else { + referenceBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .Reference reference = 2; + */ + public Builder mergeReference(org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference value) { + if (referenceBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + reference_ != org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance()) { + reference_ = + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.newBuilder(reference_).mergeFrom(value).buildPartial(); + } else { + reference_ = value; + } + onChanged(); + } else { + referenceBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .Reference reference = 2; + */ + public Builder clearReference() { + if (referenceBuilder_ == null) { + reference_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.getDefaultInstance(); + onChanged(); + } else { + referenceBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .Reference reference = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder getReferenceBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getReferenceFieldBuilder().getBuilder(); + } + /** + * optional .Reference reference = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder getReferenceOrBuilder() { + if (referenceBuilder_ != null) { + return referenceBuilder_.getMessageOrBuilder(); + } else { + return reference_; + } + } + /** + * optional .Reference reference = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder> + getReferenceFieldBuilder() { + if (referenceBuilder_ == null) { + referenceBuilder_ = new com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference, org.apache.hadoop.hbase.protobuf.generated.FSProtos.Reference.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.ReferenceOrBuilder>( + reference_, + getParentForChildren(), + isClean()); + reference_ = null; + } + return referenceBuilder_; + } + + // optional uint64 file_size = 3; + private long fileSize_ ; + /** + * optional uint64 file_size = 3; + * + *
+         * TODO: Add checksums or other fields to verify the file
+         * 
+ */ + public boolean hasFileSize() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 file_size = 3; + * + *
+         * TODO: Add checksums or other fields to verify the file
+         * 
+ */ + public long getFileSize() { + return fileSize_; + } + /** + * optional uint64 file_size = 3; + * + *
+         * TODO: Add checksums or other fields to verify the file
+         * 
+ */ + public Builder setFileSize(long value) { + bitField0_ |= 0x00000004; + fileSize_ = value; + onChanged(); + return this; + } + /** + * optional uint64 file_size = 3; + * + *
+         * TODO: Add checksums or other fields to verify the file
+         * 
+ */ + public Builder clearFileSize() { + bitField0_ = (bitField0_ & ~0x00000004); + fileSize_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SnapshotRegionManifest.StoreFile) + } + + static { + defaultInstance = new StoreFile(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SnapshotRegionManifest.StoreFile) + } + + public interface FamilyFilesOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes family_name = 1; + /** + * required bytes family_name = 1; + */ + boolean hasFamilyName(); + /** + * required bytes family_name = 1; + */ + com.google.protobuf.ByteString getFamilyName(); + + // repeated .SnapshotRegionManifest.StoreFile store_files = 2; + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + java.util.List + getStoreFilesList(); + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index); + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + int getStoreFilesCount(); + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + java.util.List + getStoreFilesOrBuilderList(); + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder( + int index); + } + /** + * Protobuf type {@code SnapshotRegionManifest.FamilyFiles} + */ + public static final class FamilyFiles extends + com.google.protobuf.GeneratedMessage + implements FamilyFilesOrBuilder { + // Use FamilyFiles.newBuilder() to construct. + private FamilyFiles(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FamilyFiles(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FamilyFiles defaultInstance; + public static FamilyFiles getDefaultInstance() { + return defaultInstance; + } + + public FamilyFiles getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FamilyFiles( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeFiles_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + storeFiles_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw 
e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeFiles_ = java.util.Collections.unmodifiableList(storeFiles_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_FamilyFiles_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_FamilyFiles_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FamilyFiles parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FamilyFiles(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes family_name = 1; + public static final int FAMILY_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString familyName_; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + + // repeated .SnapshotRegionManifest.StoreFile store_files = 2; + public static final int STORE_FILES_FIELD_NUMBER = 2; + private java.util.List storeFiles_; + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public java.util.List getStoreFilesList() { + return storeFiles_; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public java.util.List + getStoreFilesOrBuilderList() { + return storeFiles_; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public int getStoreFilesCount() { + return storeFiles_.size(); + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index) { + return storeFiles_.get(index); + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder( + int index) { + return storeFiles_.get(index); + } + + private void initFields() { + familyName_ = com.google.protobuf.ByteString.EMPTY; + storeFiles_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFamilyName()) { + memoizedIsInitialized = 0; + return false; + } + for 
(int i = 0; i < getStoreFilesCount(); i++) { + if (!getStoreFiles(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + for (int i = 0; i < storeFiles_.size(); i++) { + output.writeMessage(2, storeFiles_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + for (int i = 0; i < storeFiles_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeFiles_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && getStoreFilesList() + .equals(other.getStoreFilesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (getStoreFilesCount() > 0) { + hash = (37 * hash) + STORE_FILES_FIELD_NUMBER; + hash = (53 * hash) + getStoreFilesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); 
+ } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SnapshotRegionManifest.FamilyFiles} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_FamilyFiles_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_FamilyFiles_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreFilesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (storeFilesBuilder_ == null) { + storeFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeFilesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_FamilyFiles_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles build() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (storeFilesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeFiles_ = java.util.Collections.unmodifiableList(storeFiles_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeFiles_ = storeFiles_; + } else { + result.storeFiles_ = storeFilesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (storeFilesBuilder_ == null) { + if (!other.storeFiles_.isEmpty()) { + if (storeFiles_.isEmpty()) { + storeFiles_ = other.storeFiles_; + bitField0_ = (bitField0_ & 
~0x00000002); + } else { + ensureStoreFilesIsMutable(); + storeFiles_.addAll(other.storeFiles_); + } + onChanged(); + } + } else { + if (!other.storeFiles_.isEmpty()) { + if (storeFilesBuilder_.isEmpty()) { + storeFilesBuilder_.dispose(); + storeFilesBuilder_ = null; + storeFiles_ = other.storeFiles_; + bitField0_ = (bitField0_ & ~0x00000002); + storeFilesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreFilesFieldBuilder() : null; + } else { + storeFilesBuilder_.addAllMessages(other.storeFiles_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + for (int i = 0; i < getStoreFilesCount(); i++) { + if (!getStoreFiles(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes family_name = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + /** + * required bytes family_name = 1; + */ + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + /** + * required bytes family_name = 1; + */ + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // repeated .SnapshotRegionManifest.StoreFile store_files = 2; + private java.util.List storeFiles_ = + java.util.Collections.emptyList(); + private void ensureStoreFilesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeFiles_ = new java.util.ArrayList(storeFiles_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder> storeFilesBuilder_; + + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public java.util.List getStoreFilesList() { + if (storeFilesBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeFiles_); + } else { + return storeFilesBuilder_.getMessageList(); + } + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public int getStoreFilesCount() { + if 
(storeFilesBuilder_ == null) { + return storeFiles_.size(); + } else { + return storeFilesBuilder_.getCount(); + } + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile getStoreFiles(int index) { + if (storeFilesBuilder_ == null) { + return storeFiles_.get(index); + } else { + return storeFilesBuilder_.getMessage(index); + } + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder setStoreFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) { + if (storeFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFilesIsMutable(); + storeFiles_.set(index, value); + onChanged(); + } else { + storeFilesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder setStoreFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) { + if (storeFilesBuilder_ == null) { + ensureStoreFilesIsMutable(); + storeFiles_.set(index, builderForValue.build()); + onChanged(); + } else { + storeFilesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder addStoreFiles(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) { + if (storeFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFilesIsMutable(); + storeFiles_.add(value); + onChanged(); + } else { + storeFilesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder addStoreFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile value) { + if (storeFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreFilesIsMutable(); + storeFiles_.add(index, value); + onChanged(); + } else { + storeFilesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder addStoreFiles( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) { + if (storeFilesBuilder_ == null) { + ensureStoreFilesIsMutable(); + storeFiles_.add(builderForValue.build()); + onChanged(); + } else { + storeFilesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder addStoreFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder builderForValue) { + if (storeFilesBuilder_ == null) { + ensureStoreFilesIsMutable(); + storeFiles_.add(index, builderForValue.build()); + onChanged(); + } else { + storeFilesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder addAllStoreFiles( + java.lang.Iterable values) { + if (storeFilesBuilder_ == null) { + ensureStoreFilesIsMutable(); + super.addAll(values, storeFiles_); + onChanged(); + } else { + 
storeFilesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder clearStoreFiles() { + if (storeFilesBuilder_ == null) { + storeFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeFilesBuilder_.clear(); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public Builder removeStoreFiles(int index) { + if (storeFilesBuilder_ == null) { + ensureStoreFilesIsMutable(); + storeFiles_.remove(index); + onChanged(); + } else { + storeFilesBuilder_.remove(index); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder getStoreFilesBuilder( + int index) { + return getStoreFilesFieldBuilder().getBuilder(index); + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder getStoreFilesOrBuilder( + int index) { + if (storeFilesBuilder_ == null) { + return storeFiles_.get(index); } else { + return storeFilesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public java.util.List + getStoreFilesOrBuilderList() { + if (storeFilesBuilder_ != null) { + return storeFilesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeFiles_); + } + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder addStoreFilesBuilder() { + return getStoreFilesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder addStoreFilesBuilder( + int index) { + return getStoreFilesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest.StoreFile store_files = 2; + */ + public java.util.List + getStoreFilesBuilderList() { + return getStoreFilesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder> + getStoreFilesFieldBuilder() { + if (storeFilesBuilder_ == null) { + storeFilesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFileOrBuilder>( + storeFiles_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeFiles_ = null; + } + return storeFilesBuilder_; + } + + // 
@@protoc_insertion_point(builder_scope:SnapshotRegionManifest.FamilyFiles) + } + + static { + defaultInstance = new FamilyFiles(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SnapshotRegionManifest.FamilyFiles) + } + + private int bitField0_; + // optional int32 version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private int version_; + /** + * optional int32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int32 version = 1; + */ + public int getVersion() { + return version_; + } + + // required .RegionInfo region_info = 2; + public static final int REGION_INFO_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_; + } + /** + * required .RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_; + } + + // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + public static final int FAMILY_FILES_FIELD_NUMBER = 3; + private java.util.List familyFiles_; + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public java.util.List getFamilyFilesList() { + return familyFiles_; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public java.util.List + getFamilyFilesOrBuilderList() { + return familyFiles_; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public int getFamilyFilesCount() { + return familyFiles_.size(); + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index) { + return familyFiles_.get(index); + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder( + int index) { + return familyFiles_.get(index); + } + + private void initFields() { + version_ = 0; + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + familyFiles_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getFamilyFilesCount(); i++) { + if (!getFamilyFiles(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt32(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, regionInfo_); + } + for (int i = 0; i < 
familyFiles_.size(); i++) { + output.writeMessage(3, familyFiles_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionInfo_); + } + for (int i = 0; i < familyFiles_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, familyFiles_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && getFamilyFilesList() + .equals(other.getFamilyFilesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + } + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + if (getFamilyFilesCount() > 0) { + hash = (37 * hash) + FAMILY_FILES_FIELD_NUMBER; + hash = (53 * hash) + getFamilyFilesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SnapshotRegionManifest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + getFamilyFilesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (familyFilesBuilder_ == null) { + familyFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + familyFilesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotRegionManifest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest build() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (familyFilesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + familyFiles_ = java.util.Collections.unmodifiableList(familyFiles_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.familyFiles_ = familyFiles_; + } else { + result.familyFiles_ = familyFilesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance()) return this; + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasRegionInfo()) { + 
mergeRegionInfo(other.getRegionInfo()); + } + if (familyFilesBuilder_ == null) { + if (!other.familyFiles_.isEmpty()) { + if (familyFiles_.isEmpty()) { + familyFiles_ = other.familyFiles_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureFamilyFilesIsMutable(); + familyFiles_.addAll(other.familyFiles_); + } + onChanged(); + } + } else { + if (!other.familyFiles_.isEmpty()) { + if (familyFilesBuilder_.isEmpty()) { + familyFilesBuilder_.dispose(); + familyFilesBuilder_ = null; + familyFiles_ = other.familyFiles_; + bitField0_ = (bitField0_ & ~0x00000004); + familyFilesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getFamilyFilesFieldBuilder() : null; + } else { + familyFilesBuilder_.addAllMessages(other.familyFiles_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegionInfo()) { + + return false; + } + if (!getRegionInfo().isInitialized()) { + + return false; + } + for (int i = 0; i < getFamilyFilesCount(); i++) { + if (!getFamilyFiles(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional int32 version = 1; + private int version_ ; + /** + * optional int32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional int32 version = 1; + */ + public int getVersion() { + return version_; + } + /** + * optional int32 version = 1; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * optional int32 version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0; + onChanged(); + return this; + } + + // required .RegionInfo region_info = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .RegionInfo region_info = 2; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if 
(regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .RegionInfo region_info = 2; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .RegionInfo region_info = 2; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .RegionInfo region_info = 2; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_; + } + } + /** + * required .RegionInfo region_info = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + private java.util.List familyFiles_ = + java.util.Collections.emptyList(); + private void ensureFamilyFilesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + familyFiles_ = new java.util.ArrayList(familyFiles_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles, 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder> familyFilesBuilder_; + + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public java.util.List getFamilyFilesList() { + if (familyFilesBuilder_ == null) { + return java.util.Collections.unmodifiableList(familyFiles_); + } else { + return familyFilesBuilder_.getMessageList(); + } + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public int getFamilyFilesCount() { + if (familyFilesBuilder_ == null) { + return familyFiles_.size(); + } else { + return familyFilesBuilder_.getCount(); + } + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles getFamilyFiles(int index) { + if (familyFilesBuilder_ == null) { + return familyFiles_.get(index); + } else { + return familyFilesBuilder_.getMessage(index); + } + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder setFamilyFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) { + if (familyFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFamilyFilesIsMutable(); + familyFiles_.set(index, value); + onChanged(); + } else { + familyFilesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder setFamilyFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) { + if (familyFilesBuilder_ == null) { + ensureFamilyFilesIsMutable(); + familyFiles_.set(index, builderForValue.build()); + onChanged(); + } else { + familyFilesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder addFamilyFiles(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) { + if (familyFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFamilyFilesIsMutable(); + familyFiles_.add(value); + onChanged(); + } else { + familyFilesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder addFamilyFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles value) { + if (familyFilesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFamilyFilesIsMutable(); + familyFiles_.add(index, value); + onChanged(); + } else { + familyFilesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder addFamilyFiles( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) { + if (familyFilesBuilder_ == null) { + ensureFamilyFilesIsMutable(); + familyFiles_.add(builderForValue.build()); + onChanged(); + } else { + familyFilesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; 
+ */ + public Builder addFamilyFiles( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder builderForValue) { + if (familyFilesBuilder_ == null) { + ensureFamilyFilesIsMutable(); + familyFiles_.add(index, builderForValue.build()); + onChanged(); + } else { + familyFilesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder addAllFamilyFiles( + java.lang.Iterable values) { + if (familyFilesBuilder_ == null) { + ensureFamilyFilesIsMutable(); + super.addAll(values, familyFiles_); + onChanged(); + } else { + familyFilesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder clearFamilyFiles() { + if (familyFilesBuilder_ == null) { + familyFiles_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + familyFilesBuilder_.clear(); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public Builder removeFamilyFiles(int index) { + if (familyFilesBuilder_ == null) { + ensureFamilyFilesIsMutable(); + familyFiles_.remove(index); + onChanged(); + } else { + familyFilesBuilder_.remove(index); + } + return this; + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder getFamilyFilesBuilder( + int index) { + return getFamilyFilesFieldBuilder().getBuilder(index); + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder getFamilyFilesOrBuilder( + int index) { + if (familyFilesBuilder_ == null) { + return familyFiles_.get(index); } else { + return familyFilesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public java.util.List + getFamilyFilesOrBuilderList() { + if (familyFilesBuilder_ != null) { + return familyFilesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(familyFiles_); + } + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder addFamilyFilesBuilder() { + return getFamilyFilesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder addFamilyFilesBuilder( + int index) { + return getFamilyFilesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest.FamilyFiles family_files = 3; + */ + public java.util.List + getFamilyFilesBuilderList() { + return getFamilyFilesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles, 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder> + getFamilyFilesFieldBuilder() { + if (familyFilesBuilder_ == null) { + familyFilesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFilesOrBuilder>( + familyFiles_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + familyFiles_ = null; + } + return familyFilesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:SnapshotRegionManifest) + } + + static { + defaultInstance = new SnapshotRegionManifest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SnapshotRegionManifest) + } + + public interface SnapshotDataManifestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableSchema table_schema = 1; + /** + * required .TableSchema table_schema = 1; + */ + boolean hasTableSchema(); + /** + * required .TableSchema table_schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(); + /** + * required .TableSchema table_schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + // repeated .SnapshotRegionManifest region_manifests = 2; + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + java.util.List + getRegionManifestsList(); + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index); + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + int getRegionManifestsCount(); + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + java.util.List + getRegionManifestsOrBuilderList(); + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder( + int index); + } + /** + * Protobuf type {@code SnapshotDataManifest} + */ + public static final class SnapshotDataManifest extends + com.google.protobuf.GeneratedMessage + implements SnapshotDataManifestOrBuilder { + // Use SnapshotDataManifest.newBuilder() to construct. 
+ private SnapshotDataManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotDataManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotDataManifest defaultInstance; + public static SnapshotDataManifest getDefaultInstance() { + return defaultInstance; + } + + public SnapshotDataManifest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotDataManifest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + regionManifests_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + regionManifests_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + regionManifests_ = java.util.Collections.unmodifiableList(regionManifests_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotDataManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotDataManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotDataManifest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotDataManifest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableSchema table_schema = 1; + public static final int TABLE_SCHEMA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_; + /** + * required .TableSchema table_schema = 1; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema table_schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + return tableSchema_; + } + /** + * required .TableSchema table_schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_; + } + + // repeated .SnapshotRegionManifest region_manifests = 2; + public static final int REGION_MANIFESTS_FIELD_NUMBER = 2; + private java.util.List regionManifests_; + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public java.util.List getRegionManifestsList() { + return regionManifests_; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public java.util.List + getRegionManifestsOrBuilderList() { + return regionManifests_; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public int getRegionManifestsCount() { + return regionManifests_.size(); + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index) { + return regionManifests_.get(index); + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder( + int index) { + return regionManifests_.get(index); + } + + private void initFields() { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + regionManifests_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getRegionManifestsCount(); i++) { + if (!getRegionManifests(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableSchema_); + } + for (int i = 0; i < regionManifests_.size(); i++) { + output.writeMessage(2, regionManifests_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableSchema_); + } + for (int 
i = 0; i < regionManifests_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionManifests_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest) obj; + + boolean result = true; + result = result && (hasTableSchema() == other.hasTableSchema()); + if (hasTableSchema()) { + result = result && getTableSchema() + .equals(other.getTableSchema()); + } + result = result && getRegionManifestsList() + .equals(other.getRegionManifestsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + if (getRegionManifestsCount() > 0) { + hash = (37 * hash) + REGION_MANIFESTS_FIELD_NUMBER; + hash = (53 * hash) + getRegionManifestsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest 
parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SnapshotDataManifest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotDataManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotDataManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableSchemaFieldBuilder(); + getRegionManifestsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (regionManifestsBuilder_ == null) { + regionManifests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + regionManifestsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); 
+ } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.internal_static_SnapshotDataManifest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest build() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + if (regionManifestsBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + regionManifests_ = java.util.Collections.unmodifiableList(regionManifests_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.regionManifests_ = regionManifests_; + } else { + result.regionManifests_ = regionManifestsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest.getDefaultInstance()) return this; + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (regionManifestsBuilder_ == null) { + if (!other.regionManifests_.isEmpty()) { + if (regionManifests_.isEmpty()) { + regionManifests_ = other.regionManifests_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureRegionManifestsIsMutable(); + regionManifests_.addAll(other.regionManifests_); + } + onChanged(); + } + } else { + if (!other.regionManifests_.isEmpty()) { + if (regionManifestsBuilder_.isEmpty()) { + regionManifestsBuilder_.dispose(); + regionManifestsBuilder_ = null; + regionManifests_ = other.regionManifests_; + bitField0_ = (bitField0_ & ~0x00000002); + regionManifestsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionManifestsFieldBuilder() : null; + } else { + regionManifestsBuilder_.addAllMessages(other.regionManifests_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableSchema()) { + + return false; + } + if (!getTableSchema().isInitialized()) { + + return false; + } + for (int i = 0; i < getRegionManifestsCount(); i++) { + if (!getRegionManifests(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableSchema table_schema = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_; + /** + * required .TableSchema table_schema = 1; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema table_schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * required .TableSchema table_schema = 1; + */ + public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema table_schema = 1; + */ + public Builder setTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema table_schema = 1; + */ + public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + tableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + 
bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema table_schema = 1; + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableSchema table_schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * required .TableSchema table_schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_; + } + } + /** + * required .TableSchema table_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + tableSchema_, + getParentForChildren(), + isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + // repeated .SnapshotRegionManifest region_manifests = 2; + private java.util.List regionManifests_ = + java.util.Collections.emptyList(); + private void ensureRegionManifestsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + regionManifests_ = new java.util.ArrayList(regionManifests_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder> regionManifestsBuilder_; + + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public java.util.List getRegionManifestsList() { + if (regionManifestsBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionManifests_); + } else { + return regionManifestsBuilder_.getMessageList(); + } + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public int getRegionManifestsCount() { + if (regionManifestsBuilder_ == null) { + return regionManifests_.size(); + } else { + return regionManifestsBuilder_.getCount(); + } + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest getRegionManifests(int index) { + if (regionManifestsBuilder_ == null) { + return regionManifests_.get(index); + } else { + return regionManifestsBuilder_.getMessage(index); + } + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder setRegionManifests( + int index, 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) { + if (regionManifestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionManifestsIsMutable(); + regionManifests_.set(index, value); + onChanged(); + } else { + regionManifestsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder setRegionManifests( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) { + if (regionManifestsBuilder_ == null) { + ensureRegionManifestsIsMutable(); + regionManifests_.set(index, builderForValue.build()); + onChanged(); + } else { + regionManifestsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder addRegionManifests(org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) { + if (regionManifestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionManifestsIsMutable(); + regionManifests_.add(value); + onChanged(); + } else { + regionManifestsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder addRegionManifests( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest value) { + if (regionManifestsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionManifestsIsMutable(); + regionManifests_.add(index, value); + onChanged(); + } else { + regionManifestsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder addRegionManifests( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) { + if (regionManifestsBuilder_ == null) { + ensureRegionManifestsIsMutable(); + regionManifests_.add(builderForValue.build()); + onChanged(); + } else { + regionManifestsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder addRegionManifests( + int index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder builderForValue) { + if (regionManifestsBuilder_ == null) { + ensureRegionManifestsIsMutable(); + regionManifests_.add(index, builderForValue.build()); + onChanged(); + } else { + regionManifestsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder addAllRegionManifests( + java.lang.Iterable values) { + if (regionManifestsBuilder_ == null) { + ensureRegionManifestsIsMutable(); + super.addAll(values, regionManifests_); + onChanged(); + } else { + regionManifestsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder clearRegionManifests() { + if (regionManifestsBuilder_ == null) { + regionManifests_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + regionManifestsBuilder_.clear(); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public Builder removeRegionManifests(int index) { + 
if (regionManifestsBuilder_ == null) { + ensureRegionManifestsIsMutable(); + regionManifests_.remove(index); + onChanged(); + } else { + regionManifestsBuilder_.remove(index); + } + return this; + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder getRegionManifestsBuilder( + int index) { + return getRegionManifestsFieldBuilder().getBuilder(index); + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder getRegionManifestsOrBuilder( + int index) { + if (regionManifestsBuilder_ == null) { + return regionManifests_.get(index); } else { + return regionManifestsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public java.util.List + getRegionManifestsOrBuilderList() { + if (regionManifestsBuilder_ != null) { + return regionManifestsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionManifests_); + } + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder addRegionManifestsBuilder() { + return getRegionManifestsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder addRegionManifestsBuilder( + int index) { + return getRegionManifestsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.getDefaultInstance()); + } + /** + * repeated .SnapshotRegionManifest region_manifests = 2; + */ + public java.util.List + getRegionManifestsBuilderList() { + return getRegionManifestsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder> + getRegionManifestsFieldBuilder() { + if (regionManifestsBuilder_ == null) { + regionManifestsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.Builder, org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifestOrBuilder>( + regionManifests_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + regionManifests_ = null; + } + return regionManifestsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:SnapshotDataManifest) + } + + static { + defaultInstance = new SnapshotDataManifest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SnapshotDataManifest) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SnapshotFileInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SnapshotFileInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_SnapshotRegionManifest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SnapshotRegionManifest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SnapshotRegionManifest_StoreFile_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SnapshotRegionManifest_StoreFile_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SnapshotRegionManifest_FamilyFiles_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SnapshotRegionManifest_FamilyFiles_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SnapshotDataManifest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SnapshotDataManifest_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\016Snapshot.proto\032\010FS.proto\032\013HBase.proto\"" + + "\211\001\n\020SnapshotFileInfo\022$\n\004type\030\001 \002(\0162\026.Sna" + + "pshotFileInfo.Type\022\r\n\005hfile\030\003 \001(\t\022\022\n\nwal" + + "_server\030\004 \001(\t\022\020\n\010wal_name\030\005 \001(\t\"\032\n\004Type\022" + + "\t\n\005HFILE\020\001\022\007\n\003WAL\020\002\"\257\002\n\026SnapshotRegionMa" + + "nifest\022\017\n\007version\030\001 \001(\005\022 \n\013region_info\030\002" + + " \002(\0132\013.RegionInfo\0229\n\014family_files\030\003 \003(\0132" + + "#.SnapshotRegionManifest.FamilyFiles\032K\n\t" + + "StoreFile\022\014\n\004name\030\001 \002(\t\022\035\n\treference\030\002 \001" + + "(\0132\n.Reference\022\021\n\tfile_size\030\003 \001(\004\032Z\n\013Fam", + "ilyFiles\022\023\n\013family_name\030\001 \002(\014\0226\n\013store_f" + + "iles\030\002 \003(\0132!.SnapshotRegionManifest.Stor" + + "eFile\"m\n\024SnapshotDataManifest\022\"\n\014table_s" + + "chema\030\001 \002(\0132\014.TableSchema\0221\n\020region_mani" + + "fests\030\002 \003(\0132\027.SnapshotRegionManifestBD\n*" + + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\016SnapshotProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_SnapshotFileInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_SnapshotFileInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SnapshotFileInfo_descriptor, + new java.lang.String[] { "Type", "Hfile", "WalServer", "WalName", }); + internal_static_SnapshotRegionManifest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_SnapshotRegionManifest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SnapshotRegionManifest_descriptor, + new java.lang.String[] { "Version", "RegionInfo", "FamilyFiles", }); + internal_static_SnapshotRegionManifest_StoreFile_descriptor = + internal_static_SnapshotRegionManifest_descriptor.getNestedTypes().get(0); + 
internal_static_SnapshotRegionManifest_StoreFile_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SnapshotRegionManifest_StoreFile_descriptor, + new java.lang.String[] { "Name", "Reference", "FileSize", }); + internal_static_SnapshotRegionManifest_FamilyFiles_descriptor = + internal_static_SnapshotRegionManifest_descriptor.getNestedTypes().get(1); + internal_static_SnapshotRegionManifest_FamilyFiles_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SnapshotRegionManifest_FamilyFiles_descriptor, + new java.lang.String[] { "FamilyName", "StoreFiles", }); + internal_static_SnapshotDataManifest_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_SnapshotDataManifest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SnapshotDataManifest_descriptor, + new java.lang.String[] { "TableSchema", "RegionManifests", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.FSProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/MapReduce.proto b/hbase-protocol/src/main/protobuf/MapReduce.proto index 0c4c29e..61bd888 100644 --- a/hbase-protocol/src/main/protobuf/MapReduce.proto +++ b/hbase-protocol/src/main/protobuf/MapReduce.proto @@ -30,6 +30,7 @@ message ScanMetrics { } message TableSnapshotRegionSplit { - optional RegionSpecifier region = 1; - repeated string locations = 2; + optional TableSchema table = 1; + optional RegionInfo region = 2; + repeated string locations = 3; } diff --git a/hbase-protocol/src/main/protobuf/Snapshot.proto b/hbase-protocol/src/main/protobuf/Snapshot.proto new file mode 100644 index 0000000..34cb919 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/Snapshot.proto @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "SnapshotProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "FS.proto"; +import "HBase.proto"; + +message SnapshotFileInfo { + enum Type { + HFILE = 1; + WAL = 2; + } + + required Type type = 1; + + optional string hfile = 3; + + optional string wal_server = 4; + optional string wal_name = 5; +} + +message SnapshotRegionManifest { + optional int32 version = 1; + + required RegionInfo region_info = 2; + repeated FamilyFiles family_files = 3; + + message StoreFile { + required string name = 1; + optional Reference reference = 2; + + // TODO: Add checksums or other fields to verify the file + optional uint64 file_size = 3; + } + + message FamilyFiles { + required bytes family_name = 1; + repeated StoreFile store_files = 2; + } +} + +message SnapshotDataManifest { + required TableSchema table_schema = 1; + repeated SnapshotRegionManifest region_manifests = 2; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index 447285b..1a24fea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.UUID; @@ -36,12 +37,15 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.snapshot.ExportSnapshot; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; -import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; /** * A Scanner which performs a scan over snapshot files. 
Using this class requires copying the @@ -99,8 +103,7 @@ public class TableSnapshotScanner extends AbstractClientScanner { */ public TableSnapshotScanner(Configuration conf, Path restoreDir, String snapshotName, Scan scan) throws IOException { - this(conf, new Path(conf.get(HConstants.HBASE_DIR)), - restoreDir, snapshotName, scan); + this(conf, FSUtils.getRootDir(conf), restoreDir, snapshotName, scan); } /** @@ -128,22 +131,21 @@ public class TableSnapshotScanner extends AbstractClientScanner { private void init() throws IOException { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); - //load table descriptor - htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); + // load table descriptor + htd = manifest.getTableDescriptor(); - Set snapshotRegionNames - = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir); - if (snapshotRegionNames == null) { + List regionManifests = manifest.getRegionManifests(); + if (regionManifests == null) { throw new IllegalArgumentException("Snapshot seems empty"); } - regions = new ArrayList(snapshotRegionNames.size()); - for (String regionName : snapshotRegionNames) { + regions = new ArrayList(regionManifests.size()); + for (SnapshotRegionManifest regionManifest : regionManifests) { // load region descriptor - Path regionDir = new Path(snapshotDir, regionName); - HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, - regionDir); + HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo()); if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), hri.getEndKey())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index c8e84dd..0f2e06c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -190,7 +190,7 @@ public class Reference { } } - FSProtos.Reference convert() { + public FSProtos.Reference convert() { FSProtos.Reference.Builder builder = FSProtos.Reference.newBuilder(); builder.setRange(isTopFileRegion(getFileRegion())? FSProtos.Reference.Range.TOP: FSProtos.Reference.Range.BOTTOM); @@ -198,7 +198,7 @@ public class Reference { return builder.build(); } - static Reference convert(final FSProtos.Reference r) { + public static Reference convert(final FSProtos.Reference r) { Reference result = new Reference(); result.splitkey = r.getSplitkey().toByteArray(); result.region = r.getRange() == FSProtos.Reference.Range.TOP? 
Range.top: Range.bottom; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index f855465..de24ec4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -49,18 +49,18 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableSnapshotScanner; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.snapshot.ExportSnapshot; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; @@ -82,7 +82,7 @@ import com.google.common.annotations.VisibleForTesting; * while there are jobs reading from snapshot files. *

* Usage is similar to TableInputFormat, and - * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, + * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, * boolean, Path)} * can be used to configure the job. *

{@code
@@ -101,12 +101,12 @@ import com.google.common.annotations.VisibleForTesting;
  * 

* HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from * snapshot files and data files. HBase also enforces security because all the requests are handled - * by the server layer, and the user cannot read from the data files directly. - * To read from snapshot files directly from the file system, the user who is running the MR job - * must have sufficient permissions to access snapshot and reference files. - * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase - * user or the user must have group or other priviledges in the filesystem (See HBASE-8369). - * Note that, given other users access to read from snapshot/data files will completely circumvent + * by the server layer, and the user cannot read from the data files directly. + * To read from snapshot files directly from the file system, the user who is running the MR job + * must have sufficient permissions to access snapshot and reference files. + * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase + * user or the user must have group or other priviledges in the filesystem (See HBASE-8369). + * Note that, given other users access to read from snapshot/data files will completely circumvent * the access control enforced by HBase. * @see TableSnapshotScanner */ @@ -119,7 +119,7 @@ public class TableSnapshotInputFormat extends InputFormat locations) { - this.regionName = regionName; + TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo, List locations) { + this.htd = htd; + this.regionInfo = regionInfo; if (locations == null || locations.isEmpty()) { this.locations = new String[0]; } else { @@ -158,9 +160,8 @@ public class TableSnapshotInputFormat extends InputFormat locationsList = split.getLocationsList(); this.locations = locationsList.toArray(new String[locationsList.size()]); } } @VisibleForTesting - static class TableSnapshotRegionRecordReader extends + static class TableSnapshotRegionRecordReader extends RecordReader { private TableSnapshotRegionSplit split; private Scan scan; @@ -205,23 +207,13 @@ public class TableSnapshotInputFormat extends InputFormat snapshotRegionNames - = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir); - if (snapshotRegionNames == null) { + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); + List regionManifests = manifest.getRegionManifests(); + if (regionManifests == null) { throw new IllegalArgumentException("Snapshot seems empty"); } // load table descriptor - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, - snapshotDir); + HTableDescriptor htd = manifest.getTableDescriptor(); Scan scan = TableMapReduceUtil.convertStringToScan(conf .get(TableInputFormat.SCAN)); Path tableDir = new Path(conf.get(TABLE_DIR_KEY)); List splits = new ArrayList(); - for (String regionName : snapshotRegionNames) { + for (SnapshotRegionManifest regionManifest : regionManifests) { // load region descriptor - Path regionDir = new Path(snapshotDir, regionName); - HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, - regionDir); + HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo()); if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), hri.getEndKey())) { @@ -329,7 +318,7 @@ public class TableSnapshotInputFormat extends InputFormat region : regions) { 
HRegionInfo regionInfo = region.getFirst(); if (regionInfo.isOffline() && (regionInfo.isSplit() || regionInfo.isSplitParent())) { - if (!fs.exists(new Path(snapshotDir, regionInfo.getEncodedName()))) { - LOG.info("Take disabled snapshot of offline region=" + regionInfo); - snapshotDisabledRegion(regionInfo); - } + LOG.info("Take disabled snapshot of offline region=" + regionInfo); + snapshotDisabledRegion(regionInfo); } } } catch (InterruptedException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index 9578c50..131c2cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.snapshot; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; @@ -31,20 +32,23 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSVisitor; import org.apache.hadoop.hbase.util.HFileArchiveUtil; @@ -110,14 +114,16 @@ public final class MasterSnapshotVerifier { */ public void verifySnapshot(Path snapshotDir, Set snapshotServers) throws CorruptedSnapshotException, IOException { + SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs, + snapshotDir, snapshot); // verify snapshot info matches verifySnapshotDescription(snapshotDir); // check that tableinfo is a valid table description - verifyTableInfo(snapshotDir); + verifyTableInfo(manifest); // check that each region is valid - verifyRegions(snapshotDir); + verifyRegions(manifest); } /** @@ -136,8 +142,16 @@ public final class MasterSnapshotVerifier { * Check that the table descriptor for the snapshot is a valid table descriptor * @param snapshotDir snapshot directory to check */ - private void verifyTableInfo(Path snapshotDir) throws IOException { - FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); + private void verifyTableInfo(final SnapshotManifest manifest) throws IOException { + HTableDescriptor htd = 
manifest.getTableDescriptor(); + if (htd == null) { + throw new CorruptedSnapshotException("Missing Table Descriptor", snapshot); + } + + if (!htd.getNameAsString().equals(snapshot.getTable())) { + throw new CorruptedSnapshotException("Invalid Table Descriptor. Expected " + + snapshot.getTable() + " name, got " + htd.getNameAsString(), snapshot); + } } /** @@ -145,34 +159,36 @@ public final class MasterSnapshotVerifier { * @param snapshotDir snapshot directory to check * @throws IOException if we can't reach hbase:meta or read the files from the FS */ - private void verifyRegions(Path snapshotDir) throws IOException { + private void verifyRegions(final SnapshotManifest manifest) throws IOException { List regions = MetaReader.getTableRegions(this.services.getCatalogTracker(), tableName); - Set snapshotRegions = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir); - if (snapshotRegions == null) { + Map regionManifests = manifest.getRegionManifestsMap(); + if (regionManifests == null) { String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty"; LOG.error(msg); throw new CorruptedSnapshotException(msg); } String errorMsg = ""; - if (snapshotRegions.size() != regions.size()) { - errorMsg = "Regions moved during the snapshot '" + + if (regionManifests.size() != regions.size()) { + errorMsg = "Regions moved during the snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot) + "'. expected=" + - regions.size() + " snapshotted=" + snapshotRegions.size() + "."; + regions.size() + " snapshotted=" + regionManifests.size() + "."; LOG.error(errorMsg); } for (HRegionInfo region : regions) { - if (!snapshotRegions.contains(region.getEncodedName())) { + SnapshotRegionManifest regionManifest = regionManifests.get(region.getEncodedName()); + if (regionManifest == null) { // could happen due to a move or split race. 
String mesg = " No snapshot region directory found for region:" + region; if (errorMsg.isEmpty()) errorMsg = mesg; LOG.error(mesg); + continue; } - verifyRegion(fs, snapshotDir, region); + verifyRegion(fs, manifest.getSnapshotDir(), region, regionManifest); } if (!errorMsg.isEmpty()) { throw new CorruptedSnapshotException(errorMsg); @@ -185,65 +201,23 @@ public final class MasterSnapshotVerifier { * @param snapshotDir snapshot directory to check * @param region the region to check */ - private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region) - throws IOException { - // make sure we have region in the snapshot - Path regionDir = new Path(snapshotDir, region.getEncodedName()); - - // make sure we have the region info in the snapshot - Path regionInfo = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE); - // make sure the file exists - if (!fs.exists(regionInfo)) { - throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot); + private void verifyRegion(final FileSystem fs, final Path snapshotDir, final HRegionInfo region, + final SnapshotRegionManifest manifest) throws IOException { + HRegionInfo manifestRegionInfo = HRegionInfo.convert(manifest.getRegionInfo()); + if (!region.equals(manifestRegionInfo)) { + String msg = "Manifest region info " + manifestRegionInfo + + " doesn't match expected region:" + region; + throw new CorruptedSnapshotException(msg, snapshot); } - HRegionInfo found = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - if (!region.equals(found)) { - throw new CorruptedSnapshotException("Found region info (" + found - + ") doesn't match expected region:" + region, snapshot); - } - - // make sure we have the expected recovered edits files - TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot); - - // make sure we have all the expected store files - SnapshotReferenceUtil.visitRegionStoreFiles(fs, regionDir, new FSVisitor.StoreFileVisitor() { - public void storeFile(final String regionNameSuffix, final String family, - final String hfileName) throws IOException { - verifyStoreFile(snapshotDir, region, family, hfileName); + // make sure we have all the expected store files + SnapshotReferenceUtil.visitRegionStoreFiles(manifest, + new SnapshotReferenceUtil.StoreFileVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + SnapshotReferenceUtil.verifyStoreFile(services.getConfiguration(), fs, snapshotDir, + snapshot, region, family, storeFile); } }); } - - private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo, - final String family, final String fileName) throws IOException { - Path refPath = null; - if (StoreFileInfo.isReference(fileName)) { - // If is a reference file check if the parent file is present in the snapshot - Path snapshotHFilePath = new Path(new Path( - new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName); - refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath); - if (!fs.exists(refPath)) { - throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot); - } - } - - Path linkPath; - if (refPath != null && HFileLink.isHFileLink(refPath)) { - linkPath = new Path(family, refPath.getName()); - } else if (HFileLink.isHFileLink(fileName)) { - linkPath = new Path(family, fileName); - } else { - linkPath = new Path(family, HFileLink.createHFileLinkName(tableName, -
regionInfo.getEncodedName(), fileName)); - } - - // check if the linked file exists (in the archive, or in the table dir) - HFileLink link = new HFileLink(services.getConfiguration(), linkPath); - if (!link.exists(fs)) { - throw new CorruptedSnapshotException("Can't find hfile: " + fileName - + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath() - + ") directory for the primary table.", snapshot); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java index 2e970a1..2b97505 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; /** * Handler to Restore a snapshot. @@ -120,9 +121,11 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho // 2. Execute the on-disk Restore LOG.debug("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); + SnapshotManifest manifest = SnapshotManifest.open(masterServices.getConfiguration(), fs, + snapshotDir, snapshot); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper( - masterServices.getConfiguration(), fs, - snapshot, snapshotDir, hTableDescriptor, rootDir, monitor, status); + masterServices.getConfiguration(), fs, manifest, + this.hTableDescriptor, rootDir, monitor, status); RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions(); // 3. Forces all the RegionStates to be offline diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java index 0eac8be..bd9e59f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java @@ -174,19 +174,24 @@ public class SnapshotFileCache implements Stoppable { // is an illegal access to the cache. Really we could do a mutex-guarded pointer swap on the // cache, but that seems overkill at the moment and isn't necessarily a bottleneck. 
public synchronized boolean contains(String fileName) throws IOException { - if (this.cache.contains(fileName)) return true; - - refreshCache(); - - // then check again - return this.cache.contains(fileName); + boolean hasFile = this.cache.contains(fileName); + if (!hasFile) { + refreshCache(); + // then check again + hasFile = this.cache.contains(fileName); + } + return hasFile; } private synchronized void refreshCache() throws IOException { - // get the status of the snapshots directory and /.tmp - FileStatus dirStatus, tempStatus; + long lastTimestamp = Long.MAX_VALUE; + boolean hasChanges = false; + + // get the status of the snapshots directory and check if it has changes try { - dirStatus = fs.getFileStatus(snapshotDir); + FileStatus dirStatus = fs.getFileStatus(snapshotDir); + lastTimestamp = dirStatus.getModificationTime(); + hasChanges |= (lastTimestamp >= lastModifiedTime); } catch (FileNotFoundException e) { if (this.cache.size() > 0) { LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist"); @@ -194,16 +199,28 @@ return; } + // get the status of the snapshots temporary directory and check if it has changes + // The top-level directory timestamp is not updated, so we have to check the inner-level. try { Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME); - tempStatus = fs.getFileStatus(snapshotTmpDir); + FileStatus tempDirStatus = fs.getFileStatus(snapshotTmpDir); + lastTimestamp = Math.min(lastTimestamp, tempDirStatus.getModificationTime()); + hasChanges |= (lastTimestamp >= lastModifiedTime); + if (!hasChanges) { + FileStatus[] tmpSnapshots = FSUtils.listStatus(fs, snapshotDir); + if (tmpSnapshots != null) { + for (FileStatus dirStatus: tmpSnapshots) { + lastTimestamp = Math.min(lastTimestamp, dirStatus.getModificationTime()); + } + hasChanges |= (lastTimestamp >= lastModifiedTime); + } + } } catch (FileNotFoundException e) { - tempStatus = dirStatus; + // Nothing to do if the tmp dir doesn't exist } // if the snapshot directory wasn't modified since we last check, we are done - if (dirStatus.getModificationTime() <= lastModifiedTime && - tempStatus.getModificationTime() <= lastModifiedTime) { + if (!hasChanges) { return; } @@ -213,8 +230,7 @@ // However, snapshot directories are only created once, so this isn't an issue. // 1.
update the modified time - this.lastModifiedTime = Math.min(dirStatus.getModificationTime(), - tempStatus.getModificationTime()); + this.lastModifiedTime = lastTimestamp; // 2.clear the cache this.cache.clear(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java index e82ca16..562f682 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java @@ -66,7 +66,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { } @Override - public void setConf(Configuration conf) { + public void setConf(final Configuration conf) { super.setConf(conf); try { long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, @@ -77,7 +77,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() { public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException { - return SnapshotReferenceUtil.getHFileNames(fs, snapshotDir); + return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir); } }); } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 50afb42..e3a6feb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; import org.apache.hadoop.hbase.snapshot.SnapshotExistsException; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException; import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; @@ -540,9 +541,12 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable + "' doesn't exist, can't take snapshot.", snapshot); } - // set the snapshot version, now that we are ready to take it - snapshot = snapshot.toBuilder().setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION) - .build(); + // if not specified, set the snapshot format + if (!snapshot.hasVersion()) { + snapshot = snapshot.toBuilder() + .setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION) + .build(); + } // call pre coproc hook MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); @@ -676,15 +680,16 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // read snapshot information SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - HTableDescriptor snapshotTableDesc = - FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs, + snapshotDir, fsSnapshot); + HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor(); TableName tableName = TableName.valueOf(reqSnapshot.getTable()); // stop tracking "abandoned" handlers 
cleanupSentinels(); // Verify snapshot validity - SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, snapshotDir, fsSnapshot); + SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest); // Execute the restore/clone operation if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 19c03f9..c30fc06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -51,11 +51,10 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask; -import org.apache.hadoop.hbase.snapshot.ReferenceRegionHFilesTask; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; @@ -88,6 +87,9 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh protected final TableLock tableLock; protected final MonitoredTask status; protected final TableName snapshotTable; + protected final SnapshotManifest snapshotManifest; + + protected HTableDescriptor htd; /** * @param snapshot descriptor of the snapshot to take @@ -107,6 +109,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir); this.monitor = new ForeignExceptionDispatcher(snapshot.getName()); + this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor); this.tableLockManager = master.getTableLockManager(); this.tableLock = this.tableLockManager.writeLock( @@ -136,7 +139,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh // case of exceptions boolean success = false; try { - loadTableDescriptor(); // check that .tableinfo is present + this.htd = loadTableDescriptor(); // check that .tableinfo is present success = true; } finally { if (!success) { @@ -162,8 +165,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh // an external exception that gets captured here. 
// write down the snapshot info in the working directory - SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, this.fs); - new TableInfoCopyTask(monitor, snapshot, fs, rootDir).call(); + SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs); + snapshotManifest.addTableDescriptor(this.htd); monitor.rethrowException(); List> regionsAndLocations = @@ -184,16 +187,19 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh } } + // flush the in-memory state, and write the single manifest + status.setStatus("Consolidate snapshot: " + snapshot.getName()); + snapshotManifest.consolidate(); + // verify the snapshot is valid status.setStatus("Verifying snapshot: " + snapshot.getName()); verifier.verifySnapshot(this.workingDir, serverNames); // complete the snapshot, atomically moving from tmp to .snapshot dir. completeSnapshot(this.snapshotDir, this.workingDir, this.fs); - status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshotTable - + " completed"); - LOG.info("Snapshot " + snapshot.getName() + " of table " + snapshotTable - + " completed"); + msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed"; + status.markComplete(msg); + LOG.info(msg); metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime()); } catch (Exception e) { status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " + @@ -204,8 +210,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh ForeignException ee = new ForeignException(reason, e); monitor.receive(ee); // need to mark this completed to close off and allow cleanup to happen. - cancel("Failed to take snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot) - + "' due to exception"); + cancel(reason); } finally { LOG.debug("Launching cleanup of working dir:" + workingDir); try { @@ -262,26 +267,10 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh */ protected void snapshotDisabledRegion(final HRegionInfo regionInfo) throws IOException { - // 2 copy the regionInfo files to the snapshot - HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, - workingDir, regionInfo); - - // check for error for each region - monitor.rethrowException(); - - // 2 for each region, copy over its recovered.edits directory - Path regionDir = HRegion.getRegionDir(rootDir, regionInfo); - Path snapshotRegionDir = regionFs.getRegionDir(); - new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call(); - monitor.rethrowException(); - status.setStatus("Completed copying recovered edits for offline snapshot of table: " - + snapshotTable); - - // 2 reference all the files in the region - new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call(); + snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo); monitor.rethrowException(); - status.setStatus("Completed referencing HFiles for offline snapshot of table: " + - snapshotTable); + status.setStatus("Completed referencing HFiles for offline region " + regionInfo.toString() + + " of table: " + snapshotTable); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 0ff88e8..6048ec5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -128,6 +128,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.MutationReplay; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.ClassSize; @@ -194,7 +195,7 @@ public class HRegion implements HeapSize { // , Writable{ public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY = "hbase.hregion.scan.loadColumnFamiliesOnDemand"; - + /** * This is the global default value for durability. All tables/mutations not * defining a durability or using USE_DEFAULT will default to this value. @@ -398,7 +399,7 @@ public class HRegion implements HeapSize { // , Writable{ private RegionServerAccounting rsAccounting; private List> recentFlushes = new ArrayList>(); private long flushCheckInterval; - // flushPerChanges is to prevent too many changes in memstore + // flushPerChanges is to prevent too many changes in memstore private long flushPerChanges; private long blockingMemStoreSize; final long threadWakeFrequency; @@ -498,7 +499,7 @@ public class HRegion implements HeapSize { // , Writable{ throw new IllegalArgumentException(MEMSTORE_FLUSH_PER_CHANGES + " can not exceed " + MAX_FLUSH_PER_CHANGES); } - + this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration", DEFAULT_ROWLOCK_WAIT_DURATION); @@ -736,7 +737,7 @@ public class HRegion implements HeapSize { // , Writable{ for (Store store : this.stores.values()) { try { store.close(); - } catch (IOException e) { + } catch (IOException e) { LOG.warn(e.getMessage()); } } @@ -1073,7 +1074,7 @@ public class HRegion implements HeapSize { // , Writable{ // so we do not lose data throw new DroppedSnapshotException("Failed clearing memory after " + actualFlushes + " attempts on region: " + Bytes.toStringBinary(getRegionName())); - } + } LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this); } @@ -2710,59 +2711,12 @@ public class HRegion implements HeapSize { // , Writable{ */ public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare) throws IOException { - // This should be "fast" since we don't rewrite store files but instead - // back up the store files by creating a reference - Path rootDir = FSUtils.getRootDir(this.rsServices.getConfiguration()); + Path rootDir = FSUtils.getRootDir(conf); Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir); - // 1. dump region meta info into the snapshot directory - LOG.debug("Storing region-info for snapshot."); - HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, - this.fs.getFileSystem(), snapshotDir, getRegionInfo()); - - // 2. iterate through all the stores in the region - LOG.debug("Creating references for hfiles"); - - // This ensures that we have an atomic view of the directory as long as we have < ls limit - // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in - // batches and may miss files being added/deleted. 
This could be more robust (iteratively - // checking to see if we have all the files until we are sure), but the limit is currently 1000 - // files/batch, far more than the number of store files under a single column family. - for (Store store : stores.values()) { - // 2.1. build the snapshot reference directory for the store - Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString()); - List storeFiles = new ArrayList(store.getStorefiles()); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); - } - - // 2.2. iterate through all the store's files and create "references". - int sz = storeFiles.size(); - for (int i = 0; i < sz; i++) { - if (exnSnare != null) { - exnSnare.rethrowException(); - } - StoreFile storeFile = storeFiles.get(i); - Path file = storeFile.getPath(); - - LOG.debug("Creating reference for file (" + (i+1) + "/" + sz + ") : " + file); - Path referenceFile = new Path(dstStoreDir, file.getName()); - boolean success = true; - if (storeFile.isReference()) { - // write the Reference object to the snapshot - storeFile.getFileInfo().getReference().write(fs.getFileSystem(), referenceFile); - } else { - // create "reference" to this store file. It is intentionally an empty file -- all - // necessary information is captured by its fs location and filename. This allows us to - // only figure out what needs to be done via a single nn operation (instead of having to - // open and read the files as well). - success = fs.getFileSystem().createNewFile(referenceFile); - } - if (!success) { - throw new IOException("Failed to create reference file:" + referenceFile); - } - } - } + SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(), + snapshotDir, desc, exnSnare); + manifest.addRegion(this); } /** @@ -3915,14 +3869,14 @@ public class HRegion implements HeapSize { // , Writable{ isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength()); // save that the row was empty before filters applied to it. final boolean isEmptyRow = results.isEmpty(); - + // We have the part of the row necessary for filtering (all of it, usually). // First filter with the filterRow(List). FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; if (filter != null && filter.hasFilterRow()) { ret = filter.filterRowCellsWithRet(results); } - + if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) { results.clear(); boolean moreRows = nextRow(currentRow, offset, length); @@ -3990,7 +3944,7 @@ public class HRegion implements HeapSize { // , Writable{ return filter != null && (!filter.hasFilterRow()) && filter.filterRow(); } - + private boolean filterRowKey(byte[] row, int offset, short length) throws IOException { return filter != null && filter.filterRowKey(row, offset, length); @@ -5653,7 +5607,7 @@ public class HRegion implements HeapSize { // , Writable{ * modifies data. It has to be called just before a try. * #closeRegionOperation needs to be called in the try's finally block * Acquires a read lock and checks if the region is closing or closed. 
- * @throws IOException + * @throws IOException */ public void startRegionOperation() throws IOException { startRegionOperation(Operation.ANY); @@ -5661,7 +5615,7 @@ public class HRegion implements HeapSize { // , Writable{ /** * @param op The operation is about to be taken on the region - * @throws IOException + * @throws IOException */ protected void startRegionOperation(Operation op) throws IOException { switch (op) { @@ -5711,7 +5665,7 @@ public class HRegion implements HeapSize { // , Writable{ /** * Closes the lock. This needs to be called in the finally block corresponding * to the try block of #startRegionOperation - * @throws IOException + * @throws IOException */ public void closeRegionOperation() throws IOException { closeRegionOperation(Operation.ANY); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index d597650..a7982e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -77,7 +77,7 @@ public class HRegionFileSystem { private final Configuration conf; private final Path tableDir; private final FileSystem fs; - + /** * In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the * client level. @@ -149,7 +149,7 @@ public class HRegionFileSystem { * @param familyName Column Family Name * @return {@link Path} to the directory of the specified family */ - Path getStoreDir(final String familyName) { + public Path getStoreDir(final String familyName) { return new Path(this.getRegionDir(), familyName); } @@ -176,20 +176,31 @@ public class HRegionFileSystem { return getStoreFiles(Bytes.toString(familyName)); } + public Collection getStoreFiles(final String familyName) throws IOException { + return getStoreFiles(familyName, true); + } + /** * Returns the store files available for the family. * This methods performs the filtering based on the valid store files. * @param familyName Column Family Name * @return a set of {@link StoreFileInfo} for the specified family. */ - public Collection getStoreFiles(final String familyName) throws IOException { + public Collection getStoreFiles(final String familyName, final boolean validate) + throws IOException { Path familyDir = getStoreDir(familyName); FileStatus[] files = FSUtils.listStatus(this.fs, familyDir); - if (files == null) return null; + if (files == null) { + LOG.debug("No StoreFiles for: " + familyDir); + return null; + } ArrayList storeFiles = new ArrayList(files.length); for (FileStatus status: files) { - if (!StoreFileInfo.isValid(status)) continue; + if (validate && !StoreFileInfo.isValid(status)) { + LOG.warn("Invalid StoreFile: " + status.getPath()); + continue; + } storeFiles.add(new StoreFileInfo(this.conf, this.fs, status)); } @@ -328,7 +339,7 @@ public class HRegionFileSystem { Path storeDir = getStoreDir(familyName); if(!fs.exists(storeDir) && !createDir(storeDir)) throw new IOException("Failed creating " + storeDir); - + String name = buildPath.getName(); if (generateNewName) { name = generateUniqueName((seqNum < 0) ? 
null : "_SeqId_" + seqNum + "_"); @@ -530,16 +541,16 @@ public class HRegionFileSystem { */ Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f, final byte[] splitRow, final boolean top) throws IOException { - + // Check whether the split row lies in the range of the store file // If it is outside the range, return directly. if (top) { //check if larger than last key. KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow); - byte[] lastKey = f.createReader().getLastKey(); + byte[] lastKey = f.createReader().getLastKey(); // If lastKey is null means storefile is empty. if (lastKey == null) return null; - if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), + if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) { return null; } @@ -549,14 +560,14 @@ public class HRegionFileSystem { byte[] firstKey = f.createReader().getFirstKey(); // If firstKey is null means storefile is empty. if (firstKey == null) return null; - if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), + if (f.getReader().getComparator().compareFlatKey(splitKey.getBuffer(), splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) { return null; - } + } } - + f.getReader().close(true); - + Path splitDir = new Path(getSplitsDir(hri), familyName); // A reference to the bottom half of the hsf store file. Reference r = @@ -655,7 +666,7 @@ public class HRegionFileSystem { * Commit a merged region, moving it from the merges temporary directory to * the proper location in the filesystem. * @param mergedRegionInfo merged region {@link HRegionInfo} - * @throws IOException + * @throws IOException */ void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException { Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 31a26ab..ecac797 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -222,7 +222,7 @@ public class StoreFile { * @return the StoreFile object associated to this StoreFile. * null if the StoreFile is not a reference. 
*/ - StoreFileInfo getFileInfo() { + public StoreFileInfo getFileInfo() { return this.fileInfo; } @@ -613,7 +613,7 @@ public class StoreFile { if (comparator == null) { comparator = KeyValue.COMPARATOR; } - return new Writer(fs, filePath, + return new Writer(fs, filePath, conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); } } @@ -693,7 +693,7 @@ public class StoreFile { /** Bytes per Checksum */ protected int bytesPerChecksum; - + TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); /* isTimeRangeTrackerSet keeps track if the timeRange has already been set * When flushing a memstore, we set TimeRange and use this variable to @@ -722,7 +722,7 @@ public class StoreFile { final Configuration conf, CacheConfig cacheConf, final KVComparator comparator, BloomType bloomType, long maxKeys, - InetSocketAddress[] favoredNodes, HFileContext fileContext) + InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException { writer = HFile.getWriterFactory(conf, cacheConf) .withPath(fs, path) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 2d8a8ea..4d955d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -144,7 +144,7 @@ public class StoreFileInfo { * @return the Reference object associated to this StoreFileInfo. * null if the StoreFile is not a reference. */ - Reference getReference() { + public Reference getReference() { return this.reference; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java deleted file mode 100644 index 3dc2d99..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CopyRecoveredEditsTask.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.snapshot; - -import java.io.IOException; -import java.util.NavigableSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; - -/** - * Copy over each of the files in a region's recovered.edits directory to the region's snapshot - * directory. - *
- * This is a serial operation over each of the files in the recovered.edits directory and also - * streams all the bytes to the client and then back to the filesystem, so the files being copied - * should be small or it will (a) suck up a lot of bandwidth, and (b) take a long time. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class CopyRecoveredEditsTask extends SnapshotTask { - - private static final Log LOG = LogFactory.getLog(CopyRecoveredEditsTask.class); - private final FileSystem fs; - private final Path regiondir; - private final Path outputDir; - - /** - * @param snapshot Snapshot being taken - * @param monitor error monitor for the snapshot - * @param fs {@link FileSystem} where the snapshot is being taken - * @param regionDir directory for the region to examine for edits - * @param snapshotRegionDir directory for the region in the snapshot - */ - public CopyRecoveredEditsTask(SnapshotDescription snapshot, ForeignExceptionDispatcher monitor, - FileSystem fs, Path regionDir, Path snapshotRegionDir) { - super(snapshot, monitor); - this.fs = fs; - this.regiondir = regionDir; - this.outputDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir); - } - - @Override - public Void call() throws IOException { - NavigableSet files = HLogUtil.getSplitEditFilesSorted(this.fs, regiondir); - if (files == null || files.size() == 0) return null; - - // copy over each file. - // this is really inefficient (could be trivially parallelized), but is - // really simple to reason about. - for (Path source : files) { - // check to see if the file is zero length, in which case we can skip it - FileStatus stat = fs.getFileStatus(source); - if (stat.getLen() <= 0) continue; - - // its not zero length, so copy over the file - Path out = new Path(outputDir, source.getName()); - LOG.debug("Copying " + source + " to " + out); - FileUtil.copy(fs, source, fs, out, true, fs.getConf()); - - // check for errors to the running operation after each file - this.rethrowException(); - } - return null; - } -} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 6c7f488..a9f297f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -45,18 +45,23 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.io.FileLink; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HLogLink; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.mapreduce.JobUtil; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.NullWritable; import 
org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; @@ -95,7 +100,8 @@ public final class ExportSnapshot extends Configured implements Tool { // Export Map-Reduce Counters, to keep track of the progress public enum Counter { MISSING_FILES, COPY_FAILED, BYTES_EXPECTED, BYTES_COPIED, FILES_COPIED }; - private static class ExportMapper extends Mapper { + private static class ExportMapper extends Mapper { final static int REPORT_SIZE = 1 * 1024 * 1024; final static int BUFFER_SIZE = 64 * 1024; @@ -151,35 +157,35 @@ public final class ExportSnapshot extends Configured implements Tool { } @Override - public void map(Text key, NullWritable value, Context context) + public void map(BytesWritable key, NullWritable value, Context context) throws InterruptedException, IOException { - Path inputPath = new Path(key.toString()); - Path outputPath = getOutputPath(inputPath); + SnapshotFileInfo inputInfo = SnapshotFileInfo.parseFrom(key.copyBytes()); + Path outputPath = getOutputPath(inputInfo); - LOG.info("copy file input=" + inputPath + " output=" + outputPath); - copyFile(context, inputPath, outputPath); + copyFile(context, inputInfo, outputPath); } /** * Returns the location where the inputPath will be copied. - * - hfiles are encoded as hfile links hfile-region-table - * - logs are encoded as serverName/logName */ - private Path getOutputPath(final Path inputPath) throws IOException { - Path path; - if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) { - String family = inputPath.getParent().getName(); - TableName table = - HFileLink.getReferencedTableName(inputPath.getName()); - String region = HFileLink.getReferencedRegionName(inputPath.getName()); - String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); - path = new Path(FSUtils.getTableDir(new Path("./"), table), - new Path(region, new Path(family, hfile))); - } else if (isHLogLinkPath(inputPath)) { - String logName = inputPath.getName(); - path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName); - } else { - path = inputPath; + private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException { + Path path = null; + switch (inputInfo.getType()) { + case HFILE: + Path inputPath = new Path(inputInfo.getHfile()); + String family = inputPath.getParent().getName(); + TableName table =HFileLink.getReferencedTableName(inputPath.getName()); + String region = HFileLink.getReferencedRegionName(inputPath.getName()); + String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); + path = new Path(FSUtils.getTableDir(new Path("./"), table), + new Path(region, new Path(family, hfile))); + break; + case WAL: + Path oldLogsDir = new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME); + path = new Path(oldLogsDir, inputInfo.getWalName()); + break; + default: + throw new IOException("Invalid File Type: " + inputInfo.getType().toString()); } return new Path(outputArchive, path); } @@ -187,7 +193,7 @@ public final class ExportSnapshot extends Configured implements Tool { /* * Used by TestExportSnapshot to simulate a failure */ - private void injectTestFailure(final Context context, final Path inputPath) + private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) throws IOException { if (testFailures) { if (context.getConfiguration().getBoolean(CONF_TEST_RETRY, false)) { @@ 
-199,33 +205,33 @@ public final class ExportSnapshot extends Configured implements Tool { // retry, but at least we reduce the number of test failures due to // this test exception from the same map task. if (random.nextFloat() < 0.03) { - throw new IOException("TEST RETRY FAILURE: Unable to copy input=" + inputPath + throw new IOException("TEST RETRY FAILURE: Unable to copy input=" + inputInfo + " time=" + System.currentTimeMillis()); } } else { context.getCounter(Counter.COPY_FAILED).increment(1); - throw new IOException("TEST FAILURE: Unable to copy input=" + inputPath); + throw new IOException("TEST FAILURE: Unable to copy input=" + inputInfo); } } } - private void copyFile(final Context context, final Path inputPath, final Path outputPath) - throws IOException { - injectTestFailure(context, inputPath); + private void copyFile(final Context context, final SnapshotFileInfo inputInfo, + final Path outputPath) throws IOException { + injectTestFailure(context, inputInfo); // Get the file information - FileStatus inputStat = getSourceFileStatus(context, inputPath); + FileStatus inputStat = getSourceFileStatus(context, inputInfo); // Verify if the output file exists and is the same that we want to copy if (outputFs.exists(outputPath)) { FileStatus outputStat = outputFs.getFileStatus(outputPath); if (outputStat != null && sameFile(inputStat, outputStat)) { - LOG.info("Skip copy " + inputPath + " to " + outputPath + ", same file."); + LOG.info("Skip copy " + inputStat.getPath() + " to " + outputPath + ", same file."); return; } } - FSDataInputStream in = openSourceFile(context, inputPath); + FSDataInputStream in = openSourceFile(context, inputInfo); try { context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen()); @@ -233,7 +239,7 @@ public final class ExportSnapshot extends Configured implements Tool { outputFs.mkdirs(outputPath.getParent()); FSDataOutputStream out = outputFs.create(outputPath, true); try { - copyData(context, inputPath, in, outputPath, out, inputStat.getLen()); + copyData(context, inputStat.getPath(), in, outputPath, out, inputStat.getLen()); } finally { out.close(); } @@ -267,7 +273,7 @@ public final class ExportSnapshot extends Configured implements Tool { try { if (filesMode > 0 && stat.getPermission().toShort() != filesMode) { outputFs.setPermission(path, new FsPermission(filesMode)); - } else if (!stat.getPermission().equals(refStat.getPermission())) { + } else if (refStat != null && !stat.getPermission().equals(refStat.getPermission())) { outputFs.setPermission(path, refStat.getPermission()); } } catch (IOException e) { @@ -275,8 +281,9 @@ public final class ExportSnapshot extends Configured implements Tool { return false; } - String user = stringIsNotEmpty(filesUser) ? filesUser : refStat.getOwner(); - String group = stringIsNotEmpty(filesGroup) ? filesGroup : refStat.getGroup(); + boolean hasRefStat = (refStat != null); + String user = stringIsNotEmpty(filesUser) || !hasRefStat ? filesUser : refStat.getOwner(); + String group = stringIsNotEmpty(filesGroup) || !hasRefStat ? filesGroup : refStat.getGroup(); if (stringIsNotEmpty(user) || stringIsNotEmpty(group)) { try { if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) { @@ -359,40 +366,53 @@ public final class ExportSnapshot extends Configured implements Tool { * Throws an IOException if the communication with the inputFs fail or * if the file is not found. 
*/ - private FSDataInputStream openSourceFile(Context context, final Path path) throws IOException { + private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) + throws IOException { try { - if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) { - return new HFileLink(inputRoot, inputArchive, path).open(inputFs); - } else if (isHLogLinkPath(path)) { - String serverName = path.getParent().getName(); - String logName = path.getName(); - return new HLogLink(inputRoot, serverName, logName).open(inputFs); + FileLink link = null; + switch (fileInfo.getType()) { + case HFILE: + Path inputPath = new Path(fileInfo.getHfile()); + link = new HFileLink(inputRoot, inputArchive, inputPath); + break; + case WAL: + String serverName = fileInfo.getWalServer(); + String logName = fileInfo.getWalName(); + link = new HLogLink(inputRoot, serverName, logName); + break; + default: + throw new IOException("Invalid File Type: " + fileInfo.getType().toString()); } - return inputFs.open(path); + return link.open(inputFs); } catch (IOException e) { context.getCounter(Counter.MISSING_FILES).increment(1); - LOG.error("Unable to open source file=" + path, e); + LOG.error("Unable to open source file=" + fileInfo.toString(), e); throw e; } } - private FileStatus getSourceFileStatus(Context context, final Path path) throws IOException { + private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo) + throws IOException { try { - if (HFileLink.isHFileLink(path) || StoreFileInfo.isReference(path)) { - HFileLink link = new HFileLink(inputRoot, inputArchive, path); - return link.getFileStatus(inputFs); - } else if (isHLogLinkPath(path)) { - String serverName = path.getParent().getName(); - String logName = path.getName(); - return new HLogLink(inputRoot, serverName, logName).getFileStatus(inputFs); + FileLink link = null; + switch (fileInfo.getType()) { + case HFILE: + Path inputPath = new Path(fileInfo.getHfile()); + link = new HFileLink(inputRoot, inputArchive, inputPath); + break; + case WAL: + link = new HLogLink(inputRoot, fileInfo.getWalServer(), fileInfo.getWalName()); + break; + default: + throw new IOException("Invalid File Type: " + fileInfo.getType().toString()); } - return inputFs.getFileStatus(path); + return link.getFileStatus(inputFs); } catch (FileNotFoundException e) { context.getCounter(Counter.MISSING_FILES).increment(1); - LOG.error("Unable to get the status for source file=" + path, e); + LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e); throw e; } catch (IOException e) { - LOG.error("Unable to get the status for source file=" + path, e); + LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e); throw e; } } @@ -426,38 +446,38 @@ public final class ExportSnapshot extends Configured implements Tool { return inChecksum.equals(outChecksum); } - - /** - * HLog files are encoded as serverName/logName - * and since all the other files should be in /hbase/table/..path.. - * we can rely on the depth, for now. - */ - private static boolean isHLogLinkPath(final Path path) { - return path.depth() == 2; - } } /** * Extract the list of files (HFiles/HLogs) to copy using Map-Reduce. 
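// The mapper input is now a protobuf-encoded SnapshotFileInfo instead of a Text path. A hedged
// round-trip sketch of that key format, using only calls that appear in this patch; the hfile
// path string below is a made-up example:
SnapshotFileInfo hfileInfo = SnapshotFileInfo.newBuilder()
    .setType(SnapshotFileInfo.Type.HFILE)
    .setHfile("testtable/1588230740/f1/abcdef0123456789") // hypothetical hfile path string
    .build();
byte[] pbFileInfo = hfileInfo.toByteArray();
BytesWritable key = new BytesWritable();
key.set(pbFileInfo, 0, pbFileInfo.length);                             // writer side, as in createInputFiles() further down
SnapshotFileInfo parsed = SnapshotFileInfo.parseFrom(key.copyBytes()); // mapper side, as in map() above
switch (parsed.getType()) {
  case HFILE: /* resolve through HFileLink */ break;
  case WAL:   /* resolve through HLogLink  */ break;
  default: throw new IOException("Invalid File Type: " + parsed.getType());
}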
* @return list of files referenced by the snapshot (pair of path and size) */ - private List> getSnapshotFiles(final FileSystem fs, final Path snapshotDir) - throws IOException { + private List> getSnapshotFiles(final FileSystem fs, + final Path snapshotDir) throws IOException { SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - final List> files = new ArrayList>(); - final TableName table = - TableName.valueOf(snapshotDesc.getTable()); + final List> files = new ArrayList>(); + final TableName table = TableName.valueOf(snapshotDesc.getTable()); final Configuration conf = getConf(); // Get snapshot files - SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir, - new SnapshotReferenceUtil.FileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - Path path = HFileLink.createPath(table, region, family, hfile); - long size = new HFileLink(conf, path).getFileStatus(fs).getLen(); - files.add(new Pair(path, size)); + SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc, + new SnapshotReferenceUtil.SnapshotVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + String region = regionInfo.getEncodedName(); + String hfile = storeFile.getName(); + Path path = HFileLink.createPath(table, region, family, hfile); + + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() + .setType(SnapshotFileInfo.Type.HFILE) + .setHfile(path.toString()) + .build(); + + long size = new HFileLink(conf, path).getFileStatus(fs).getLen(); + files.add(new Pair(fileInfo, size)); + } } public void recoveredEdits (final String region, final String logfile) @@ -467,8 +487,14 @@ public final class ExportSnapshot extends Configured implements Tool { public void logFile (final String server, final String logfile) throws IOException { + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() + .setType(SnapshotFileInfo.Type.WAL) + .setWalServer(server) + .setWalName(logfile) + .build(); + long size = new HLogLink(conf, server, logfile).getFileStatus(fs).getLen(); - files.add(new Pair(new Path(server, logfile), size)); + files.add(new Pair(fileInfo, size)); } }); @@ -483,34 +509,35 @@ public final class ExportSnapshot extends Configured implements Tool { * and then each group fetch the bigger file available, iterating through groups * alternating the direction. */ - static List> getBalancedSplits(final List> files, int ngroups) { + static List> getBalancedSplits( + final List> files, final int ngroups) { // Sort files by size, from small to big - Collections.sort(files, new Comparator>() { - public int compare(Pair a, Pair b) { + Collections.sort(files, new Comparator>() { + public int compare(Pair a, Pair b) { long r = a.getSecond() - b.getSecond(); return (r < 0) ? -1 : ((r > 0) ? 
1 : 0); } }); // create balanced groups - List> fileGroups = new LinkedList>(); + List> fileGroups = new LinkedList>(); long[] sizeGroups = new long[ngroups]; int hi = files.size() - 1; int lo = 0; - List group; + List group; int dir = 1; int g = 0; while (hi >= lo) { if (g == fileGroups.size()) { - group = new LinkedList(); + group = new LinkedList(); fileGroups.add(group); } else { group = fileGroups.get(g); } - Pair fileInfo = files.get(hi--); + Pair fileInfo = files.get(hi--); // add the hi one sizeGroups[g] += fileInfo.getSecond(); @@ -550,25 +577,25 @@ public final class ExportSnapshot extends Configured implements Tool { * and the number of the files to copy. */ private static Path[] createInputFiles(final Configuration conf, final Path inputFolderPath, - final List> snapshotFiles, int mappers) + final List> snapshotFiles, int mappers) throws IOException, InterruptedException { FileSystem fs = inputFolderPath.getFileSystem(conf); LOG.debug("Input folder location: " + inputFolderPath); - List> splits = getBalancedSplits(snapshotFiles, mappers); + List> splits = getBalancedSplits(snapshotFiles, mappers); Path[] inputFiles = new Path[splits.size()]; - Text key = new Text(); + BytesWritable key = new BytesWritable(); for (int i = 0; i < inputFiles.length; i++) { - List files = splits.get(i); + List files = splits.get(i); inputFiles[i] = new Path(inputFolderPath, String.format("export-%d.seq", i)); SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inputFiles[i], - Text.class, NullWritable.class); + BytesWritable.class, NullWritable.class); LOG.debug("Input split: " + i); try { - for (Path file: files) { - LOG.debug(file.toString()); - key.set(file.toString()); + for (SnapshotFileInfo file: files) { + byte[] pbFileInfo = file.toByteArray(); + key.set(pbFileInfo, 0, pbFileInfo.length); writer.append(key, NullWritable.get()); } } finally { @@ -583,7 +610,7 @@ public final class ExportSnapshot extends Configured implements Tool { * Run Map-Reduce Job to perform the files copy. 
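// getBalancedSplits() above walks the size-sorted list from both ends, alternating direction, so
// that every group ends up with a similar byte count. For intuition only, a simpler greedy variant
// (largest remaining file goes to the currently lightest group); this is an illustration, not the
// algorithm the patch uses:
static List<List<Pair<SnapshotFileInfo, Long>>> greedySplits(
    final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) {
  // Sort files by size, from big to small
  Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() {
    public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) {
      return b.getSecond().compareTo(a.getSecond());
    }
  });
  List<List<Pair<SnapshotFileInfo, Long>>> groups =
      new ArrayList<List<Pair<SnapshotFileInfo, Long>>>(ngroups);
  long[] sizes = new long[ngroups];
  for (int g = 0; g < ngroups; g++) {
    groups.add(new ArrayList<Pair<SnapshotFileInfo, Long>>());
  }
  for (Pair<SnapshotFileInfo, Long> file : files) {
    int lightest = 0;
    for (int g = 1; g < ngroups; g++) {
      if (sizes[g] < sizes[lightest]) lightest = g;
    }
    groups.get(lightest).add(file);
    sizes[lightest] += file.getSecond();
  }
  return groups;
}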
*/ private void runCopyJob(final Path inputRoot, final Path outputRoot, - final List> snapshotFiles, final boolean verifyChecksum, + final List> snapshotFiles, final boolean verifyChecksum, final String filesUser, final String filesGroup, final int filesMode, final int mappers) throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); @@ -691,7 +718,7 @@ public final class ExportSnapshot extends Configured implements Tool { System.err.println("UNEXPECTED: " + cmd); printUsageAndExit(); } - } catch (Exception e) { + } catch (IOException e) { printUsageAndExit(); } } @@ -748,7 +775,7 @@ public final class ExportSnapshot extends Configured implements Tool { // Step 0 - Extract snapshot files to copy LOG.info("Loading Snapshot hfile list"); - final List> files = getSnapshotFiles(inputFs, snapshotDir); + final List> files = getSnapshotFiles(inputFs, snapshotDir); if (mappers == 0 && files.size() > 0) { mappers = 1 + (files.size() / conf.getInt(CONF_MAP_GROUP, 10)); mappers = Math.min(mappers, files.size()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java deleted file mode 100644 index 60d48d9..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.snapshot; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.util.FSUtils; - -/** - * Reference all the hfiles in a region for a snapshot. - *
- * Doesn't take into acccount if the hfiles are valid or not, just keeps track of what's in the - * region's directory. - */ -public class ReferenceRegionHFilesTask extends SnapshotTask { - - public static final Log LOG = LogFactory.getLog(ReferenceRegionHFilesTask.class); - private final Path regiondir; - private final FileSystem fs; - private final PathFilter fileFilter; - private final Path snapshotDir; - - /** - * Reference all the files in the given region directory - * @param snapshot snapshot for which to add references - * @param monitor to check/send error - * @param regionDir region directory to look for errors - * @param fs {@link FileSystem} where the snapshot/region live - * @param regionSnapshotDir directory in the snapshot to store region files - */ - public ReferenceRegionHFilesTask(final SnapshotDescription snapshot, - ForeignExceptionDispatcher monitor, Path regionDir, final FileSystem fs, Path regionSnapshotDir) { - super(snapshot, monitor); - this.regiondir = regionDir; - this.fs = fs; - - this.fileFilter = new PathFilter() { - @Override - public boolean accept(Path path) { - try { - return fs.isFile(path); - } catch (IOException e) { - LOG.error("Failed to reach fs to check file:" + path + ", marking as not file"); - ReferenceRegionHFilesTask.this.snapshotFailure("Failed to reach fs to check file status", - e); - return false; - } - } - }; - this.snapshotDir = regionSnapshotDir; - } - - @Override - public Void call() throws IOException { - FileStatus[] families = FSUtils.listStatus(fs, regiondir, new FSUtils.FamilyDirFilter(fs)); - - // if no families, then we are done again - if (families == null || families.length == 0) { - LOG.info("No families under region directory:" + regiondir - + ", not attempting to add references."); - return null; - } - - // snapshot directories to store the hfile reference - List snapshotFamilyDirs = TakeSnapshotUtils.getFamilySnapshotDirectories(snapshot, - snapshotDir, families); - - LOG.debug("Add hfile references to snapshot directories:" + snapshotFamilyDirs); - for (int i = 0; i < families.length; i++) { - FileStatus family = families[i]; - Path familyDir = family.getPath(); - // get all the hfiles in the family - FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir, fileFilter); - - // if no hfiles, then we are done with this family - if (hfiles == null || hfiles.length == 0) { - LOG.debug("Not hfiles found for family: " + familyDir + ", skipping."); - continue; - } - - // make the snapshot's family directory - Path snapshotFamilyDir = snapshotFamilyDirs.get(i); - fs.mkdirs(snapshotFamilyDir); - - // create a reference for each hfile - for (FileStatus hfile : hfiles) { - // references are 0-length files, relying on file name. 
- Path referenceFile = new Path(snapshotFamilyDir, hfile.getPath().getName()); - LOG.debug("Creating reference for:" + hfile.getPath() + " at " + referenceFile); - if (!fs.createNewFile(referenceFile)) { - throw new IOException("Failed to create reference file:" + referenceFile); - } - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Finished referencing hfiles, current region state:"); - FSUtils.logFileSystemState(fs, regiondir, LOG); - LOG.debug("and the snapshot directory:"); - FSUtils.logFileSystemState(fs, snapshotDir, LOG); - } - return null; - } -} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 83dbaa1..9bfcfdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.snapshot; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -46,15 +47,17 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSVisitor; import org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -115,9 +118,9 @@ public class RestoreSnapshotHelper { private final ForeignExceptionDispatcher monitor; private final MonitoredTask status; + private final SnapshotManifest snapshotManifest; private final SnapshotDescription snapshotDesc; private final TableName snapshotTable; - private final Path snapshotDir; private final HTableDescriptor tableDesc; private final Path rootDir; @@ -128,8 +131,7 @@ public class RestoreSnapshotHelper { public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, - final SnapshotDescription snapshotDescription, - final Path snapshotDir, + final SnapshotManifest manifest, final HTableDescriptor tableDescriptor, final Path rootDir, final ForeignExceptionDispatcher monitor, @@ -137,9 +139,9 @@ public class RestoreSnapshotHelper { { this.fs = fs; this.conf = conf; - this.snapshotDesc = snapshotDescription; - this.snapshotTable = TableName.valueOf(snapshotDescription.getTable()); - this.snapshotDir = snapshotDir; + this.snapshotManifest = manifest; + this.snapshotDesc = manifest.getSnapshotDescription(); + this.snapshotTable = TableName.valueOf(snapshotDesc.getTable()); this.tableDesc = tableDescriptor; this.rootDir = rootDir; 
this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName()); @@ -153,14 +155,19 @@ public class RestoreSnapshotHelper { */ public RestoreMetaChanges restoreHdfsRegions() throws IOException { LOG.debug("starting restore"); - Set snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir); - if (snapshotRegionNames == null) { + + Map regionManifests = snapshotManifest.getRegionManifestsMap(); + if (regionManifests == null) { LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty"); return null; } RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap); + // Take a copy of the manifest.keySet() since we are going to modify + // this instance, by removing the regions already present in the restore dir. + Set regionNames = new HashSet(regionManifests.keySet()); + // Identify which region are still available and which not. // NOTE: we rely upon the region name as: "table name, start key, end key" List tableRegions = getTableRegions(); @@ -168,9 +175,9 @@ public class RestoreSnapshotHelper { monitor.rethrowException(); for (HRegionInfo regionInfo: tableRegions) { String regionName = regionInfo.getEncodedName(); - if (snapshotRegionNames.contains(regionName)) { + if (regionNames.contains(regionName)) { LOG.info("region to restore: " + regionName); - snapshotRegionNames.remove(regionName); + regionNames.remove(regionName); metaChanges.addRegionToRestore(regionInfo); } else { LOG.info("region to remove: " + regionName); @@ -181,7 +188,7 @@ public class RestoreSnapshotHelper { // Restore regions using the snapshot data monitor.rethrowException(); status.setStatus("Restoring table regions..."); - restoreHdfsRegions(metaChanges.getRegionsToRestore()); + restoreHdfsRegions(regionManifests, metaChanges.getRegionsToRestore()); status.setStatus("Finished restoring all table regions."); // Remove regions from the current table @@ -192,30 +199,23 @@ public class RestoreSnapshotHelper { } // Regions to Add: present in the snapshot but not in the current table - if (snapshotRegionNames.size() > 0) { - List regionsToAdd = new LinkedList(); + if (regionNames.size() > 0) { + List regionsToAdd = new ArrayList(regionNames.size()); monitor.rethrowException(); - for (String regionName: snapshotRegionNames) { + for (String regionName: regionNames) { LOG.info("region to add: " + regionName); - Path regionDir = new Path(snapshotDir, regionName); - regionsToAdd.add(HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir)); + regionsToAdd.add(HRegionInfo.convert(regionManifests.get(regionName).getRegionInfo())); } // Create new regions cloning from the snapshot monitor.rethrowException(); status.setStatus("Cloning regions..."); - HRegionInfo[] clonedRegions = cloneHdfsRegions(regionsToAdd); + HRegionInfo[] clonedRegions = cloneHdfsRegions(regionManifests, regionsToAdd); metaChanges.setNewRegions(clonedRegions); status.setStatus("Finished cloning regions."); } - // Restore WALs - monitor.rethrowException(); - status.setStatus("Restoring WALs to table..."); - restoreWALs(); - status.setStatus("Finished restoring WALs to table."); - return metaChanges; } @@ -357,19 +357,34 @@ public class RestoreSnapshotHelper { /** * Restore specified regions by restoring content to the snapshot state. 
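// The per-region file listing that used to come from scanning the snapshot directory is now carried
// by SnapshotRegionManifest protobufs. A hedged sketch of walking the map returned by
// getRegionManifestsMap(), using only accessors that appear elsewhere in this patch
// (snapshotManifest and LOG are assumed to be in scope):
Map<String, SnapshotRegionManifest> regionManifests = snapshotManifest.getRegionManifestsMap();
if (regionManifests != null) {
  for (Map.Entry<String, SnapshotRegionManifest> entry : regionManifests.entrySet()) {
    for (SnapshotRegionManifest.FamilyFiles familyFiles : entry.getValue().getFamilyFilesList()) {
      String family = familyFiles.getFamilyName().toStringUtf8();
      for (SnapshotRegionManifest.StoreFile storeFile : familyFiles.getStoreFilesList()) {
        // hasReference() marks store files that carry a split Reference instead of a plain hfile
        LOG.debug(entry.getKey() + "/" + family + "/" + storeFile.getName()
            + (storeFile.hasReference() ? " (reference)" : ""));
      }
    }
  }
}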
*/ - private void restoreHdfsRegions(final List regions) throws IOException { + private void restoreHdfsRegions(final Map regionManifests, + final List regions) throws IOException { if (regions == null || regions.size() == 0) return; - for (HRegionInfo hri: regions) restoreRegion(hri); + for (HRegionInfo hri: regions) { + restoreRegion(hri, regionManifests.get(hri.getEncodedName())); + } + } + + private Map> getRegionHFileReferences( + final SnapshotRegionManifest manifest) { + Map> familyMap = + new HashMap>(manifest.getFamilyFilesCount()); + for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + familyMap.put(familyFiles.getFamilyName().toStringUtf8(), + new ArrayList(familyFiles.getStoreFilesList())); + } + return familyMap; } /** * Restore region by removing files not in the snapshot * and adding the missing ones from the snapshot. */ - private void restoreRegion(HRegionInfo regionInfo) throws IOException { - Path snapshotRegionDir = new Path(snapshotDir, regionInfo.getEncodedName()); - Map> snapshotFiles = - SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir); + private void restoreRegion(final HRegionInfo regionInfo, + final SnapshotRegionManifest regionManifest) throws IOException { + Map> snapshotFiles = + getRegionHFileReferences(regionManifest); + Path regionDir = new Path(tableDir, regionInfo.getEncodedName()); String tableName = tableDesc.getTableName().getNameAsString(); @@ -377,32 +392,34 @@ public class RestoreSnapshotHelper { for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { byte[] family = Bytes.toBytes(familyDir.getName()); Set familyFiles = getTableRegionFamilyFiles(familyDir); - List snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName()); + List snapshotFamilyFiles = + snapshotFiles.remove(familyDir.getName()); if (snapshotFamilyFiles != null) { - List hfilesToAdd = new LinkedList(); - for (String hfileName: snapshotFamilyFiles) { - if (familyFiles.contains(hfileName)) { + List hfilesToAdd = + new ArrayList(); + for (SnapshotRegionManifest.StoreFile storeFile: snapshotFamilyFiles) { + if (familyFiles.contains(storeFile.getName())) { // HFile already present - familyFiles.remove(hfileName); + familyFiles.remove(storeFile.getName()); } else { // HFile missing - hfilesToAdd.add(hfileName); + hfilesToAdd.add(storeFile); } } // Remove hfiles not present in the snapshot for (String hfileName: familyFiles) { Path hfile = new Path(familyDir, hfileName); - LOG.trace("Removing hfile=" + hfile + + LOG.trace("Removing hfile=" + hfileName + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile); } // Restore Missing files - for (String hfileName: hfilesToAdd) { - LOG.trace("Adding HFileLink " + hfileName + + for (SnapshotRegionManifest.StoreFile storeFile: hfilesToAdd) { + LOG.debug("Adding HFileLink " + storeFile.getName() + " to region=" + regionInfo.getEncodedName() + " table=" + tableName); - restoreStoreFile(familyDir, regionInfo, hfileName); + restoreStoreFile(familyDir, regionInfo, storeFile); } } else { // Family doesn't exists in the snapshot @@ -414,15 +431,16 @@ public class RestoreSnapshotHelper { } // Add families not present in the table - for (Map.Entry> familyEntry: snapshotFiles.entrySet()) { + for (Map.Entry> familyEntry: + snapshotFiles.entrySet()) { Path familyDir = new Path(regionDir, familyEntry.getKey()); if (!fs.mkdirs(familyDir)) { throw new IOException("Unable to create familyDir=" + 
familyDir); } - for (String hfileName: familyEntry.getValue()) { - LOG.trace("Adding HFileLink " + hfileName + " to table=" + tableName); - restoreStoreFile(familyDir, regionInfo, hfileName); + for (SnapshotRegionManifest.StoreFile storeFile: familyEntry.getValue()) { + LOG.trace("Adding HFileLink " + storeFile.getName() + " to table=" + tableName); + restoreStoreFile(familyDir, regionInfo, storeFile); } } } @@ -448,7 +466,8 @@ public class RestoreSnapshotHelper { * Clone specified regions. For each region create a new region * and create a HFileLink for each hfile. */ - private HRegionInfo[] cloneHdfsRegions(final List regions) throws IOException { + private HRegionInfo[] cloneHdfsRegions(final Map regionManifests, + final List regions) throws IOException { if (regions == null || regions.size() == 0) return null; final Map snapshotRegions = @@ -476,7 +495,8 @@ public class RestoreSnapshotHelper { tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { @Override public void fillRegion(final HRegion region) throws IOException { - cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName())); + HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName()); + cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName())); } }); @@ -494,21 +514,17 @@ public class RestoreSnapshotHelper { * @param region {@link HRegion} cloned * @param snapshotRegionInfo */ - private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo) - throws IOException { - final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName()); + private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo, + final SnapshotRegionManifest manifest) throws IOException { final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName()); final String tableName = tableDesc.getTableName().getNameAsString(); - SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir, - new FSVisitor.StoreFileVisitor() { - @Override - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - LOG.info("Adding HFileLink " + hfile + " to table=" + tableName); - Path familyDir = new Path(regionDir, family); - restoreStoreFile(familyDir, snapshotRegionInfo, hfile); - } - }); + for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); + for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { + LOG.info("Adding HFileLink " + storeFile.getName() + " to table=" + tableName); + restoreStoreFile(familyDir, snapshotRegionInfo, storeFile); + } + } } /** @@ -524,11 +540,12 @@ public class RestoreSnapshotHelper { * @param hfileName store file name (can be a Reference, HFileLink or simple HFile) */ private void restoreStoreFile(final Path familyDir, final HRegionInfo regionInfo, - final String hfileName) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName); } else if (StoreFileInfo.isReference(hfileName)) { - restoreReferenceFile(familyDir, regionInfo, hfileName); + restoreReferenceFile(familyDir, regionInfo, storeFile); } else { HFileLink.create(conf, fs, familyDir, regionInfo, hfileName); } @@ -553,7 +570,9 @@ public class 
RestoreSnapshotHelper { * @param hfileName reference file name */ private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo, - final String hfileName) throws IOException { + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + String hfileName = storeFile.getName(); + // Extract the referred information (hfile name and parent region) Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path( snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()), @@ -577,16 +596,21 @@ public class RestoreSnapshotHelper { Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName); // Create the new reference - InputStream in; - if (linkPath != null) { - in = new HFileLink(conf, linkPath).open(fs); + if (storeFile.hasReference()) { + Reference reference = Reference.convert(storeFile.getReference()); + reference.write(fs, outPath); } else { - linkPath = new Path(new Path(HRegion.getRegionDir(snapshotDir, regionInfo.getEncodedName()), - familyDir.getName()), hfileName); - in = fs.open(linkPath); + InputStream in; + if (linkPath != null) { + in = new HFileLink(conf, linkPath).open(fs); + } else { + linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), + regionInfo.getEncodedName()), familyDir.getName()), hfileName); + in = fs.open(linkPath); + } + OutputStream out = fs.create(outPath); + IOUtils.copyBytes(in, out, conf); } - OutputStream out = fs.create(outPath); - IOUtils.copyBytes(in, out, conf); // Add the daughter region to the map String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes())); @@ -619,43 +643,6 @@ public class RestoreSnapshotHelper { } /** - * Restore snapshot WALs. - * - * Global Snapshot keep a reference to region servers logs present during the snapshot. - * (/hbase/.snapshot/snapshotName/.logs/hostName/logName) - * - * Since each log contains different tables data, logs must be split to - * extract the table that we are interested in. 
- */ - private void restoreWALs() throws IOException { - final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir, - snapshotTable, regionsMap); - // TODO: use executors to parallelize splitting - // TODO: once split, we do not need to split again for other restores - try { - // Recover.Edits - SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir, - new FSVisitor.RecoveredEditsVisitor() { - @Override - public void recoveredEdits (final String region, final String logfile) throws IOException { - Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile); - logSplitter.splitRecoveredEdit(path); - } - }); - - // Region Server Logs - SnapshotReferenceUtil.visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() { - @Override - public void logFile (final String server, final String logfile) throws IOException { - logSplitter.splitLog(server, logfile); - } - }); - } finally { - logSplitter.close(); - } - } - - /** * @return the set of the regions contained in the table */ private List getTableRegions() throws IOException { @@ -720,16 +707,14 @@ public class RestoreSnapshotHelper { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - - //load table descriptor - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); MonitoredTask status = TaskMonitor.get().createStatus( "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir); ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(); - RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, snapshotDesc, - snapshotDir, htd, restoreDir, monitor, status); + RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, + manifest, manifest.getTableDescriptor(), restoreDir, monitor, status); helper.restoreHdfsRegions(); // TODO: parallelize. if (LOG.isDebugEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 34d3224..df2c4ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; @@ -91,7 +92,7 @@ public class SnapshotDescriptionUtils { * Version of the fs layout for a snapshot. Future snapshots may have different file layouts, * which we may need to read in differently. 
*/ - public static final int SNAPSHOT_LAYOUT_VERSION = 0; + public static final int SNAPSHOT_LAYOUT_VERSION = SnapshotManifestV2.DESCRIPTOR_VERSION; // snapshot directory constants /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 08a444b..7aebded 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.util.StringUtils; @@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HLogLink; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -184,10 +186,10 @@ public final class SnapshotInfo extends Configured implements Tool { * @param hfile store file name * @return the store file information */ - FileInfo addStoreFile(final String region, final String family, final String hfile) - throws IOException { - TableName table = snapshotTable; - HFileLink link = HFileLink.create(conf, table, region, family, hfile); + FileInfo addStoreFile(final HRegionInfo region, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + HFileLink link = HFileLink.create(conf, snapshotTable, region.getEncodedName(), + family, storeFile.getName()); boolean inArchive = false; long size = -1; try { @@ -245,9 +247,7 @@ public final class SnapshotInfo extends Configured implements Tool { private FileSystem fs; private Path rootDir; - private HTableDescriptor snapshotTableDesc; - private SnapshotDescription snapshotDesc; - private Path snapshotDir; + private SnapshotManifest snapshotManifest; @Override public int run(String[] args) throws IOException, InterruptedException { @@ -309,14 +309,14 @@ public final class SnapshotInfo extends Configured implements Tool { * @return false if snapshot is not found */ private boolean loadSnapshotInfo(final String snapshotName) throws IOException { - snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); if (!fs.exists(snapshotDir)) { LOG.warn("Snapshot '" + snapshotName + "' not found in: " + snapshotDir); return false; } - snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); - snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc); return true; } @@ -324,12 +324,13 @@ public final class SnapshotInfo extends Configured implements Tool { * Dump the {@link SnapshotDescription} */ private void printInfo() { + SnapshotDescription snapshotDesc = 
snapshotManifest.getSnapshotDescription(); SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); System.out.println("Snapshot Info"); System.out.println("----------------------------------------"); System.out.println(" Name: " + snapshotDesc.getName()); System.out.println(" Type: " + snapshotDesc.getType()); - System.out.println(" Table: " + snapshotTableDesc.getTableName().getNameAsString()); + System.out.println(" Table: " + snapshotDesc.getTable()); System.out.println(" Format: " + snapshotDesc.getVersion()); System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime()))); System.out.println(); @@ -341,7 +342,7 @@ public final class SnapshotInfo extends Configured implements Tool { private void printSchema() { System.out.println("Table Descriptor"); System.out.println("----------------------------------------"); - System.out.println(snapshotTableDesc.toString()); + System.out.println(snapshotManifest.getTableDescriptor().toString()); System.out.println(); } @@ -356,18 +357,20 @@ public final class SnapshotInfo extends Configured implements Tool { } // Collect information about hfiles and logs in the snapshot - final String table = snapshotTableDesc.getTableName().getNameAsString(); - final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, this.snapshotDesc); - SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir, - new SnapshotReferenceUtil.FileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - SnapshotStats.FileInfo info = stats.addStoreFile(region, family, hfile); - + final SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + final String table = snapshotDesc.getTable(); + final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, snapshotDesc); + SnapshotReferenceUtil.visitReferencedFiles(getConf(), fs, + snapshotManifest.getSnapshotDir(), snapshotDesc, new SnapshotReferenceUtil.SnapshotVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (storeFile.hasReference()) return; + + SnapshotStats.FileInfo info = stats.addStoreFile(regionInfo, family, storeFile); if (showFiles) { System.out.printf("%8s %s/%s/%s/%s %s%n", (info.isMissing() ? "-" : StringUtils.humanReadableInt(info.getSize())), - table, region, family, hfile, + table, regionInfo.getEncodedName(), family, storeFile.getName(), (info.inArchive() ? "(archive)" : info.isMissing() ? 
"(NOT FOUND)" : "")); } } @@ -444,11 +447,13 @@ public final class SnapshotInfo extends Configured implements Tool { FileSystem fs = FileSystem.get(conf); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); final SnapshotStats stats = new SnapshotStats(conf, fs, snapshot); - SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir, - new SnapshotReferenceUtil.FileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - stats.addStoreFile(region, family, hfile); + SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshot, + new SnapshotReferenceUtil.SnapshotVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + if (!storeFile.hasReference()) { + stats.addStoreFile(regionInfo, family, storeFile); + } } public void recoveredEdits (final String region, final String logfile) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java new file mode 100644 index 0000000..91b8a36 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -0,0 +1,442 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.snapshot; + +import java.io.IOException; +import java.io.FileNotFoundException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.FSTableDescriptors; + +import com.google.protobuf.HBaseZeroCopyByteString; + +/** + * Utility class to help read/write the Snapshot Manifest. + * + * The snapshot format is transparent for the users of this class, + * once the snapshot is written, it will never be modified. + * On open() the snapshot will be loaded to the current in-memory format. + */ +@InterfaceAudience.Private +public class SnapshotManifest { + private static final Log LOG = LogFactory.getLog(SnapshotManifest.class); + + private static final String DATA_MANIFEST_NAME = "data.manifest"; + + private List regionManifests; + private SnapshotDescription desc; + private HTableDescriptor htd; + + private final ForeignExceptionSnare monitor; + private final Configuration conf; + private final Path workingDir; + private final FileSystem fs; + + private SnapshotManifest(final Configuration conf, final FileSystem fs, + final Path workingDir, final SnapshotDescription desc, + final ForeignExceptionSnare monitor) { + this.monitor = monitor; + this.desc = desc; + this.workingDir = workingDir; + this.conf = conf; + this.fs = fs; + } + + /** + * Return a SnapshotManifest instance, used for writing a snapshot. + * + * There are two usage pattern: + * - The Master will create a manifest, add the descriptor, offline regions + * and consolidate the snapshot by writing all the pending stuff on-disk. + * manifest = SnapshotManifest.create(...) + * manifest.addRegion(tableDir, hri) + * manifest.consolidate() + * - The RegionServer will create a single region manifest + * manifest = SnapshotManifest.create(...) + * manifest.addRegion(region) + */ + public static SnapshotManifest create(final Configuration conf, final FileSystem fs, + final Path workingDir, final SnapshotDescription desc, + final ForeignExceptionSnare monitor) { + return new SnapshotManifest(conf, fs, workingDir, desc, monitor); + } + + /** + * Return a SnapshotManifest instance with the information already loaded in-memory. 
+ * SnapshotManifest manifest = SnapshotManifest.open(...) + * HTableDescriptor htd = manifest.getTableDescriptor() + * for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests()) + * hri = regionManifest.getRegionInfo() + * for (regionManifest.getFamilyFiles()) + * ... + */ + public static SnapshotManifest open(final Configuration conf, final FileSystem fs, + final Path workingDir, final SnapshotDescription desc) throws IOException { + SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null); + manifest.load(); + return manifest; + } + + + /** + * Add the table descriptor to the snapshot manifest + */ + public void addTableDescriptor(final HTableDescriptor htd) throws IOException { + this.htd = htd; + } + + interface RegionVisitor { + TRegion regionOpen(final HRegionInfo regionInfo) throws IOException; + void regionClose(final TRegion region) throws IOException; + + TFamily familyOpen(final TRegion region, final byte[] familyName) throws IOException; + void familyClose(final TRegion region, final TFamily family) throws IOException; + + void storeFile(final TRegion region, final TFamily family, final StoreFileInfo storeFile) + throws IOException; + } + + private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException { + switch (getSnapshotFormat(desc)) { + case SnapshotManifestV1.DESCRIPTOR_VERSION: + return new SnapshotManifestV1.ManifestBuilder(conf, fs, workingDir); + case SnapshotManifestV2.DESCRIPTOR_VERSION: + return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir); + default: + throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc); + } + } + + /** + * Creates a 'manifest' for the specified region, by reading directly from the HRegion object. + * This is used by the "online snapshot" when the table is enabled. + */ + public void addRegion(final HRegion region) throws IOException { + // 0. Get the ManifestBuilder/RegionVisitor + RegionVisitor visitor = createRegionVisitor(desc); + + // 1. dump region meta info into the snapshot directory + LOG.debug("Storing '" + region + "' region-info for snapshot."); + Object regionData = visitor.regionOpen(region.getRegionInfo()); + monitor.rethrowException(); + + // 2. iterate through all the stores in the region + LOG.debug("Creating references for hfiles"); + + for (Store store : region.getStores().values()) { + // 2.1. build the snapshot reference for the store + Object familyData = visitor.familyOpen(regionData, store.getFamily().getName()); + monitor.rethrowException(); + + List storeFiles = new ArrayList(store.getStorefiles()); + if (LOG.isDebugEnabled()) { + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + } + + // 2.2. iterate through all the store's files and create "references". + for (int i = 0, sz = storeFiles.size(); i < sz; i++) { + StoreFile storeFile = storeFiles.get(i); + monitor.rethrowException(); + + // create "reference" to this store file. + LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath()); + visitor.storeFile(regionData, familyData, storeFile.getFileInfo()); + } + visitor.familyClose(regionData, familyData); + } + visitor.regionClose(regionData); + } + + /** + * Creates a 'manifest' for the specified region, by reading directly from the disk. + * This is used by the "offline snapshot" when the table is disabled. + */ + public void addRegion(final Path tableDir, final HRegionInfo regionInfo) throws IOException { + // 0. 
Get the ManifestBuilder/RegionVisitor
+    RegionVisitor visitor = createRegionVisitor(desc);
+
+    // Open the RegionFS
+    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
+        tableDir, regionInfo, true);
+    monitor.rethrowException();
+
+    // 1. dump region meta info into the snapshot directory
+    LOG.debug("Storing region-info for snapshot.");
+    Object regionData = visitor.regionOpen(regionInfo);
+    monitor.rethrowException();
+
+    // 2. iterate through all the stores in the region
+    LOG.debug("Creating references for hfiles");
+
+    // This ensures that we have an atomic view of the directory as long as we have < ls limit
+    // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
+    // batches and may miss files being added/deleted. This could be more robust (iteratively
+    // checking to see if we have all the files until we are sure), but the limit is currently 1000
+    // files/batch, far more than the number of store files under a single column family.
+    Collection<String> familyNames = regionFs.getFamilies();
+    if (familyNames != null) {
+      for (String familyName: familyNames) {
+        Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
+        monitor.rethrowException();
+
+        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
+        if (storeFiles == null) {
+          LOG.debug("No files under family: " + familyName);
+          continue;
+        }
+
+        // 2.1. build the snapshot reference for the store
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
+        }
+
+        // 2.2. iterate through all the store's files and create "references".
+        int i = 0;
+        int sz = storeFiles.size();
+        for (StoreFileInfo storeFile: storeFiles) {
+          monitor.rethrowException();
+
+          // create "reference" to this store file.
+          LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath());
+          visitor.storeFile(regionData, familyData, storeFile);
+        }
+        visitor.familyClose(regionData, familyData);
+      }
+    }
+    visitor.regionClose(regionData);
+  }
+
+  /**
+   * Load the information in the SnapshotManifest. Called by SnapshotManifest.open()
+   *
+   * If the format is v2 and there is no data-manifest, it means that we are loading an
+   * in-progress snapshot. Since we support rolling upgrades, we look for both the v1 and
+   * v2 region formats.
+   */
+  private void load() throws IOException {
+    switch (getSnapshotFormat(desc)) {
+      case SnapshotManifestV1.DESCRIPTOR_VERSION:
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
+        this.regionManifests = SnapshotManifestV1.loadRegionManifests(conf, fs, workingDir, desc);
+        break;
+      case SnapshotManifestV2.DESCRIPTOR_VERSION:
+        SnapshotDataManifest dataManifest = readDataManifest();
+        if (dataManifest != null) {
+          htd = HTableDescriptor.convert(dataManifest.getTableSchema());
+          regionManifests = dataManifest.getRegionManifestsList();
+        } else {
+          // Compatibility, load the v1 regions
+          // This happens only when the snapshot is in-progress and the cache wants to refresh.
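+          // Note: consolidate() -> convertToV2SingleManifest() performs the same v1 + v2
+          // read when it merges the per-region manifests into the single data manifest.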
+          List<SnapshotRegionManifest> v1Regions =
+              SnapshotManifestV1.loadRegionManifests(conf, fs, workingDir, desc);
+          List<SnapshotRegionManifest> v2Regions =
+              SnapshotManifestV2.loadRegionManifests(conf, fs, workingDir, desc);
+          if (v1Regions != null && v2Regions != null) {
+            regionManifests =
+                new ArrayList<SnapshotRegionManifest>(v1Regions.size() + v2Regions.size());
+            regionManifests.addAll(v1Regions);
+            regionManifests.addAll(v2Regions);
+          } else if (v1Regions != null) {
+            regionManifests = v1Regions;
+          } else /* if (v2Regions != null) */ {
+            regionManifests = v2Regions;
+          }
+        }
+        break;
+      default:
+        throw new CorruptedSnapshotException("Invalid Snapshot version: "+ desc.getVersion(), desc);
+    }
+  }
+
+  /**
+   * Get the current snapshot working dir
+   */
+  public Path getSnapshotDir() {
+    return this.workingDir;
+  }
+
+  /**
+   * Get the SnapshotDescription
+   */
+  public SnapshotDescription getSnapshotDescription() {
+    return this.desc;
+  }
+
+  /**
+   * Get the table descriptor from the Snapshot
+   */
+  public HTableDescriptor getTableDescriptor() {
+    return this.htd;
+  }
+
+  /**
+   * Get all the Region Manifests from the snapshot
+   */
+  public List<SnapshotRegionManifest> getRegionManifests() {
+    return this.regionManifests;
+  }
+
+  /**
+   * Get all the Region Manifests from the snapshot.
+   * This is a helper to get a map keyed by the region encoded name
+   */
+  public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
+    if (regionManifests == null || regionManifests.size() == 0) return null;
+
+    HashMap<String, SnapshotRegionManifest> regionsMap =
+        new HashMap<String, SnapshotRegionManifest>(regionManifests.size());
+    for (SnapshotRegionManifest manifest: regionManifests) {
+      String regionName = getRegionNameFromManifest(manifest);
+      regionsMap.put(regionName, manifest);
+    }
+    return regionsMap;
+  }
+
+  public void consolidate() throws IOException {
+    if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
+      Path rootDir = FSUtils.getRootDir(conf);
+      LOG.info("Using old Snapshot Format");
+      // write a copy of descriptor to the snapshot directory
+      new FSTableDescriptors(fs, rootDir)
+        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+    } else {
+      LOG.debug("Convert to Single Snapshot Manifest");
+      convertToV2SingleManifest();
+    }
+  }
+
+  /*
+   * In case of rolling-upgrade, we try to read all the formats and build
+   * the snapshot with the latest format.
+   */
+  private void convertToV2SingleManifest() throws IOException {
+    // Try to load v1 and v2 regions
+    List<SnapshotRegionManifest> v1Regions =
+        SnapshotManifestV1.loadRegionManifests(conf, fs, workingDir, desc);
+    List<SnapshotRegionManifest> v2Regions =
+        SnapshotManifestV2.loadRegionManifests(conf, fs, workingDir, desc);
+
+    SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
+    dataManifestBuilder.setTableSchema(htd.convert());
+
+    if (v1Regions != null && v1Regions.size() > 0) {
+      dataManifestBuilder.addAllRegionManifests(v1Regions);
+    }
+    if (v2Regions != null && v2Regions.size() > 0) {
+      dataManifestBuilder.addAllRegionManifests(v2Regions);
+    }
+
+    // Write the v2 Data Manifest.
+    // Once the data-manifest is written, the snapshot can be considered complete.
+    // Currently snapshots are written in a "temporary" directory and later
+    // moved to the "completed" snapshot directory.
+    SnapshotDataManifest dataManifest = dataManifestBuilder.build();
+    writeDataManifest(dataManifest);
+    this.regionManifests = dataManifest.getRegionManifestsList();
+
+    // Remove the region manifests. Everything is now in the data-manifest.
+    // The delete operation is "relaxed": unless we get an exception, we keep going.
+ // The extra files in the snapshot directory will not give any problem, + // since they have the same content as the data manifest, and even by re-reading + // them we will get the same information. + if (v1Regions != null && v1Regions.size() > 0) { + for (SnapshotRegionManifest regionManifest: v1Regions) { + SnapshotManifestV1.deleteRegionManifest(fs, workingDir, regionManifest); + } + } + if (v2Regions != null && v2Regions.size() > 0) { + for (SnapshotRegionManifest regionManifest: v2Regions) { + SnapshotManifestV2.deleteRegionManifest(fs, workingDir, regionManifest); + } + } + } + + /* + * Write the SnapshotDataManifest file + */ + private void writeDataManifest(final SnapshotDataManifest manifest) + throws IOException { + FSDataOutputStream stream = fs.create(new Path(workingDir, DATA_MANIFEST_NAME)); + try { + manifest.writeTo(stream); + } finally { + stream.close(); + } + } + + /* + * Read the SnapshotDataManifest file + */ + private SnapshotDataManifest readDataManifest() throws IOException { + FSDataInputStream in = null; + try { + in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME)); + return SnapshotDataManifest.parseFrom(in); + } catch (FileNotFoundException e) { + return null; + } finally { + if (in != null) in.close(); + } + } + + /** + * Extract the region encoded name from the region manifest + */ + static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) { + byte[] regionName = HRegionInfo.createRegionName( + ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()), + manifest.getRegionInfo().getStartKey().toByteArray(), + manifest.getRegionInfo().getRegionId(), true); + return HRegionInfo.encodeRegionName(regionName); + } + + /* + * Return the snapshot format + */ + private static int getSnapshotFormat(final SnapshotDescription desc) { + return desc.hasVersion() ? desc.getVersion() : SnapshotManifestV1.DESCRIPTOR_VERSION; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java new file mode 100644 index 0000000..ea9ffe4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.snapshot; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; + +import com.google.protobuf.HBaseZeroCopyByteString; + +/** + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. + * + * Snapshot v1 layout format + * - Each region in the table is represented by a directory with the .hregioninfo file + * /snapshotName/regionName/.hregioninfo + * - Each file present in the table is represented by an empty file + * /snapshotName/regionName/familyName/fileName + */ +@InterfaceAudience.Private +public class SnapshotManifestV1 { + private static final Log LOG = LogFactory.getLog(SnapshotManifestV1.class); + + public static final int DESCRIPTOR_VERSION = 0; + + private SnapshotManifestV1() { + } + + static class ManifestBuilder implements SnapshotManifest.RegionVisitor< + HRegionFileSystem, Path> { + private final Configuration conf; + private final Path snapshotDir; + private final FileSystem fs; + + public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) { + this.snapshotDir = snapshotDir; + this.conf = conf; + this.fs = fs; + } + + public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException { + HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf, + fs, snapshotDir, regionInfo); + return snapshotRegionFs; + } + + public void regionClose(final HRegionFileSystem region) { + } + + public Path familyOpen(final HRegionFileSystem snapshotRegionFs, final byte[] familyName) { + Path familyDir = snapshotRegionFs.getStoreDir(Bytes.toString(familyName)); + return familyDir; + } + + public void familyClose(final HRegionFileSystem region, final Path family) { + } + + public void storeFile(final HRegionFileSystem region, final Path familyDir, + final StoreFileInfo storeFile) throws IOException { + Path referenceFile = new Path(familyDir, storeFile.getPath().getName()); + boolean success = true; + if (storeFile.isReference()) { + // write the Reference object to the snapshot + storeFile.getReference().write(fs, referenceFile); + } else { + // create "reference" to this store file. It is intentionally an empty file -- all + // necessary information is captured by its fs location and filename. This allows us to + // only figure out what needs to be done via a single nn operation (instead of having to + // open and read the files as well). 
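+        // The empty file keeps the original hfile name, so the referenced file can be
+        // located later simply by rebuilding the table/region/family path.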
+ success = fs.createNewFile(referenceFile); + } + if (!success) { + throw new IOException("Failed to create reference file:" + referenceFile); + } + } + } + + static List loadRegionManifests(final Configuration conf, + final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) + throws IOException { + FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); + if (regions == null) { + LOG.info("No regions under directory:" + snapshotDir); + return null; + } + + List regionsManifest = + new ArrayList(regions.length); + + for (FileStatus region: regions) { + HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath()); + regionsManifest.add(buildManifestFromDisk(conf, fs, snapshotDir, hri)); + } + + return regionsManifest; + } + + static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, + final SnapshotRegionManifest manifest) throws IOException { + String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); + fs.delete(new Path(snapshotDir, regionName), true); + } + + static SnapshotRegionManifest buildManifestFromDisk (final Configuration conf, + final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException { + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, + tableDir, regionInfo, true); + SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); + + // 1. dump region meta info into the snapshot directory + LOG.debug("Storing region-info for snapshot."); + manifest.setRegionInfo(HRegionInfo.convert(regionInfo)); + + // 2. iterate through all the stores in the region + LOG.debug("Creating references for hfiles"); + + // This ensures that we have an atomic view of the directory as long as we have < ls limit + // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in + // batches and may miss files being added/deleted. This could be more robust (iteratively + // checking to see if we have all the files until we are sure), but the limit is currently 1000 + // files/batch, far more than the number of store files under a single column family. + Collection familyNames = regionFs.getFamilies(); + if (familyNames != null) { + for (String familyName: familyNames) { + Collection storeFiles = regionFs.getStoreFiles(familyName, false); + if (storeFiles == null) { + LOG.debug("No files under family: " + familyName); + continue; + } + + // 2.1. build the snapshot reference for the store + SnapshotRegionManifest.FamilyFiles.Builder family = + SnapshotRegionManifest.FamilyFiles.newBuilder(); + family.setFamilyName(HBaseZeroCopyByteString.wrap(Bytes.toBytes(familyName))); + + if (LOG.isDebugEnabled()) { + LOG.debug("Adding snapshot references for " + storeFiles + " hfiles"); + } + + // 2.2. iterate through all the store's files and create "references". + int i = 0; + int sz = storeFiles.size(); + for (StoreFileInfo storeFile: storeFiles) { + // create "reference" to this store file. 
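+        // Note that only the file name is recorded here; the v2 builder additionally
+        // stores the Reference and the file size for each store file.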
+ LOG.debug("Adding reference for file ("+ (++i) +"/" + sz + "): " + storeFile.getPath()); + SnapshotRegionManifest.StoreFile.Builder sfManifest = + SnapshotRegionManifest.StoreFile.newBuilder(); + sfManifest.setName(storeFile.getPath().getName()); + family.addStoreFiles(sfManifest.build()); + } + manifest.addFamilyFiles(family.build()); + } + } + return manifest.build(); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java new file mode 100644 index 0000000..01aacc0 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.snapshot; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; + +import com.google.protobuf.HBaseZeroCopyByteString; + +/** + * DO NOT USE DIRECTLY. USE {@link SnapshotManifest}. 
+ * + * Snapshot v2 layout format + * - Single Manifest file containing all the information of regions + * - In the online-snapshot case each region will write a "region manifest" + * /snapshotName/manifest.regionName + */ +@InterfaceAudience.Private +public class SnapshotManifestV2 { + private static final Log LOG = LogFactory.getLog(SnapshotManifestV2.class); + + public static final int DESCRIPTOR_VERSION = 2; + + private static final String SNAPSHOT_MANIFEST_PREFIX = "region-manifest."; + + static class ManifestBuilder implements SnapshotManifest.RegionVisitor< + SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> { + private final Configuration conf; + private final Path snapshotDir; + private final FileSystem fs; + + public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) { + this.snapshotDir = snapshotDir; + this.conf = conf; + this.fs = fs; + } + + public SnapshotRegionManifest.Builder regionOpen(final HRegionInfo regionInfo) { + SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder(); + manifest.setRegionInfo(HRegionInfo.convert(regionInfo)); + return manifest; + } + + public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException { + SnapshotRegionManifest manifest = region.build(); + FSDataOutputStream stream = fs.create(getRegionManifestPath(snapshotDir, manifest)); + try { + manifest.writeTo(stream); + } finally { + stream.close(); + } + } + + public SnapshotRegionManifest.FamilyFiles.Builder familyOpen( + final SnapshotRegionManifest.Builder region, final byte[] familyName) { + SnapshotRegionManifest.FamilyFiles.Builder family = + SnapshotRegionManifest.FamilyFiles.newBuilder(); + family.setFamilyName(HBaseZeroCopyByteString.wrap(familyName)); + return family; + } + + public void familyClose(final SnapshotRegionManifest.Builder region, + final SnapshotRegionManifest.FamilyFiles.Builder family) { + region.addFamilyFiles(family.build()); + } + + public void storeFile(final SnapshotRegionManifest.Builder region, + final SnapshotRegionManifest.FamilyFiles.Builder family, final StoreFileInfo storeFile) { + SnapshotRegionManifest.StoreFile.Builder sfManifest = + SnapshotRegionManifest.StoreFile.newBuilder(); + sfManifest.setName(storeFile.getPath().getName()); + if (storeFile.isReference()) { + sfManifest.setReference(storeFile.getReference().convert()); + } + sfManifest.setFileSize(storeFile.getFileStatus().getLen()); + family.addStoreFiles(sfManifest.build()); + } + } + + static List loadRegionManifests(final Configuration conf, + final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) + throws IOException { + FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() { + @Override + public boolean accept(Path path) { + return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX); + } + }); + + if (manifestFiles == null || manifestFiles.length == 0) return null; + + ArrayList manifests = + new ArrayList(manifestFiles.length); + for (FileStatus st: manifestFiles) { + FSDataInputStream stream = fs.open(st.getPath()); + try { + manifests.add(SnapshotRegionManifest.parseFrom(stream)); + } finally { + stream.close(); + } + } + return manifests; + } + + static void deleteRegionManifest(final FileSystem fs, final Path snapshotDir, + final SnapshotRegionManifest manifest) throws IOException { + fs.delete(getRegionManifestPath(snapshotDir, manifest), true); + } + + private static Path getRegionManifestPath(final Path snapshotDir, + final 
SnapshotRegionManifest manifest) { + String regionName = SnapshotManifest.getRegionNameFromManifest(manifest); + return new Path(snapshotDir, SNAPSHOT_MANIFEST_PREFIX + regionName); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java index dc977bb..5a5a323 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java @@ -27,15 +27,20 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSVisitor; @@ -44,7 +49,14 @@ import org.apache.hadoop.hbase.util.FSVisitor; */ @InterfaceAudience.Private public final class SnapshotReferenceUtil { - public interface FileVisitor extends FSVisitor.StoreFileVisitor, + public static final Log LOG = LogFactory.getLog(SnapshotReferenceUtil.class); + + public interface StoreFileVisitor { + void storeFile(final HRegionInfo regionInfo, final String familyName, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException; + } + + public interface SnapshotVisitor extends StoreFileVisitor, FSVisitor.RecoveredEditsVisitor, FSVisitor.LogFileVisitor { } @@ -89,14 +101,33 @@ public final class SnapshotReferenceUtil { /** * Iterate over the snapshot store files, restored.edits and logs * + * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory * @param visitor callback object to get the referenced files * @throws IOException if an error occurred while scanning the directory */ - public static void visitReferencedFiles(final FileSystem fs, final Path snapshotDir, - final FileVisitor visitor) throws IOException { - visitTableStoreFiles(fs, snapshotDir, visitor); + public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, + final Path snapshotDir, final SnapshotVisitor visitor) + throws IOException { + SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + visitReferencedFiles(conf, fs, snapshotDir, desc, visitor); + } + + /** + * Iterate over the snapshot store files, restored.edits and logs + * + * @param conf The current {@link Configuration} instance. 
+ * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory + * @param desc the {@link SnapshotDescription} of the snapshot to verify + * @param visitor callback object to get the referenced files + * @throws IOException if an error occurred while scanning the directory + */ + public static void visitReferencedFiles(final Configuration conf, final FileSystem fs, + final Path snapshotDir, final SnapshotDescription desc, final SnapshotVisitor visitor) + throws IOException { + visitTableStoreFiles(conf, fs, snapshotDir, desc, visitor); visitRecoveredEdits(fs, snapshotDir, visitor); visitLogFiles(fs, snapshotDir, visitor); } @@ -104,27 +135,44 @@ public final class SnapshotReferenceUtil { /** * Iterate over the snapshot store files * + * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory + * @param desc the {@link SnapshotDescription} of the snapshot to verify * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ - public static void visitTableStoreFiles(final FileSystem fs, final Path snapshotDir, - final FSVisitor.StoreFileVisitor visitor) throws IOException { - FSVisitor.visitTableStoreFiles(fs, snapshotDir, visitor); + public static void visitTableStoreFiles(final Configuration conf, final FileSystem fs, + final Path snapshotDir, final SnapshotDescription desc, final StoreFileVisitor visitor) + throws IOException { + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc); + List regionManifests = manifest.getRegionManifests(); + if (regionManifests == null || regionManifests.size() == 0) { + LOG.debug("No manifest files present: " + snapshotDir); + return; + } + + for (SnapshotRegionManifest regionManifest: regionManifests) { + visitRegionStoreFiles(regionManifest, visitor); + } } /** * Iterate over the snapshot store files in the specified region * - * @param fs {@link FileSystem} - * @param regionDir {@link Path} to the Snapshot region directory + * @param manifest snapshot manifest to inspect * @param visitor callback object to get the store files * @throws IOException if an error occurred while scanning the directory */ - public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir, - final FSVisitor.StoreFileVisitor visitor) throws IOException { - FSVisitor.visitRegionStoreFiles(fs, regionDir, visitor); + public static void visitRegionStoreFiles(final SnapshotRegionManifest manifest, + final StoreFileVisitor visitor) throws IOException { + HRegionInfo regionInfo = HRegionInfo.convert(manifest.getRegionInfo()); + for (SnapshotRegionManifest.FamilyFiles familyFiles: manifest.getFamilyFilesList()) { + String familyName = familyFiles.getFamilyName().toStringUtf8(); + for (SnapshotRegionManifest.StoreFile storeFile: familyFiles.getStoreFilesList()) { + visitor.storeFile(regionInfo, familyName, storeFile); + } + } } /** @@ -165,85 +213,125 @@ public final class SnapshotReferenceUtil { */ public static void verifySnapshot(final Configuration conf, final FileSystem fs, final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { - final TableName table = TableName.valueOf(snapshotDesc.getTable()); - visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - HFileLink link = HFileLink.create(conf, 
table, region, family, hfile); - try { - link.getFileStatus(fs); - } catch (FileNotFoundException e) { - throw new CorruptedSnapshotException("Corrupted snapshot '" + snapshotDesc + "'", e); - } + visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile); } }); } /** - * Returns the set of region names available in the snapshot. + * Verify the validity of the snapshot * + * @param conf The current {@link Configuration} instance. * @param fs {@link FileSystem} - * @param snapshotDir {@link Path} to the Snapshot directory + * @param manifest snapshot manifest to inspect + * @throws CorruptedSnapshotException if the snapshot is corrupted * @throws IOException if an error occurred while scanning the directory - * @return the set of the regions contained in the snapshot */ - public static Set getSnapshotRegionNames(final FileSystem fs, final Path snapshotDir) - throws IOException { - FileStatus[] regionDirs = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); - if (regionDirs == null) return null; + public static void verifySnapshot(final Configuration conf, final FileSystem fs, + final SnapshotManifest manifest) throws IOException { + final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription(); + final Path snapshotDir = manifest.getSnapshotDir(); - Set regions = new HashSet(); - for (FileStatus regionDir: regionDirs) { - regions.add(regionDir.getPath().getName()); + List regionManifests = manifest.getRegionManifests(); + if (regionManifests == null || regionManifests.size() == 0) { + LOG.debug("No manifest files present: " + snapshotDir); + return; + } + + for (SnapshotRegionManifest regionManifest: regionManifests) { + visitRegionStoreFiles(regionManifest, new StoreFileVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + verifyStoreFile(conf, fs, snapshotDir, snapshotDesc, regionInfo, family, storeFile); + } + }); } - return regions; } /** - * Get the list of hfiles for the specified snapshot region. - * NOTE: The current implementation keeps one empty file per HFile in the region. - * The file name matches the one in the original table, and by reconstructing - * the path you can quickly jump to the referenced file. + * Verify the validity of the snapshot store file * + * @param conf The current {@link Configuration} instance. 
* @param fs {@link FileSystem} - * @param snapshotRegionDir {@link Path} to the Snapshot region directory - * @return Map of hfiles per family, the key is the family name and values are hfile names + * @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify + * @param snapshot the {@link SnapshotDescription} of the snapshot to verify + * @param regionInfo {@link HRegionInfo} of the region that contains the store file + * @param family family that contains the store file + * @param storeFile the store file to verify + * @throws CorruptedSnapshotException if the snapshot is corrupted * @throws IOException if an error occurred while scanning the directory */ - public static Map> getRegionHFileReferences(final FileSystem fs, - final Path snapshotRegionDir) throws IOException { - final Map> familyFiles = new TreeMap>(); + public static void verifyStoreFile(final Configuration conf, final FileSystem fs, + final Path snapshotDir, final SnapshotDescription snapshot, final HRegionInfo regionInfo, + final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + String fileName = storeFile.getName(); - visitRegionStoreFiles(fs, snapshotRegionDir, - new FSVisitor.StoreFileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { - List hfiles = familyFiles.get(family); - if (hfiles == null) { - hfiles = new LinkedList(); - familyFiles.put(family, hfiles); - } - hfiles.add(hfile); - } - }); + Path refPath = null; + if (StoreFileInfo.isReference(fileName)) { + // If is a reference file check if the parent file is present in the snapshot + Path snapshotHFilePath = new Path(new Path( + new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName); + refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath); + if (!fs.exists(refPath)) { + throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot); + } + } + + Path linkPath; + if (refPath != null && HFileLink.isHFileLink(refPath)) { + linkPath = new Path(family, refPath.getName()); + } else if (HFileLink.isHFileLink(fileName)) { + linkPath = new Path(family, fileName); + } else { + linkPath = new Path(family, HFileLink.createHFileLinkName( + TableName.valueOf(snapshot.getTable()), regionInfo.getEncodedName(), fileName)); + } + + // check if the linked file exists (in the archive, or in the table dir) + HFileLink link = new HFileLink(conf, linkPath); + if (!link.exists(fs)) { + throw new CorruptedSnapshotException("Can't find hfile: " + fileName + + " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath() + + ") directory for the primary table.", snapshot); + } + } - return familyFiles; + /** + * Returns the store file names in the snapshot. + * + * @param conf The current {@link Configuration} instance. + * @param fs {@link FileSystem} + * @param snapshotDir {@link Path} to the Snapshot directory + * @throws IOException if an error occurred while scanning the directory + * @return the names of hfiles in the specified snaphot + */ + public static Set getHFileNames(final Configuration conf, final FileSystem fs, + final Path snapshotDir) throws IOException { + SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + return getHFileNames(conf, fs, snapshotDir, desc); } /** * Returns the store file names in the snapshot. * + * @param conf The current {@link Configuration} instance. 
* @param fs {@link FileSystem} * @param snapshotDir {@link Path} to the Snapshot directory + * @param snapshotDesc the {@link SnapshotDescription} of the snapshot to inspect * @throws IOException if an error occurred while scanning the directory * @return the names of hfiles in the specified snaphot */ - public static Set getHFileNames(final FileSystem fs, final Path snapshotDir) + private static Set getHFileNames(final Configuration conf, final FileSystem fs, + final Path snapshotDir, final SnapshotDescription snapshotDesc) throws IOException { final Set names = new HashSet(); - visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { + visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc, new StoreFileVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + String hfile = storeFile.getName(); if (HFileLink.isHFileLink(hfile)) { names.add(HFileLink.getReferencedHFileName(hfile)); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java deleted file mode 100644 index ec50b71..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.snapshot; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.util.FSTableDescriptors; - -/** - * Copy the table info into the snapshot directory - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class TableInfoCopyTask extends SnapshotTask { - - public static final Log LOG = LogFactory.getLog(TableInfoCopyTask.class); - private final FileSystem fs; - private final Path rootDir; - - /** - * Copy the table info for the given table into the snapshot - * @param monitor listen for errors while running the snapshot - * @param snapshot snapshot for which we are copying the table info - * @param fs {@link FileSystem} where the tableinfo is stored (and where the copy will be written) - * @param rootDir root of the {@link FileSystem} where the tableinfo is stored - */ - public TableInfoCopyTask(ForeignExceptionDispatcher monitor, - SnapshotDescription snapshot, FileSystem fs, Path rootDir) { - super(snapshot, monitor); - this.rootDir = rootDir; - this.fs = fs; - } - - @Override - public Void call() throws Exception { - LOG.debug("Running table info copy."); - this.rethrowException(); - LOG.debug("Attempting to copy table info for snapshot:" - + ClientSnapshotDescriptionUtils.toString(this.snapshot)); - // get the HTable descriptor - HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, - TableName.valueOf(this.snapshot.getTable())); - this.rethrowException(); - // write a copy of descriptor to the snapshot directory - Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir); - new FSTableDescriptors(fs, rootDir) - .createTableDescriptorForTableDirectory(snapshotDir, orig, false); - LOG.debug("Finished copying tableinfo."); - return null; - } -} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java index fdc1834..0b9173b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TakeSnapshotUtils.java @@ -66,34 +66,13 @@ public class TakeSnapshotUtils { * @param regionName encoded name of the region (see {@link HRegionInfo#encodeRegionName(byte[])}) * @return path to the per-region directory for the snapshot */ - public static Path getRegionSnapshotDirectory(SnapshotDescription desc, Path rootDir, + private static Path getRegionSnapshotDirectory(SnapshotDescription desc, Path rootDir, String regionName) { Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir); return HRegion.getRegionDir(snapshotDir, regionName); } /** - * Get the snapshot directory for each family to be added to the the snapshot - * @param snapshot description of the snapshot being take - * @param snapshotRegionDir directory in the snapshot where the region directory information - 
* should be stored - * @param families families to be added (can be null) - * @return paths to the snapshot directory for each family, in the same order as the families - * passed in - */ - public static List getFamilySnapshotDirectories(SnapshotDescription snapshot, - Path snapshotRegionDir, FileStatus[] families) { - if (families == null || families.length == 0) return Collections.emptyList(); - - List familyDirs = new ArrayList(families.length); - for (FileStatus family : families) { - // build the reference directory name - familyDirs.add(new Path(snapshotRegionDir, family.getPath().getName())); - } - return familyDirs; - } - - /** * Create a snapshot timer for the master which notifies the monitor when an error occurs * @param snapshot snapshot to monitor * @param conf configuration to use when getting the max snapshot life @@ -154,51 +133,6 @@ public class TakeSnapshotUtils { } } - /** - * Verify one of a snapshot's region's recovered.edits, has been at the surface (file names, - * length), match the original directory. - * @param fs filesystem on which the snapshot had been taken - * @param rootDir full path to the root hbase directory - * @param regionInfo info for the region - * @param snapshot description of the snapshot that was taken - * @throws IOException if there is an unexpected error talking to the filesystem - */ - public static void verifyRecoveredEdits(FileSystem fs, Path rootDir, HRegionInfo regionInfo, - SnapshotDescription snapshot) throws IOException { - Path regionDir = HRegion.getRegionDir(rootDir, regionInfo); - Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir); - Path snapshotRegionDir = TakeSnapshotUtils.getRegionSnapshotDirectory(snapshot, rootDir, - regionInfo.getEncodedName()); - Path snapshotEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir); - - FileStatus[] edits = FSUtils.listStatus(fs, editsDir); - FileStatus[] snapshotEdits = FSUtils.listStatus(fs, snapshotEditsDir); - if (edits == null) { - assertNull(snapshot, "Snapshot has edits but table doesn't", snapshotEdits); - return; - } - - assertNotNull(snapshot, "Table has edits, but snapshot doesn't", snapshotEdits); - - // check each of the files - assertEquals(snapshot, "Not same number of edits in snapshot as table", edits.length, - snapshotEdits.length); - - // make sure we have a file with the same name as the original - // it would be really expensive to verify the content matches the original - for (FileStatus edit : edits) { - for (FileStatus sEdit : snapshotEdits) { - if (sEdit.getPath().equals(edit.getPath())) { - assertEquals(snapshot, "Snapshot file" + sEdit.getPath() - + " length not equal to the original: " + edit.getPath(), edit.getLen(), - sEdit.getLen()); - break; - } - } - assertTrue(snapshot, "No edit in snapshot with name:" + edit.getPath(), false); - } - } - private static void assertNull(SnapshotDescription snapshot, String msg, Object isNull) throws CorruptedSnapshotException { if (isNull != null) { @@ -237,21 +171,6 @@ public class TakeSnapshotUtils { } /** - * Assert that the expect matches the gotten amount - * @param msg message to add the to exception - * @param expected - * @param gotten - * @throws CorruptedSnapshotException thrown if the two elements don't match - */ - private static void assertEquals(SnapshotDescription snapshot, String msg, long expected, - long gotten) throws CorruptedSnapshotException { - if (expected != gotten) { - throw new CorruptedSnapshotException(msg + ". 
Expected:" + expected + ", got:" + gotten, - snapshot); - } - } - - /** * @param logdir * @param toInclude list of servers to include. If empty or null, returns all servers * @return maps of servers to all their log files. If there is no log directory, returns diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java index fdc9f29..b7e1734 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java @@ -129,7 +129,7 @@ public final class FSVisitor { final FSVisitor.RecoveredEditsVisitor visitor) throws IOException { FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { - LOG.info("No regions under directory:" + tableDir); + LOG.info("No recoveredEdits regions under directory:" + tableDir); return; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 3195abe..c89aed9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -201,8 +202,16 @@ public class TestSnapshotFromClient { FSUtils.getRootDir(UTIL.getConfiguration()), LOG); // take a snapshot of the disabled table - byte[] snapshot = Bytes.toBytes("offlineTableSnapshot"); - admin.snapshot(snapshot, TABLE_NAME); + final String SNAPSHOT_NAME = "offlineTableSnapshot"; + byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME); + + SnapshotDescription desc = SnapshotDescription.newBuilder() + .setType(SnapshotDescription.Type.DISABLED) + .setTable(STRING_TABLE_NAME) + .setName(SNAPSHOT_NAME) + .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION) + .build(); + admin.snapshot(desc); LOG.debug("Snapshot completed."); // make sure we have the snapshot diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 277a458..4d0ecfe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.junit.After; import org.junit.Assert; import org.junit.Test; @@ -94,7 +95,7 @@ public class TestTableSnapshotScanner { HTable table = new HTable(util.getConfiguration(), tableName); util.loadTable(table, FAMILIES); - Path rootDir = new 
Path(util.getConfiguration().get(HConstants.HBASE_DIR)); + Path rootDir = FSUtils.getRootDir(util.getConfiguration()); FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, @@ -124,7 +125,7 @@ public class TestTableSnapshotScanner { testScanner(UTIL, "testWithMultiRegion", 20, true); } - private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions, + private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions, boolean shutdownCluster) throws Exception { setupCluster(); TableName tableName = TableName.valueOf("testScanner"); @@ -138,7 +139,7 @@ public class TestTableSnapshotScanner { Path restoreDir = util.getDataTestDirOnTestFS(snapshotName); Scan scan = new Scan(bbb, yyy); // limit the scan - TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, + TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan); verifyScanner(scanner, bbb, yyy); @@ -155,7 +156,7 @@ public class TestTableSnapshotScanner { private void verifyScanner(ResultScanner scanner, byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { - HBaseTestingUtility.SeenRowTracker rowTracker = + HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow); while (true) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index e02d504..8f95866 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotR import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; @@ -125,17 +126,17 @@ public class TestTableSnapshotInputFormat { Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2); - Assert.assertEquals(Lists.newArrayList("h1", "h2"), + Assert.assertEquals(Lists.newArrayList("h1", "h2"), tsif.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3); - Assert.assertEquals(Lists.newArrayList("h2", "h1"), + Assert.assertEquals(Lists.newArrayList("h2", "h1"), tsif.getBestLocations(conf, blockDistribution)); blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6); blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9); - Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"), + Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"), tsif.getBestLocations(conf, blockDistribution)); } @@ -156,7 +157,7 @@ public class TestTableSnapshotInputFormat { public static class TestTableSnapshotReducer extends Reducer { - HBaseTestingUtility.SeenRowTracker rowTracker = + HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(bbb, yyy); @Override protected void 
reduce(ImmutableBytesWritable key, Iterable values, @@ -191,7 +192,7 @@ public class TestTableSnapshotInputFormat { HTable table = new HTable(util.getConfiguration(), tableName); util.loadTable(table, FAMILIES); - Path rootDir = new Path(util.getConfiguration().get(HConstants.HBASE_DIR)); + Path rootDir = FSUtils.getRootDir(util.getConfiguration()); FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, @@ -250,7 +251,7 @@ public class TestTableSnapshotInputFormat { testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 8); } - public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, + public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits) throws Exception { setupCluster(); TableName tableName = TableName.valueOf("testWithMockedMapReduce"); @@ -282,7 +283,7 @@ public class TestTableSnapshotInputFormat { Assert.assertEquals(expectedNumSplits, splits.size()); - HBaseTestingUtility.SeenRowTracker rowTracker = + HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow); for (int i = 0; i < splits.size(); i++) { @@ -293,7 +294,7 @@ public class TestTableSnapshotInputFormat { // validate record reader TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class); when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration()); - RecordReader rr = + RecordReader rr = tsif.createRecordReader(split, taskAttemptContext); rr.initialize(split, taskAttemptContext); @@ -311,7 +312,7 @@ public class TestTableSnapshotInputFormat { rowTracker.validate(); } - public static void verifyRowFromMap(ImmutableBytesWritable key, Result result) + public static void verifyRowFromMap(ImmutableBytesWritable key, Result result) throws IOException { byte[] row = key.get(); CellScanner scanner = result.cellScanner(); @@ -363,7 +364,7 @@ public class TestTableSnapshotInputFormat { // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int expectedNumSplits, + String snapshotName, Path tableDir, int numRegions, int expectedNumSplits, boolean shutdownCluster) throws Exception { //create the table and snapshot @@ -379,7 +380,7 @@ public class TestTableSnapshotInputFormat { Scan scan = new Scan(bbb, yyy); // limit the scan job.setJarByClass(util.getClass()); - TableMapReduceUtil.addDependencyJars(job.getConfiguration(), + TableMapReduceUtil.addDependencyJars(job.getConfiguration(), TestTableSnapshotInputFormat.class); TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index e62b788..f1c5adc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -52,6 +53,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -330,19 +332,20 @@ public class TestSnapshotFromMaster {
     // get the snapshot files for the table
     Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
-    Path[] snapshotHFiles = SnapshotTestingUtils.listHFiles(fs, snapshotTable);
+    Set snapshotHFiles = SnapshotReferenceUtil.getHFileNames(
+      UTIL.getConfiguration(), fs, snapshotTable);
     // check that the files in the archive contain the ones that we need for the snapshot
     LOG.debug("Have snapshot hfiles:");
-    for (Path file : snapshotHFiles) {
-      LOG.debug(file);
+    for (String fileName : snapshotHFiles) {
+      LOG.debug(fileName);
     }
     // get the archived files for the table
     Collection files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME);
 
     // and make sure that there is a proper subset
-    for (Path file : snapshotHFiles) {
-      assertTrue("Archived hfiles " + files + " is missing snapshot file:" + file,
-        files.contains(file.getName()));
+    for (String fileName : snapshotHFiles) {
+      assertTrue("Archived hfiles " + files + " is missing snapshot file:" + fileName,
+        files.contains(fileName));
     }
 
     // delete the existing snapshot
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
index 409f697..9f69bd9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
@@ -22,18 +22,29 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -41,6 +52,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import
com.google.protobuf.HBaseZeroCopyByteString;
+
 /**
  * Test that we correctly reload the cache, filter directories, etc.
  */
@@ -49,6 +62,7 @@ public class TestSnapshotFileCache {
 
   private static final Log LOG = LogFactory.getLog(TestSnapshotFileCache.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static long sequenceId = 0;
   private static FileSystem fs;
   private static Path rootDir;
 
@@ -72,86 +86,17 @@ public class TestSnapshotFileCache {
   }
 
   @Test(timeout = 10000000)
-  public void testLoadAndDelete() throws Exception {
+  public void testLoadAndDelete() throws IOException {
     // don't refresh the cache unless we tell it to
     long period = Long.MAX_VALUE;
-    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
 
-    Path snapshot = new Path(snapshotDir, "snapshot");
-    Path region = new Path(snapshot, "7e91021");
-    Path family = new Path(region, "fam");
-    Path file1 = new Path(family, "file1");
-    Path file2 = new Path(family, "file2");
-
-    // create two hfiles under the snapshot
-    fs.createNewFile(file1);
-    fs.createNewFile(file2);
-
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-    // then make sure the cache finds them
-    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
-    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
-    String not = "file-shouldn't-be-found";
-    assertFalse("Cache found '" + not + "', but it shouldn't have.", cache.contains(not));
-
-    // make sure we get a little bit of separation in the modification times
-    // its okay if we sleep a little longer (b/c of GC pause), as long as we sleep a little
-    Thread.sleep(10);
+    createAndTestSnapshotV1(cache, "snapshot1a", false, true);
+    createAndTestSnapshotV1(cache, "snapshot1b", true, true);
 
-    LOG.debug("Deleting snapshot.");
-    // then delete the snapshot and make sure that we can still find the files
-    if (!fs.delete(snapshot, true)) {
-      throw new IOException("Couldn't delete " + snapshot + " for an unknown reason.");
-    }
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-
-    LOG.debug("Checking to see if file is deleted.");
-    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
-    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
-
-    // then trigger a refresh
-    cache.triggerCacheRefreshForTesting();
-    // and not it shouldn't find those files
-    assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
-      cache.contains(file1.getName()));
-    assertFalse("Cache found '" + file2 + "', but it shouldn't have.",
-      cache.contains(file2.getName()));
-
-    fs.delete(snapshotDir, true);
-  }
-
-  @Test
-  public void testLoadsTmpDir() throws Exception {
-    // don't refresh the cache unless we tell it to
-    long period = Long.MAX_VALUE;
-    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
-    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-        "test-snapshot-file-cache-refresh", new SnapshotFiles());
-
-    // create a file in a 'completed' snapshot
-    Path snapshot = new Path(snapshotDir, "snapshot");
-    Path region = new Path(snapshot, "7e91021");
-    Path family = new Path(region, "fam");
-    Path file1 = new Path(family, "file1");
-    fs.createNewFile(file1);
-
-    // create an 'in progress' snapshot
-    SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
-    snapshot =
SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
-    region = new Path(snapshot, "7e91021");
-    family = new Path(region, "fam");
-    Path file2 = new Path(family, "file2");
-    fs.createNewFile(file2);
-
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-    // then make sure the cache finds both files
-    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
-    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
+    createAndTestSnapshotV2(cache, "snapshot2a", false, true);
+    createAndTestSnapshotV2(cache, "snapshot2b", true, true);
   }
 
   @Test
@@ -168,10 +113,10 @@ public class TestSnapshotFileCache {
     });
 
     // create a file in a 'completed' snapshot
-    Path snapshot = new Path(snapshotDir, "snapshot");
-    Path region = new Path(snapshot, "7e91021");
-    Path family = new Path(region, "fam");
-    Path file1 = new Path(family, "file1");
+    SnapshotDescription desc = SnapshotDescription.newBuilder().setName("snapshot").build();
+    Path snapshot = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+    SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshot, fs);
+    Path file1 = new Path(new Path(new Path(snapshot, "7e91021"), "fam"), "file1");
     fs.createNewFile(file1);
 
     // and another file in the logs directory
@@ -191,61 +136,103 @@ public class TestSnapshotFileCache {
   public void testReloadModifiedDirectory() throws IOException {
     // don't refresh the cache unless we tell it to
     long period = Long.MAX_VALUE;
-    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
 
-    Path snapshot = new Path(snapshotDir, "snapshot");
-    Path region = new Path(snapshot, "7e91021");
-    Path family = new Path(region, "fam");
-    Path file1 = new Path(family, "file1");
-    Path file2 = new Path(family, "file2");
-
-    // create two hfiles under the snapshot
-    fs.createNewFile(file1);
-    fs.createNewFile(file2);
-
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-    assertTrue("Cache didn't find " + file1, cache.contains(file1.getName()));
-
+    createAndTestSnapshotV1(cache, "snapshot1", false, true);
     // now delete the snapshot and add a file with a different name
-    fs.delete(snapshot, true);
-    Path file3 = new Path(family, "new_file");
-    fs.createNewFile(file3);
+    createAndTestSnapshotV1(cache, "snapshot1", false, false);
 
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-    assertTrue("Cache didn't find new file:" + file3, cache.contains(file3.getName()));
+    createAndTestSnapshotV2(cache, "snapshot2", false, true);
+    // now delete the snapshot and add a file with a different name
+    createAndTestSnapshotV2(cache, "snapshot2", false, false);
   }
 
   @Test
   public void testSnapshotTempDirReload() throws IOException {
     long period = Long.MAX_VALUE; // This doesn't refresh cache until we invoke it explicitly
-    Path snapshotDir = new Path(SnapshotDescriptionUtils.getSnapshotsDir(rootDir),
-        SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
 
-    // Add a new snapshot
-    Path snapshot1 = new Path(snapshotDir, "snapshot1");
-    Path file1 = new Path(new Path(new Path(snapshot1, "7e91021"), "fam"), "file1");
-    fs.createNewFile(file1);
-    assertTrue(cache.contains(file1.getName()));
+    // Add a new non-tmp snapshot
+    createAndTestSnapshotV1(cache, "snapshot0v1", false, false);
+    createAndTestSnapshotV1(cache, "snapshot0v2", false,
false);
+
+    // Add a new tmp snapshot
+    createAndTestSnapshotV2(cache, "snapshot1", true, false);
 
-    // Add another snapshot
-    Path snapshot2 = new Path(snapshotDir, "snapshot2");
-    Path file2 = new Path(new Path(new Path(snapshot2, "7e91021"), "fam2"), "file2");
-    fs.createNewFile(file2);
-    assertTrue(cache.contains(file2.getName()));
+    // Add another tmp snapshot
+    createAndTestSnapshotV2(cache, "snapshot2", true, false);
   }
 
   class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
     public Collection filesUnderSnapshot(final Path snapshotDir) throws IOException {
       Collection files = new HashSet();
       files.addAll(SnapshotReferenceUtil.getHLogNames(fs, snapshotDir));
-      files.addAll(SnapshotReferenceUtil.getHFileNames(fs, snapshotDir));
+      files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir));
       return files;
     }
   };
+
+  private void createAndTestSnapshotV1(final SnapshotFileCache cache, final String name,
+      final boolean tmp, final boolean removeOnExit) throws IOException {
+    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1(name);
+    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+  }
+
+  private void createAndTestSnapshotV2(final SnapshotFileCache cache, final String name,
+      final boolean tmp, final boolean removeOnExit) throws IOException {
+    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(name);
+    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+  }
+
+  private void createAndTestSnapshot(final SnapshotFileCache cache,
+      final SnapshotMock.SnapshotBuilder builder,
+      final boolean tmp, final boolean removeOnExit) throws IOException {
+    List files = new ArrayList();
+    for (int i = 0; i < 3; ++i) {
+      for (Path filePath: builder.addRegion()) {
+        String fileName = filePath.getName();
+        if (tmp) {
+          // We should be able to find all the files while the snapshot creation is in-progress
+          FSUtils.logFileSystemState(fs, rootDir, LOG);
+          assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+        }
+        files.add(fileName);
+      }
+    }
+
+    // Finalize the snapshot
+    if (!tmp) {
+      builder.commit();
+    }
+
+    // Make sure that all files are still present
+    for (String fileName: files) {
+      assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+    }
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+    if (removeOnExit) {
+      LOG.debug("Deleting snapshot.");
+      fs.delete(builder.getSnapshotsDir(), true);
+      FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+      // The files should be in cache until next refresh
+      for (String fileName: files) {
+        assertTrue("Cache didn't find " + fileName, cache.contains(fileName));
+      }
+
+      // then trigger a refresh
+      cache.triggerCacheRefreshForTesting();
+      // and not it shouldn't find those files
+      for (String fileName: files) {
+        assertFalse("Cache found '" + fileName + "', but it shouldn't have.",
+            cache.contains(fileName));
+      }
+    }
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index ac9efff..19617ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -22,6 +22,7 @@ import static
org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -30,6 +31,8 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -43,11 +46,15 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -56,6 +63,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSVisitor;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
 
@@ -180,14 +188,14 @@ public class SnapshotTestingUtils {
       List nonEmptyTestFamilies, List emptyTestFamilies,
       Path rootDir, HBaseAdmin admin, FileSystem fs, boolean requireLogs,
       Path logsDir, Set snapshotServers) throws IOException {
+    final Configuration conf = admin.getConfiguration();
+
     // check snapshot dir
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
       snapshotDescriptor, rootDir);
     assertTrue(fs.exists(snapshotDir));
 
-    // check snapshot info
-    Path snapshotinfo = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
-    assertTrue(fs.exists(snapshotinfo));
+    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
 
     // check the logs dir
     if (requireLogs) {
@@ -196,18 +204,14 @@ public class SnapshotTestingUtils {
           HConstants.HREGION_LOGDIR_NAME));
     }
 
-    // check the table info
-    HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, tableName);
-    HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
-    assertEquals(desc, snapshotDesc);
-
     // Extract regions and families with store files
     final Set snapshotRegions = new HashSet();
     final Set snapshotFamilies = new TreeSet(Bytes.BYTES_COMPARATOR);
-    FSVisitor.visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
-      public void storeFile(final String region, final String family, final String hfileName)
-          throws IOException {
-        snapshotRegions.add(region);
+    SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, desc,
+        new SnapshotReferenceUtil.StoreFileVisitor() {
+      public void storeFile(final HRegionInfo regionInfo,
final String family,
+          final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+        snapshotRegions.add(regionInfo.getEncodedName());
         snapshotFamilies.add(Bytes.toBytes(family));
       }
     });
@@ -237,14 +241,10 @@ public class SnapshotTestingUtils {
     List regions = admin.getTableRegions(tableName);
     assertEquals(regions.size(), snapshotRegions.size());
 
-    // Verify Regions
+    // Verify Regions (redundant check, see MasterSnapshotVerifier)
    for (HRegionInfo info : regions) {
       String regionName = info.getEncodedName();
       assertTrue(snapshotRegions.contains(regionName));
-
-      Path regionDir = new Path(snapshotDir, regionName);
-      HRegionInfo snapshotRegionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-      assertEquals(info, snapshotRegionInfo);
     }
   }
 
@@ -351,6 +351,17 @@ public class SnapshotTestingUtils {
     return hfiles.toArray(new Path[hfiles.size()]);
   }
 
+  public static String[] listHFileNames(final FileSystem fs, final Path tableDir)
+      throws IOException {
+    Path[] files = listHFiles(fs, tableDir);
+    String[] names = new String[files.length];
+    for (int i = 0; i < files.length; ++i) {
+      names[i] = files[i].getName();
+    }
+    Arrays.sort(names);
+    return names;
+  }
+
   /**
    * Take a snapshot of the specified table and verify that the given family is
    * not empty. Note that this will leave the table disabled
@@ -414,10 +425,14 @@ public class SnapshotTestingUtils {
     final TableName table = TableName.valueOf(snapshotDesc.getTable());
     final ArrayList corruptedFiles = new ArrayList();
 
-    SnapshotReferenceUtil.visitTableStoreFiles(fs, snapshotDir, new FSVisitor.StoreFileVisitor() {
-      public void storeFile (final String region, final String family, final String hfile)
-          throws IOException {
-        HFileLink link = HFileLink.create(util.getConfiguration(), table, region, family, hfile);
+    final Configuration conf = util.getConfiguration();
+    SnapshotReferenceUtil.visitTableStoreFiles(conf, fs, snapshotDir, snapshotDesc,
+        new SnapshotReferenceUtil.StoreFileVisitor() {
+      public void storeFile(final HRegionInfo regionInfo, final String family,
+          final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+        String region = regionInfo.getEncodedName();
+        String hfile = storeFile.getName();
+        HFileLink link = HFileLink.create(conf, table, region, family, hfile);
         if (corruptedFiles.size() % 2 == 0) {
           fs.delete(link.getAvailablePath(fs));
           corruptedFiles.add(hfile);
@@ -430,6 +445,191 @@ public class SnapshotTestingUtils {
   }
 
   // ==========================================================================
+  // Snapshot Mock
+  // ==========================================================================
+  public static class SnapshotMock {
+    private final static String TEST_FAMILY = "cf";
+    public final static int TEST_NUM_REGIONS = 4;
+
+    private final Configuration conf;
+    private final FileSystem fs;
+    private final Path rootDir;
+
+    static class RegionData {
+      public HRegionInfo hri;
+      public Path tableDir;
+      public Path[] files;
+
+      public RegionData(final Path tableDir, final HRegionInfo hri, final int nfiles) {
+        this.tableDir = tableDir;
+        this.hri = hri;
+        this.files = new Path[nfiles];
+      }
+    }
+
+    public static class SnapshotBuilder {
+      private final RegionData[] tableRegions;
+      private final SnapshotDescription desc;
+      private final HTableDescriptor htd;
+      private final Configuration conf;
+      private final FileSystem fs;
+      private final Path rootDir;
+      private Path snapshotDir;
+      private int snapshotted = 0;
+
+      public SnapshotBuilder(final Configuration conf, final FileSystem fs,
+          final
Path rootDir, final HTableDescriptor htd,
+          final SnapshotDescription desc, final RegionData[] tableRegions)
+          throws IOException {
+        this.fs = fs;
+        this.conf = conf;
+        this.rootDir = rootDir;
+        this.htd = htd;
+        this.desc = desc;
+        this.tableRegions = tableRegions;
+        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+        new FSTableDescriptors(conf)
+          .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
+      }
+
+      public HTableDescriptor getTableDescriptor() {
+        return this.htd;
+      }
+
+      public SnapshotDescription getSnapshotDescription() {
+        return this.desc;
+      }
+
+      public Path getSnapshotsDir() {
+        return this.snapshotDir;
+      }
+
+      public Path[] addRegion() throws IOException {
+        return addRegion(desc);
+      }
+
+      public Path[] addRegionV1() throws IOException {
+        return addRegion(desc.toBuilder()
+          .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
+          .build());
+      }
+
+      public Path[] addRegionV2() throws IOException {
+        return addRegion(desc.toBuilder()
+          .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION)
+          .build());
+      }
+
+      private Path[] addRegion(final SnapshotDescription desc) throws IOException {
+        if (this.snapshotted == tableRegions.length) {
+          throw new UnsupportedOperationException("No more regions in the table");
+        }
+
+        RegionData regionData = tableRegions[this.snapshotted++];
+        ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getName());
+        SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
+        manifest.addRegion(regionData.tableDir, regionData.hri);
+        return regionData.files;
+      }
+
+      public Path commit() throws IOException {
+        ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getName());
+        SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
+        manifest.addTableDescriptor(htd);
+        manifest.consolidate();
+        SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir, fs);
+        snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir);
+        return snapshotDir;
+      }
+    }
+
+    public SnapshotMock(final Configuration conf, final FileSystem fs, final Path rootDir) {
+      this.fs = fs;
+      this.conf = conf;
+      this.rootDir = rootDir;
+    }
+
+    public SnapshotBuilder createSnapshotV1(final String snapshotName) throws IOException {
+      return createSnapshot(snapshotName, SnapshotManifestV1.DESCRIPTOR_VERSION);
+    }
+
+    public SnapshotBuilder createSnapshotV2(final String snapshotName) throws IOException {
+      return createSnapshot(snapshotName, SnapshotManifestV2.DESCRIPTOR_VERSION);
+    }
+
+    private SnapshotBuilder createSnapshot(final String snapshotName, final int version)
+        throws IOException {
+      HTableDescriptor htd = createHtd(snapshotName);
+      htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
+
+      RegionData[] regions = createTable(htd, TEST_NUM_REGIONS);
+
+      SnapshotDescription desc = SnapshotDescription.newBuilder()
+        .setTable(htd.getNameAsString())
+        .setName(snapshotName)
+        .setVersion(version)
+        .build();
+
+      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
+      return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
+    }
+
+    public HTableDescriptor createHtd(final String tableName) {
+      HTableDescriptor htd = new HTableDescriptor(tableName);
+      htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
+      return htd;
+    }
+
+    private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
+        throws
IOException {
+      Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
+
+      assertTrue(nregions % 2 == 0);
+      RegionData[] regions = new RegionData[nregions];
+      for (int i = 0; i < regions.length; i += 2) {
+        byte[] startKey = Bytes.toBytes(0 + i * 2);
+        byte[] endKey = Bytes.toBytes(1 + i * 2);
+
+        // First region, simple with one plain hfile.
+        HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
+        HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
+        regions[i] = new RegionData(tableDir, hri, 3);
+        for (int j = 0; j < regions[i].files.length; ++j) {
+          Path storeFile = createStoreFile(rfs.createTempName());
+          regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
+        }
+
+        // Second region, used to test the split case.
+        // This region contains a reference to the hfile in the first region.
+        startKey = Bytes.toBytes(2 + i * 2);
+        endKey = Bytes.toBytes(3 + i * 2);
+        hri = new HRegionInfo(htd.getTableName());
+        rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
+        regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
+        for (int j = 0; j < regions[i].files.length; ++j) {
+          String refName = regions[i].files[j].getName() + '.' + regions[i].hri.getEncodedName();
+          Path refFile = createStoreFile(new Path(rootDir, refName));
+          regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
+        }
+      }
+      return regions;
+    }
+
+    private Path createStoreFile(final Path storeFile)
+        throws IOException {
+      FSDataOutputStream out = fs.create(storeFile);
+      try {
+        out.write(Bytes.toBytes(storeFile.toString()));
+      } finally {
+        out.close();
+      }
+      return storeFile;
+    }
+  }
+
+  // ==========================================================================
   // Table Helpers
   // ==========================================================================
   public static void waitForTableToBeOnline(final HBaseTestingUtility util,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java
deleted file mode 100644
index 51e934a..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package org.apache.hadoop.hbase.snapshot; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; -import org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.FSUtils; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -/** - * Test that we correctly copy the recovered edits from a directory - */ -@Category(SmallTests.class) -public class TestCopyRecoveredEditsTask { - - private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - - @Test - public void testCopyFiles() throws Exception { - - SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build(); - ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class); - FileSystem fs = UTIL.getTestFileSystem(); - Path root = UTIL.getDataTestDir(); - String regionName = "regionA"; - Path regionDir = new Path(root, regionName); - Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root); - - try { - // doesn't really matter where the region's snapshot directory is, but this is pretty close - Path snapshotRegionDir = new Path(workingDir, regionName); - fs.mkdirs(snapshotRegionDir); - - // put some stuff in the recovered.edits directory - Path edits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir); - fs.mkdirs(edits); - // make a file with some data - Path file1 = new Path(edits, "0000000000000002352"); - FSDataOutputStream out = fs.create(file1); - byte[] data = new byte[] { 1, 2, 3, 4 }; - out.write(data); - out.close(); - // make an empty file - Path empty = new Path(edits, "empty"); - fs.createNewFile(empty); - - CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, - snapshotRegionDir); - CopyRecoveredEditsTask taskSpy = Mockito.spy(task); - taskSpy.call(); - - Path snapshotEdits = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir); - FileStatus[] snapshotEditFiles = FSUtils.listStatus(fs, snapshotEdits); - assertEquals("Got wrong number of files in the snapshot edits", 1, snapshotEditFiles.length); - FileStatus file = snapshotEditFiles[0]; - assertEquals("Didn't copy expected file", file1.getName(), file.getPath().getName()); - - Mockito.verify(monitor, Mockito.never()).receive(Mockito.any(ForeignException.class)); - Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(), - Mockito.any(Exception.class)); - - } finally { - // cleanup the working directory - FSUtils.delete(fs, regionDir, true); - FSUtils.delete(fs, workingDir, true); - } - } - - /** - * Check that we don't get an exception if there is no recovered edits directory to copy - * @throws Exception on failure - */ - @Test - public void testNoEditsDir() throws Exception { - SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build(); - 
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class); - FileSystem fs = UTIL.getTestFileSystem(); - Path root = UTIL.getDataTestDir(); - String regionName = "regionA"; - Path regionDir = new Path(root, regionName); - Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root); - try { - // doesn't really matter where the region's snapshot directory is, but this is pretty close - Path snapshotRegionDir = new Path(workingDir, regionName); - fs.mkdirs(snapshotRegionDir); - Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir); - assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits)); - - CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, - snapshotRegionDir); - task.call(); - } finally { - // cleanup the working directory - FSUtils.delete(fs, regionDir, true); - FSUtils.delete(fs, workingDir, true); - } - } -} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 9b21f36..769f45e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -48,9 +48,12 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo; +import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.AfterClass; @@ -144,9 +147,13 @@ public class TestExportSnapshot { @Test public void testBalanceSplit() throws Exception { // Create a list of files - List> files = new ArrayList>(); + List> files = new ArrayList>(); for (long i = 0; i <= 20; i++) { - files.add(new Pair(new Path("file-" + i), i)); + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() + .setType(SnapshotFileInfo.Type.HFILE) + .setHfile("file-" + i) + .build(); + files.add(new Pair(fileInfo, i)); } // Create 5 groups (total size 210) @@ -155,18 +162,26 @@ public class TestExportSnapshot { // group 2: 18, 13, 8, 3 (total size: 42) // group 3: 17, 12, 7, 4 (total size: 42) // group 4: 16, 11, 6, 5 (total size: 42) - List> splits = ExportSnapshot.getBalancedSplits(files, 5); + List> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); - assertEquals(Arrays.asList(new Path("file-20"), new Path("file-11"), - new Path("file-10"), new Path("file-1"), new Path("file-0")), splits.get(0)); - assertEquals(Arrays.asList(new Path("file-19"), new Path("file-12"), - new Path("file-9"), new Path("file-2")), splits.get(1)); - assertEquals(Arrays.asList(new Path("file-18"), new Path("file-13"), - new Path("file-8"), new Path("file-3")), splits.get(2)); - assertEquals(Arrays.asList(new Path("file-17"), new Path("file-14"), - new Path("file-7"), new Path("file-4")), splits.get(3)); - assertEquals(Arrays.asList(new Path("file-16"), new 
Path("file-15"), - new Path("file-6"), new Path("file-5")), splits.get(4)); + + String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; + verifyBalanceSplit(splits.get(0), split0); + String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; + verifyBalanceSplit(splits.get(1), split1); + String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; + verifyBalanceSplit(splits.get(2), split2); + String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; + verifyBalanceSplit(splits.get(3), split3); + String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; + verifyBalanceSplit(splits.get(4), split4); + } + + private void verifyBalanceSplit(final List split, final String[] expected) { + assertEquals(expected.length, split.size()); + for (int i = 0; i < expected.length; ++i) { + assertEquals(expected[i], split.get(i).getHfile()); + } } /** @@ -234,6 +249,11 @@ public class TestExportSnapshot { r1fs.commitStoreFile(TEST_FAMILY, storeFile); Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName); + HTableDescriptor htd = new HTableDescriptor(tableWithRefsName); + htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); + new FSTableDescriptors(fs, rootDir) + .createTableDescriptorForTableDirectory(tableDir, htd, false); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf); SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs); @@ -346,13 +366,14 @@ public class TestExportSnapshot { new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); LOG.debug(listFiles(fs, exportedArchive, exportedArchive)); - SnapshotReferenceUtil.visitReferencedFiles(fs, exportedSnapshot, - new SnapshotReferenceUtil.FileVisitor() { - public void storeFile (final String region, final String family, final String hfile) - throws IOException { + SnapshotReferenceUtil.visitReferencedFiles(TEST_UTIL.getConfiguration(), fs, exportedSnapshot, + new SnapshotReferenceUtil.SnapshotVisitor() { + public void storeFile(final HRegionInfo regionInfo, final String family, + final SnapshotRegionManifest.StoreFile storeFile) throws IOException { + String hfile = storeFile.getName(); verifyNonEmptyFile(new Path(exportedArchive, new Path(FSUtils.getTableDir(new Path("./"), tableName), - new Path(region, new Path(family, hfile))))); + new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); } public void recoveredEdits (final String region, final String logfile) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index ee12d91..ec09553 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -399,46 +399,11 @@ public class TestFlushSnapshotFromClient { SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM); String snapshotName = "flushSnapshotCreateListDestroy"; - // test creating the snapshot - admin.snapshot(snapshotName, STRING_TABLE_NAME, SnapshotDescription.Type.FLUSH); - logFSTree(FSUtils.getRootDir(UTIL.getConfiguration())); - - // make sure we only have 1 matching snapshot - List snapshots = 
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshotName, TABLE_NAME); - - // check the directory structure FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshots.get(0), rootDir); - assertTrue(fs.exists(snapshotDir)); - FSUtils.logFileSystemState(UTIL.getTestFileSystem(), snapshotDir, LOG); - Path snapshotinfo = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); - assertTrue(fs.exists(snapshotinfo)); - - // check the table info - HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, - rootDir, TABLE_NAME); - HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, - new Path(SnapshotDescriptionUtils.getSnapshotsDir(rootDir), snapshotName)); - assertEquals(desc, snapshotDesc); - - // check the region snapshot for all the regions - List regions = admin.getTableRegions(TABLE_NAME); - assertTrue(regions.size() > 1); - for (HRegionInfo info : regions) { - String regionName = info.getEncodedName(); - Path regionDir = new Path(snapshotDir, regionName); - HRegionInfo snapshotRegionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - assertEquals(info, snapshotRegionInfo); - // check to make sure we have the family - Path familyDir = new Path(regionDir, Bytes.toString(TEST_FAM)); - assertTrue("Missing region " + Bytes.toString(snapshotRegionInfo.getStartKey()), - fs.exists(familyDir)); - - // make sure we have some file references - assertTrue(fs.listStatus(familyDir).length > 0); - } + SnapshotTestingUtils.createSnapshotAndValidate(admin, + TableName.valueOf(STRING_TABLE_NAME), Bytes.toString(TEST_FAM), + snapshotName, rootDir, fs, true); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java deleted file mode 100644 index f11ea8a..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.snapshot; - -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.util.FSUtils; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -@Category(SmallTests.class) -public class TestReferenceRegionHFilesTask { - private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - - @Test - public void testRun() throws IOException { - FileSystem fs = UTIL.getTestFileSystem(); - // setup the region internals - Path testdir = UTIL.getDataTestDir(); - Path regionDir = new Path(testdir, "region"); - Path family1 = new Path(regionDir, "fam1"); - // make an empty family - Path family2 = new Path(regionDir, "fam2"); - fs.mkdirs(family2); - - // add some files to family 1 - Path file1 = new Path(family1, "05f99689ae254693836613d1884c6b63"); - fs.createNewFile(file1); - Path file2 = new Path(family1, "7ac9898bf41d445aa0003e3d699d5d26"); - fs.createNewFile(file2); - - // create the snapshot directory - Path snapshotRegionDir = new Path(testdir, HConstants.SNAPSHOT_DIR_NAME); - fs.mkdirs(snapshotRegionDir); - - SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("name") - .setTable("table").build(); - ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class); - ReferenceRegionHFilesTask task = new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, - fs, snapshotRegionDir); - ReferenceRegionHFilesTask taskSpy = Mockito.spy(task); - task.call(); - - // make sure we never get an error - Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(), - Mockito.any(Exception.class)); - - // verify that all the hfiles get referenced - List hfiles = new ArrayList(2); - FileStatus[] regions = FSUtils.listStatus(fs, snapshotRegionDir); - for (FileStatus region : regions) { - FileStatus[] fams = FSUtils.listStatus(fs, region.getPath()); - for (FileStatus fam : fams) { - FileStatus[] files = FSUtils.listStatus(fs, fam.getPath()); - for (FileStatus file : files) { - hfiles.add(file.getPath().getName()); - } - } - } - assertTrue("Didn't reference :" + file1, hfiles.contains(file1.getName())); - assertTrue("Didn't reference :" + file1, hfiles.contains(file2.getName())); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index e2bf7b8..b015331 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -28,6 +28,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; 
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -46,6 +47,8 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.After; @@ -62,7 +65,6 @@ public class TestRestoreSnapshotHelper { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static String TEST_FAMILY = "cf"; private final static String TEST_HFILE = "abc"; private Configuration conf; @@ -86,49 +88,65 @@ public class TestRestoreSnapshotHelper { @Test public void testRestore() throws IOException { - HTableDescriptor htd = createTableDescriptor("testtb"); - - Path snapshotDir = new Path(rootDir, "snapshot"); - createSnapshot(rootDir, snapshotDir, htd); + // Test Rolling-Upgrade like Snapshot. + // half machines writing using v1 and the others using v2 format. + SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir); + SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot"); + builder.addRegionV1(); + builder.addRegionV2(); + builder.addRegionV2(); + builder.addRegionV1(); + Path snapshotDir = builder.commit(); + HTableDescriptor htd = builder.getTableDescriptor(); + SnapshotDescription desc = builder.getSnapshotDescription(); // Test clone a snapshot - HTableDescriptor htdClone = createTableDescriptor("testtb-clone"); - testRestore(snapshotDir, htd.getTableName().getNameAsString(), htdClone); + HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone"); + testRestore(snapshotDir, desc, htdClone); verifyRestore(rootDir, htd, htdClone); // Test clone a clone ("link to link") + SnapshotDescription cloneDesc = SnapshotDescription.newBuilder() + .setName("cloneSnapshot") + .setTable("testtb-clone") + .build(); Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName()); - HTableDescriptor htdClone2 = createTableDescriptor("testtb-clone2"); - testRestore(cloneDir, htdClone.getTableName().getNameAsString(), htdClone2); + HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2"); + testRestore(cloneDir, cloneDesc, htdClone2); verifyRestore(rootDir, htd, htdClone2); } private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd, final HTableDescriptor htdClone) throws IOException { - String[] files = getHFiles(FSUtils.getTableDir(rootDir, htdClone.getTableName())); - assertEquals(2, files.length); - assertTrue(files[0] + " should be a HFileLink", HFileLink.isHFileLink(files[0])); - assertTrue(files[1] + " should be a Referene", StoreFileInfo.isReference(files[1])); - assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(files[0])); - assertEquals(TEST_HFILE, HFileLink.getReferencedHFileName(files[0])); - Path refPath = getReferredToFile(files[1]); - assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName())); - assertEquals(files[0], refPath.getName()); + String[] files = SnapshotTestingUtils.listHFileNames(fs, + FSUtils.getTableDir(rootDir, htdClone.getTableName())); + assertEquals(12, files.length); + for (int i = 0; i < 
files.length; i += 2) { + String linkFile = files[i]; + String refFile = files[i+1]; + assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile)); + assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile)); + assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile)); + Path refPath = getReferredToFile(refFile); + LOG.debug("get reference name for file " + refFile + " = " + refPath); + assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName())); + assertEquals(linkFile, refPath.getName()); + } } /** * Execute the restore operation * @param snapshotDir The snapshot directory to use as "restore source" - * @param sourceTableName The name of the snapshotted table + * @param sd The snapshot descriptor * @param htdClone The HTableDescriptor of the table to restore/clone. */ - public void testRestore(final Path snapshotDir, final String sourceTableName, + public void testRestore(final Path snapshotDir, final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException { LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir); FSUtils.logFileSystemState(fs, rootDir, LOG); new FSTableDescriptors(conf).createTableDescriptor(htdClone); - RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone); + RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone); helper.restoreHdfsRegions(); LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir); @@ -139,65 +157,19 @@ public class TestRestoreSnapshotHelper { * Initialize the restore helper, based on the snapshot and table information provided. */ private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir, - final String sourceTableName, final HTableDescriptor htdClone) throws IOException { + final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException { CatalogTracker catalogTracker = Mockito.mock(CatalogTracker.class); HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class); ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class); MonitoredTask status = Mockito.mock(MonitoredTask.class); - SnapshotDescription sd = SnapshotDescription.newBuilder() - .setName("snapshot") - .setTable(sourceTableName) - .build(); - - return new RestoreSnapshotHelper(conf, fs, sd, snapshotDir, + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); + return new RestoreSnapshotHelper(conf, fs, manifest, htdClone, rootDir, monitor, status); } - private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd) - throws IOException { - // First region, simple with one plain hfile. - HRegionInfo hri = new HRegionInfo(htd.getTableName()); - HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri); - Path storeFile = new Path(rootDir, TEST_HFILE); - fs.createNewFile(storeFile); - r0fs.commitStoreFile(TEST_FAMILY, storeFile); - - // Second region, used to test the split case. - // This region contains a reference to the hfile in the first region. - hri = new HRegionInfo(htd.getTableName()); - HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri); - storeFile = new Path(rootDir, TEST_HFILE + '.' 
+ r0fs.getRegionInfo().getEncodedName()); - fs.createNewFile(storeFile); - r1fs.commitStoreFile(TEST_FAMILY, storeFile); - - Path tableDir = FSUtils.getTableDir(archiveDir, htd.getTableName()); - FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf); - } - - private HTableDescriptor createTableDescriptor(final String tableName) { - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); - htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); - return htd; - } - private Path getReferredToFile(final String referenceName) { Path fakeBasePath = new Path(new Path("table", "region"), "cf"); return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName)); } - - private String[] getHFiles(final Path tableDir) throws IOException { - List files = new ArrayList(); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { - for (FileStatus file: FSUtils.listStatus(fs, familyDir)) { - files.add(file.getPath().getName()); - } - } - } - Collections.sort(files); - return files.toArray(new String[files.size()]); - } }