From cef281d7c5f839b6a0ef4adabf48eef5aa00d175 Mon Sep 17 00:00:00 2001 From: rahulgidwani Date: Thu, 26 Feb 2015 13:29:47 -0800 Subject: [PATCH] Ability to Snapshot key ranges --- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 98 +++++++-- .../snapshot/ClientSnapshotDescriptionUtils.java | 27 +++ .../hbase/protobuf/generated/HBaseProtos.java | 229 +++++++++++++++++++-- .../hbase/protobuf/generated/SnapshotProtos.java | 3 +- hbase-protocol/src/main/protobuf/HBase.proto | 2 + .../master/snapshot/MasterSnapshotVerifier.java | 3 + .../master/snapshot/RestoreSnapshotHandler.java | 4 +- .../snapshot/RegionServerSnapshotManager.java | 5 +- .../hbase/snapshot/SnapshotDescriptionUtils.java | 49 +++++ .../resources/hbase-webapps/master/snapshot.jsp | 5 + .../hbase/snapshot/TestSnapshotDescription.java | 93 +++++++++ .../hbase/snapshot/TestSnapshotKeyRanges.java | 129 ++++++++++++ hbase-shell/src/main/ruby/hbase/admin.rb | 32 +-- .../src/main/ruby/shell/commands/clone_snapshot.rb | 7 +- .../src/main/ruby/shell/commands/list_snapshots.rb | 7 +- .../main/ruby/shell/commands/restore_snapshot.rb | 4 + .../src/main/ruby/shell/commands/snapshot.rb | 2 + 17 files changed, 644 insertions(+), 55 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescription.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotKeyRanges.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 3acaaf9..88c8a70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -2999,6 +2999,48 @@ public class HBaseAdmin implements Admin { * Snapshot names follow the same naming constraints as tables in HBase. See * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. *

+   * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
+   *          snapshots stored on the cluster
+   * @param tableName name of the table to snapshot
+   * @param type type of snapshot to take
+   * @param startKey the start key (if using ranges) for the snapshot
+   * @param stopKey the end key (if using ranges) for the snapshot
+   * @throws IOException if we fail to reach the master
+   * @throws SnapshotCreationException if snapshot creation failed
+   * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
+   */
+  public void snapshot(final String snapshotName, final String tableName,
+      SnapshotDescription.Type type, String startKey, String stopKey)
+      throws IOException, SnapshotCreationException, IllegalArgumentException {
+    SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+    builder.setTable(tableName);
+    builder.setName(snapshotName);
+    builder.setType(type);
+    if (startKey != null) {
+      builder.setStartRow(ByteString.copyFrom(Bytes.toBytesBinary(startKey)));
+    }
+    if (stopKey != null) {
+      builder.setStopRow(ByteString.copyFrom(Bytes.toBytesBinary(stopKey)));
+    }
+    ClientSnapshotDescriptionUtils
+        .validateRange(builder.getStartRow().toByteArray(), builder.getStopRow().toByteArray());
+    snapshot(builder.build());
+  }
+
+  /**
+   * Take a snapshot and wait for the server to complete that snapshot (blocking).
+   *

+   * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
+   * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
+   * time for a single cluster).
+   *

+   * Snapshots are considered unique based on the name of the snapshot. Attempts to take a
+   * snapshot with the same name (even a different type or with different parameters) will fail
+   * with a {@link SnapshotCreationException} indicating the duplicate naming.
+   *

+   * Snapshot names follow the same naming constraints as tables in HBase. See
+   * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   *

   * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
   * unless you are sure about the type of snapshot that you want to take.
   * @param snapshot snapshot to take
@@ -3277,6 +3319,22 @@ public class HBaseAdmin implements Admin {
    *
    * @param snapshotName name of the snapshot to be cloned
    * @param tableName name of the table where the snapshot will be restored
+   * @param createTable whether a new table should be created
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table does not have a valid name
+   */
+  public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName, boolean createTable)
+      throws IOException, TableExistsException, RestoreSnapshotException {
+    cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName), createTable);
+  }
+
+  /**
+   * Create a new table by cloning the snapshot content.
+   *
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
    * @throws IOException if a remote or network exception occurs
    * @throws TableExistsException if table to be created already exists
    * @throws RestoreSnapshotException if snapshot failed to be cloned
@@ -3318,24 +3376,40 @@ public class HBaseAdmin implements Admin {
   @Override
   public void cloneSnapshot(final String snapshotName, final TableName tableName)
       throws IOException, TableExistsException, RestoreSnapshotException {
-    if (tableExists(tableName)) {
+    cloneSnapshot(snapshotName, tableName, true);
+  }
+
+  /**
+   * Clone the snapshot content into a table, optionally creating that table first.
+   *
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @param createTable whether a new table should be created
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table does not have a valid name
+   */
+  public void cloneSnapshot(final String snapshotName, TableName tableName, boolean createTable)
+      throws IOException {
+    if (createTable && tableExists(tableName)) {
       throw new TableExistsException(tableName);
     }
     internalRestoreSnapshot(snapshotName, tableName);
     waitUntilTableIsEnabled(tableName);
   }

-  /**
-   * Execute a distributed procedure on a cluster synchronously with return data
-   *
-   * @param signature A distributed procedure is uniquely identified
-   * by its signature (default the root ZK node name of the procedure).
-   * @param instance The instance name of the procedure. For some procedures, this parameter is
-   * optional.
-   * @param props Property/Value pairs of properties passing to the procedure
-   * @return data returned after procedure execution. null if no return data.
-   * @throws IOException
-   */
+  /**
+   * Execute a distributed procedure on a cluster synchronously with return data
+   *
+   * @param signature A distributed procedure is uniquely identified
+   * by its signature (default the root ZK node name of the procedure).
+   * @param instance The instance name of the procedure. For some procedures, this parameter is
+   * optional.
+   * @param props Property/Value pairs of properties passing to the procedure
+   * @return data returned after procedure execution.
null if no return data. + * @throws IOException + */ @Override public byte[] execProcedureWithRet(String signature, String instance, Map props) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index 59ba837..8764bc6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; @@ -65,4 +66,30 @@ public class ClientSnapshotDescriptionUtils { " table=" + (ssd.hasTable()?TableName.valueOf(ssd.getTable()):"") + " type=" + ssd.getType() + " }"; } + + /** + * Check if the key range is valid startKey < stopKey + * @param startKey beginning key range + * @param stopKey ending key range + * @throws IllegalArgumentException if startKey >= stopKey + */ + public static void validateRange(byte[] startKey, byte[] stopKey) { + final boolean endKeyIsEndOfTable = Bytes.equals(stopKey, HConstants.EMPTY_END_ROW); + if ((Bytes.compareTo(startKey, stopKey) > 0) && !endKeyIsEndOfTable) { + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(startKey) + + " > " + Bytes.toStringBinary(stopKey)); + } + } + + /** + * Is this a partial snapshot of a table or a complete snapshot + * + * @param snapshot - the snapshot in question + * @return true if the snapshot is partial, false if it is the complete table + */ + public static boolean isPartialSnapshot(HBaseProtos.SnapshotDescription snapshot) { + return !Bytes.equals(snapshot.getStartRow().toByteArray(), HConstants.EMPTY_BYTE_ARRAY) || + !Bytes.equals(snapshot.getStopRow().toByteArray(), HConstants.EMPTY_BYTE_ARRAY); + } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 3007d25..32ebd59 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -11732,6 +11732,26 @@ public final class HBaseProtos { */ com.google.protobuf.ByteString getOwnerBytes(); + + // optional bytes startRow = 7 [default = ""]; + /** + * optional bytes startRow = 7 [default = ""]; + */ + boolean hasStartRow(); + /** + * optional bytes startRow = 7 [default = ""]; + */ + com.google.protobuf.ByteString getStartRow(); + + // optional bytes stopRow = 8 [default = ""]; + /** + * optional bytes stopRow = 8 [default = ""]; + */ + boolean hasStopRow(); + /** + * optional bytes stopRow = 8 [default = ""]; + */ + com.google.protobuf.ByteString getStopRow(); } /** * Protobuf type {@code SnapshotDescription} @@ -11825,6 +11845,16 @@ public final class HBaseProtos { owner_ = input.readBytes(); break; } + case 58: { + bitField0_ |= 0x00000040; + startRow_ = input.readBytes(); + break; + } + case 66: { + bitField0_ |= 0x00000080; + stopRow_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -12145,6 +12175,38 @@ public final class HBaseProtos { } } + // 
optional bytes startRow = 7 [default = ""]; + public static final int STARTROW_FIELD_NUMBER = 7; + private com.google.protobuf.ByteString startRow_; + /** + * optional bytes startRow = 7 [default = ""]; + */ + public boolean hasStartRow() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bytes startRow = 7 [default = ""]; + */ + public com.google.protobuf.ByteString getStartRow() { + return startRow_; + } + + // optional bytes stopRow = 8 [default = ""]; + public static final int STOPROW_FIELD_NUMBER = 8; + private com.google.protobuf.ByteString stopRow_; + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public boolean hasStopRow() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public com.google.protobuf.ByteString getStopRow() { + return stopRow_; + } + private void initFields() { name_ = ""; table_ = ""; @@ -12152,6 +12214,8 @@ public final class HBaseProtos { type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; version_ = 0; owner_ = ""; + startRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); + stopRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -12187,6 +12251,12 @@ public final class HBaseProtos { if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(6, getOwnerBytes()); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBytes(7, startRow_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeBytes(8, stopRow_); + } getUnknownFields().writeTo(output); } @@ -12220,6 +12290,14 @@ public final class HBaseProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(6, getOwnerBytes()); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(7, startRow_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(8, stopRow_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -12273,6 +12351,16 @@ public final class HBaseProtos { result = result && getOwner() .equals(other.getOwner()); } + result = result && (hasStartRow() == other.hasStartRow()); + if (hasStartRow()) { + result = result && getStartRow() + .equals(other.getStartRow()); + } + result = result && (hasStopRow() == other.hasStopRow()); + if (hasStopRow()) { + result = result && getStopRow() + .equals(other.getStopRow()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -12310,6 +12398,14 @@ public final class HBaseProtos { hash = (37 * hash) + OWNER_FIELD_NUMBER; hash = (53 * hash) + getOwner().hashCode(); } + if (hasStartRow()) { + hash = (37 * hash) + STARTROW_FIELD_NUMBER; + hash = (53 * hash) + getStartRow().hashCode(); + } + if (hasStopRow()) { + hash = (37 * hash) + STOPROW_FIELD_NUMBER; + hash = (53 * hash) + getStopRow().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -12436,6 +12532,10 @@ public final class HBaseProtos { bitField0_ = (bitField0_ & ~0x00000010); owner_ = ""; bitField0_ = (bitField0_ & ~0x00000020); + startRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); + bitField0_ = (bitField0_ & ~0x00000040); + stopRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); + bitField0_ = (bitField0_ & ~0x00000080); 
return this; } @@ -12488,6 +12588,14 @@ public final class HBaseProtos { to_bitField0_ |= 0x00000020; } result.owner_ = owner_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.startRow_ = startRow_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000080; + } + result.stopRow_ = stopRow_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -12528,6 +12636,12 @@ public final class HBaseProtos { owner_ = other.owner_; onChanged(); } + if (other.hasStartRow()) { + setStartRow(other.getStartRow()); + } + if (other.hasStopRow()) { + setStopRow(other.getStopRow()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -12907,6 +13021,78 @@ public final class HBaseProtos { return this; } + // optional bytes startRow = 7 [default = ""]; + private com.google.protobuf.ByteString startRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); + /** + * optional bytes startRow = 7 [default = ""]; + */ + public boolean hasStartRow() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bytes startRow = 7 [default = ""]; + */ + public com.google.protobuf.ByteString getStartRow() { + return startRow_; + } + /** + * optional bytes startRow = 7 [default = ""]; + */ + public Builder setStartRow(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + startRow_ = value; + onChanged(); + return this; + } + /** + * optional bytes startRow = 7 [default = ""]; + */ + public Builder clearStartRow() { + bitField0_ = (bitField0_ & ~0x00000040); + startRow_ = getDefaultInstance().getStartRow(); + onChanged(); + return this; + } + + // optional bytes stopRow = 8 [default = ""]; + private com.google.protobuf.ByteString stopRow_ = com.google.protobuf.Internal.bytesDefaultValue(""); + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public boolean hasStopRow() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public com.google.protobuf.ByteString getStopRow() { + return stopRow_; + } + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public Builder setStopRow(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000080; + stopRow_ = value; + onChanged(); + return this; + } + /** + * optional bytes stopRow = 8 [default = ""]; + */ + public Builder clearStopRow() { + bitField0_ = (bitField0_ & ~0x00000080); + stopRow_ = getDefaultInstance().getStopRow(); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:SnapshotDescription) } @@ -17811,29 +17997,30 @@ public final class HBaseProtos { "r\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesB" + "ytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\"" + ",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030", - "\002 \001(\003\"\314\001\n\023SnapshotDescription\022\014\n\004name\030\001 " + + "\002 \001(\003\"\363\001\n\023SnapshotDescription\022\014\n\004name\030\001 " + "\002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(" + "\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescription" + ".Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006" + - " \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n" + - 
"\tSKIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\t" + - "signature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcre" + - "ation_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003" + - "(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongM" + - "sg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndou", - "ble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdec" + - "imal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits" + - "\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespa" + - "ceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfigurat" + - "ion\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServ" + - "erInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022" + - "\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022" + - "\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007" + - "GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANO" + - "SECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECO", - "NDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS" + - "\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbase." + - "protobuf.generatedB\013HBaseProtosH\001\240\001\001" + " \001(\t\022\022\n\010startRow\030\007 \001(\014:\000\022\021\n\007stopRow\030\010 \001(" + + "\014:\000\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\t" + + "SKIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\ts" + + "ignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcrea" + + "tion_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003(" + + "\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongMs", + "g\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndoub" + + "le_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdeci" + + "mal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030" + + "\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespac" + + "eDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfigurati" + + "on\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServe" + + "rInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022\010" + + "\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r" + + "\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007G" + + "REATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOS", + "ECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILLISECON" + + "DS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020" + + "\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.hbase.p" + + "rotobuf.generatedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -17935,7 +18122,7 @@ public final class HBaseProtos { internal_static_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotDescription_descriptor, - new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", }); + new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", "StartRow", "StopRow", }); 
internal_static_ProcedureDescription_descriptor = getDescriptor().getMessageTypes().get(16); internal_static_ProcedureDescription_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java index 8dbb5ad..e07b005 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SnapshotProtos.java @@ -3789,7 +3789,8 @@ public final class SnapshotProtos { /** * Protobuf type {@code SnapshotDataManifest} */ - public static final class SnapshotDataManifest extends + public static final class + SnapshotDataManifest extends com.google.protobuf.GeneratedMessage implements SnapshotDataManifestOrBuilder { // Use SnapshotDataManifest.newBuilder() to construct. diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index 00e2850..2818a0b 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -183,6 +183,8 @@ message SnapshotDescription { optional Type type = 4 [default = FLUSH]; optional int32 version = 5; optional string owner = 6; + optional bytes startRow = 7 [default = ""]; + optional bytes stopRow = 8 [default = ""]; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index b21f4e7..c1af871 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -160,6 +160,9 @@ public final class MasterSnapshotVerifier { // Remove the non-default regions RegionReplicaUtil.removeNonDefaultRegions(regions); + // filter out regions outside of the key range specified + regions = SnapshotDescriptionUtils.filterHRegionInfos(regions, snapshot); + Map regionManifests = manifest.getRegionManifestsMap(); if (regionManifests == null) { String msg = "Snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " looks empty"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java index 56faf76..d75a374 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java @@ -148,7 +148,9 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho // not overwritten/removed, so you end up with old informations // that are not correct after the restore. 
     List<HRegionInfo> hrisToRemove = new LinkedList<HRegionInfo>();
-    if (metaChanges.hasRegionsToRemove()) hrisToRemove.addAll(metaChanges.getRegionsToRemove());
+    if (metaChanges.hasRegionsToRemove() &&
+        !ClientSnapshotDescriptionUtils.isPartialSnapshot(snapshot)) {
+      hrisToRemove.addAll(metaChanges.getRegionsToRemove());
+    }
     MetaTableAccessor.deleteRegions(conn, hrisToRemove);

     // 4.2 Add the new set of regions to META
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 93d836d..dd52992 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -221,7 +222,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
    * @throws IOException
    */
   private List<HRegion> getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException {
-    List<HRegion> onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable()));
+    List<HRegion> onlineRegions = SnapshotDescriptionUtils.filterHRegions(
+        rss.getOnlineRegions(TableName.valueOf(snapshot.getTable())), snapshot);
+
     Iterator<HRegion> iterator = onlineRegions.iterator();
     // remove the non-default regions
     while (iterator.hasNext()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cd04b82..7d2be6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hbase.snapshot;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
+
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -29,9 +31,12 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -330,4 +335,48 @@ public class SnapshotDescriptionUtils {
     if (!snapshot.hasOwner()) return false;
     return snapshot.getOwner().equals(user.getShortName());
   }
+
+  /**
+   * Filter out regions that are not within the specified key range
+   * @param onlineRegions list of regions
+   * @param snapshot the table snapshot description
+   * @return only the regions that overlap the specified start and stop keys in the snapshot
+   */
+  public static List<HRegion> filterHRegions(List<HRegion> onlineRegions,
+      SnapshotDescription snapshot) {
+    byte[] startKey = snapshot.getStartRow().toByteArray();
+    byte[] endKey = snapshot.getStopRow().toByteArray();
+    List<HRegion> result = Lists.newArrayList();
+    for (HRegion region : onlineRegions) {
+      if (compareRange(startKey, region.getEndKey()) && compareRange(region.getStartKey(),
+          endKey)) {
+        result.add(region);
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Filter out regions that are not within the specified key range
+   * @param onlineRegions list of regions
+   * @param snapshot the table snapshot description
+   * @return only the regions that overlap the specified start and stop keys in the snapshot
+   */
+  public static List<HRegionInfo> filterHRegionInfos(List<HRegionInfo> onlineRegions,
+      SnapshotDescription snapshot) {
+    byte[] startKey = snapshot.getStartRow().toByteArray();
+    byte[] endKey = snapshot.getStopRow().toByteArray();
+    List<HRegionInfo> result = Lists.newArrayList();
+    for (HRegionInfo region : onlineRegions) {
+      if (compareRange(startKey, region.getEndKey()) && compareRange(region.getStartKey(),
+          endKey)) {
+        result.add(region);
+      }
+    }
+    return result;
+  }
+
+  private static boolean compareRange(byte[] startKey, byte[] endKey) {
+    return Bytes.compareTo(startKey, endKey) < 0 || Bytes.equals(endKey, HConstants.LAST_ROW);
+  }
 }
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index 831835e..af22f34 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -18,6 +18,7 @@
  */
--%>
<%@ page contentType="text/html;charset=UTF-8"
+  import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
  import="java.util.Date"
  import="org.apache.hadoop.conf.Configuration"
  import="org.apache.hadoop.hbase.client.Admin"
@@ -28,6 +29,7 @@
  import="org.apache.hadoop.util.StringUtils"
  import="org.apache.hadoop.hbase.TableName"
  import="org.apache.hadoop.hbase.HBaseConfiguration" %>
+<%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
<%
  HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
  Configuration conf = master.getConfiguration();
@@ -152,6 +154,7 @@
        <th>Type</th>
        <th>Format Version</th>
        <th>State</th>
+       <th>Key Range</th>
@@ -164,6 +167,8 @@
      <% } else { %>
        <td>ok</td>
      <% } %>
+      <td><%= escapeXml(Bytes.toStringBinary(snapshot.getStartRow().toByteArray())) %>
+        , <%= escapeXml(Bytes.toStringBinary(snapshot.getStopRow().toByteArray())) %></td>
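
The region-selection rule added above reduces to a single overlap test: a region [regionStart, regionEnd) is kept when the snapshot's start key sorts before regionEnd and regionStart sorts before the snapshot's stop key, with an empty key treated as open-ended. The following is a minimal, self-contained sketch of that predicate for readers who want to try it outside HBase. It is not part of the patch; java.util.Arrays.compareUnsigned (Java 9+) stands in for HBase's Bytes.compareTo, and the class and variable names are made up.

import java.util.Arrays;

public class RangeOverlapSketch {
  // An empty key plays the role of HConstants.EMPTY_START_ROW / LAST_ROW:
  // an empty start key means "from the first row", an empty end key "through the last row".
  private static final byte[] OPEN = new byte[0];

  // Mirrors the patch's compareRange(startKey, endKey): true when startKey sorts
  // strictly before endKey, or when endKey is the open-ended end-of-table marker.
  static boolean compareRange(byte[] startKey, byte[] endKey) {
    return Arrays.compareUnsigned(startKey, endKey) < 0 || endKey.length == 0;
  }

  // A region [regionStart, regionEnd) overlaps the snapshot range [snapStart, snapEnd)
  // when snapStart < regionEnd and regionStart < snapEnd, empty keys being open-ended.
  static boolean overlaps(byte[] snapStart, byte[] snapEnd,
      byte[] regionStart, byte[] regionEnd) {
    return compareRange(snapStart, regionEnd) && compareRange(regionStart, snapEnd);
  }

  public static void main(String[] args) {
    byte[] bbb = "bbb".getBytes();
    byte[] ccc = "ccc".getBytes();
    byte[] eee = "eee".getBytes();
    byte[] fff = "fff".getBytes();
    byte[] zzz = "zzz".getBytes();
    System.out.println(overlaps(bbb, eee, bbb, ccc));   // true: region inside the range
    System.out.println(overlaps(bbb, eee, eee, fff));   // false: region starts at the stop key
    System.out.println(overlaps(eee, OPEN, zzz, OPEN)); // true: open-ended range keeps last region
  }
}

Note the strict inequality: a region whose start key equals the snapshot's stop key is excluded, which is why testFilteringKeyRanges below expects only two regions for the range [bbb, eee).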

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescription.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescription.java
new file mode 100644
index 0000000..467a937
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotDescription.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+@Category(SmallTests.class)
+public class TestSnapshotDescription {
+
+  private static final byte[] TABLE = Bytes.toBytes("tabletime");
+
+  @Test
+  public void testFilteringKeyRanges() throws Exception {
+    List<HRegionInfo> infos = Lists.newArrayList(
+        mockInfo(null, "aaa"),
+        mockInfo("aaa", "bbb"),
+        mockInfo("bbb", "ccc"),
+        mockInfo("ccc", "ddd"),
+        mockInfo("eee", "fff"),
+        mockInfo("yyy", "zzz"),
+        mockInfo("zzz", null)
+    );
+    List<HRegionInfo> regions = SnapshotDescriptionUtils.filterHRegionInfos(infos,
+        makeSnapshot("bbb", "eee"));
+    assertEquals(2, regions.size());
+
+    regions = SnapshotDescriptionUtils.filterHRegionInfos(infos,
+        makeSnapshot(null, "eee"));
+    assertEquals(4, regions.size());
+
+    regions = SnapshotDescriptionUtils.filterHRegionInfos(infos,
+        makeSnapshot("eee", null));
+    assertEquals(3, regions.size());
+  }
+
+  private HRegionInfo mockInfo(String startKey, String endKey) {
+    HRegionInfo info = mock(HRegionInfo.class);
+    byte[] startKeyBytes = HConstants.EMPTY_START_ROW;
+    if (startKey != null) {
+      startKeyBytes = Bytes.toBytesBinary(startKey);
+    }
+    byte[] endKeyBytes = HConstants.LAST_ROW;
+    if (endKey != null) {
+      endKeyBytes = Bytes.toBytesBinary(endKey);
+    }
+    when(info.getStartKey()).thenReturn(startKeyBytes);
+    when(info.getEndKey()).thenReturn(endKeyBytes);
+    return info;
+  }
+
+  private HBaseProtos.SnapshotDescription makeSnapshot(String startKey, String endKey) {
+    HBaseProtos.SnapshotDescription.Builder builder =
+        HBaseProtos.SnapshotDescription.newBuilder().setTable(Bytes.toString(TABLE)).
+ setName("snaaaapSHOT"); + if (startKey != null) { + builder = builder.setStartRow(ByteString.copyFrom(Bytes.toBytesBinary(startKey))); + } + if (endKey != null) { + builder = builder.setStopRow(ByteString.copyFrom(Bytes.toBytesBinary(endKey))); + } + + return builder.build(); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotKeyRanges.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotKeyRanges.java new file mode 100644 index 0000000..454af12 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotKeyRanges.java @@ -0,0 +1,129 @@ +package org.apache.hadoop.hbase.snapshot;/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.util.List; + +import static junit.framework.Assert.assertEquals; + +@Category({MediumTests.class, RegionServerTests.class}) +public class TestSnapshotKeyRanges { + private static final Log LOG = LogFactory.getLog(TestSnapshotKeyRanges.class); + + private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; + public static byte[] bbb = Bytes.toBytes("bbb"); + public static byte[] yyy = Bytes.toBytes("yyy"); + + @Before + public void setUp() throws Exception { + UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + UTIL.startMiniCluster(); + } + + @After + public void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test + public void testBasicFunctionality() throws Exception { + byte[] tableName = Bytes.toBytes("someTable"); + String start = "bbb"; + String end = "ccc"; + String snapshotName = "foo"; + HTable table = createTable(tableName, 50); + List regionsToSnapshot = + 
+        snapshotTableRange(table, snapshotName, start, end);
+    Path completedDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
+        UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir());
+    FileStatus[] fileStatuses = UTIL.getTestFileSystem().listStatus(completedDir);
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(), completedDir, LOG);
+    int regionCount = 0;
+    for (FileStatus status : fileStatuses) {
+      if (status.isDirectory() && !status.getPath().getName().equals(".tmp")) {
+        regionCount++;
+      }
+    }
+    LOG.info("regionsToSnapshot = " + regionsToSnapshot);
+    assertEquals(regionsToSnapshot.size(), regionCount);
+    List<HBaseProtos.SnapshotDescription> snapshots =
+        UTIL.getHBaseAdmin().listSnapshots();
+    assertEquals(1, snapshots.size());
+    HBaseProtos.SnapshotDescription onlySnapshot = Iterables.getOnlyElement(snapshots);
+    assertEquals(start, Bytes.toStringBinary(onlySnapshot.getStartRow().toByteArray()));
+    assertEquals(end, Bytes.toStringBinary(onlySnapshot.getStopRow().toByteArray()));
+  }
+
+  public List<HRegionInfo> snapshotTableRange(HTable table,
+      String snapshotName, String start, String end)
+      throws Exception {
+
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    List<HRegionInfo> regionsMatching = Lists.newArrayList();
+    List<HRegionInfo> allRegions = admin.getTableRegions(table.getTableName());
+    for (HRegionInfo onlineRegion : allRegions) {
+      if (onlineRegion.containsRange(Bytes.toBytesBinary(start), Bytes.toBytesBinary(end))) {
+        regionsMatching.add(onlineRegion);
+      }
+    }
+
+    admin.snapshot(snapshotName, Bytes.toString(table.getTableName()),
+        HBaseProtos.SnapshotDescription.Type.FLUSH, start,
+        end);
+    return regionsMatching;
+  }
+
+  private HTable createTable(byte[] tableName, int numRegions) throws IOException {
+    try {
+      UTIL.deleteTable(tableName);
+    } catch (Exception ex) {
+      // ignore a missing table; the test only needs a clean slate
+    }
+
+    UTIL.createTable(tableName, FAMILIES, 1, bbb, yyy, numRegions);
+    // put some stuff in the table
+    HTable table = new HTable(UTIL.getConfiguration(), tableName);
+    UTIL.loadTable(table, FAMILIES);
+    return table;
+  }
+}
\ No newline at end of file
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index f8ab36e..f8bda51 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -814,17 +814,23 @@ module Hbase
    #----------------------------------------------------------------------------------------------
    # Take a snapshot of specified table
    def snapshot(table, snapshot_name, *args)
-      if args.empty?
-        @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
-      else
-        args.each do |arg|
-          if arg[SKIP_FLUSH] == true
-            @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes, SnapshotDescription::Type::SKIPFLUSH)
-          else
-            @admin.snapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
-          end
-        end
+      start = nil
+      stop = nil
+      flush = SnapshotDescription::Type::FLUSH
+      args.each do |arg|
+        if arg.has_key?(SKIP_FLUSH)
+          if arg[SKIP_FLUSH] == true
+            flush = SnapshotDescription::Type::SKIPFLUSH
+          end
+        end
+        if arg.has_key?(STARTROW)
+          start = arg[STARTROW]
+        end
+        if arg.has_key?(STOPROW)
+          stop = arg[STOPROW]
+        end
       end
+      @admin.snapshot(snapshot_name, table, flush, start, stop)
    end

    #----------------------------------------------------------------------------------------------
@@ -835,8 +841,8 @@ module Hbase

    #----------------------------------------------------------------------------------------------
    # Create a new table by cloning the snapshot content
-    def clone_snapshot(snapshot_name, table)
-      @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    def clone_snapshot(snapshot_name, table, create_table=true)
+      @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes, create_table)
    end

    #----------------------------------------------------------------------------------------------
@@ -977,4 +983,4 @@ module Hbase
    end
  end
-end
+end
\ No newline at end of file
diff --git a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
index 8c193bb..088e336 100644
--- a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
@@ -21,19 +21,20 @@ module Shell
    class CloneSnapshot < Command
      def help
        return <<-EOF
-Create a new table by cloning the snapshot content.
+Create or restore a table by cloning the snapshot content.
There are no copies of data involved.
Writing to the newly created table will not influence the snapshot data.

Examples:
  hbase> clone_snapshot 'snapshotName', 'tableName'
  hbase> clone_snapshot 'snapshotName', 'namespace:tableName'
+  hbase> clone_snapshot 'snapshotName', 'namespace:tableName', false
EOF
      end

-      def command(snapshot_name, table)
+      def command(snapshot_name, table, create_table=true)
        format_simple_command do
-          admin.clone_snapshot(snapshot_name, table)
+          admin.clone_snapshot(snapshot_name, table, create_table)
        end
      end
    end
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
index 4e68802..0896b16 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
@@ -35,14 +35,15 @@ EOF
      def command(regex = ".*")
        now = Time.now
-        formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
+        formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME", "KEY RANGE" ])

        list = admin.list_snapshot(regex)
        list.each do |snapshot|
          creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
-          formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
+          start = org.apache.hadoop.hbase.util.Bytes.toStringBinary(snapshot.getStartRow.toByteArray)
+          stop = org.apache.hadoop.hbase.util.Bytes.toStringBinary(snapshot.getStopRow.toByteArray)
+          formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")", "[ " + start + " => " + stop + " ]" ])
        end
-
        formatter.footer(now, list.size)
        return list.map { |s| s.getName() }
      end
diff --git a/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
index 4d53171..1b2d9b6 100644
--- a/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
@@ -24,6 +24,10 @@ module Shell
Restore a specified snapshot.
The restore will replace the content of the original table,
bringing back the content to the snapshot state.
+If the snapshot is a full-table snapshot, table regions which do not overlap the snapshot
+are removed by the restore.
+If the snapshot covers only a partial key range, regions outside that range are left
+in place.
The table must be disabled.

Examples:
diff --git a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
index 15bf298..52c5b47 100644
--- a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
@@ -25,6 +25,8 @@ Take a snapshot of specified table. Examples:

  hbase> snapshot 'sourceTable', 'snapshotName'
  hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true}
+  hbase> snapshot 'sourceTable', 'snapshotName', {SKIP_FLUSH => true}, {STARTROW => 'startKey'}, {STOPROW => 'stopKey'}
+
EOF
      end
-- 
2.1.0
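
Taken together, the patch exposes ranged snapshots through a new HBaseAdmin#snapshot overload and an optional createTable flag on cloneSnapshot. The sketch below strings the two together end to end. It is illustrative only, not part of the patch: it assumes the patched client from this change is on the classpath and a cluster is running, and the table and snapshot names are made up.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class RangedSnapshotExample {
  public static void main(String[] args) throws Exception {
    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      // New overload from this patch: take a FLUSH snapshot covering only the
      // key range [bbb, eee). Keys are parsed with Bytes.toBytesBinary, so
      // escaped binary such as "\\x00" is accepted; null means open-ended.
      admin.snapshot("usertable-bbb-eee", "usertable",
          SnapshotDescription.Type.FLUSH, "bbb", "eee");

      // New overload from this patch: createTable = false skips the
      // table-existence check and restores the partial snapshot into a table
      // that is assumed to already exist.
      admin.cloneSnapshot("usertable-bbb-eee", TableName.valueOf("usertable_copy"), false);
    } finally {
      admin.close();
    }
  }
}

Per the help text above, the shell equivalent would be snapshot 'usertable', 'usertable-bbb-eee', {STARTROW => 'bbb'}, {STOPROW => 'eee'} followed by clone_snapshot 'usertable-bbb-eee', 'usertable_copy', false.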