diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
index d495295..28e1e3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java
@@ -18,21 +18,25 @@
package org.apache.hadoop.hbase;
+import java.io.IOException;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
-import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.hbase.security.KerberosInfo;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
@@ -43,6 +47,11 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
@@ -53,17 +62,19 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegio
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-
+import org.apache.hadoop.hbase.security.KerberosInfo;
+import org.apache.hadoop.hbase.security.TokenInfo;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -346,4 +357,54 @@ public interface MasterAdminProtocol extends
@Override
public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
IsCatalogJanitorEnabledRequest req) throws ServiceException;
+
+ /**
+ * Create a snapshot for the given table.
+ * @param controller Unused (set to null).
+ * @param snapshot description of the snapshot to take
+ * @return empty response on success
+ * @throws ServiceException if the snapshot cannot be taken
+ */
+ @Override
+ public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest snapshot)
+ throws ServiceException;
+
+ /**
+ * List existing snapshots.
+ * @param controller Unused (set to null).
+ * @param request information about the request (can be empty)
+ * @return {@link ListSnapshotResponse} - a list of {@link SnapshotDescription}
+ * @throws ServiceException if we cannot reach the filesystem
+ */
+ @Override
+ public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
+ throws ServiceException;
+
+ /**
+   * Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
+ * @param controller Unused (set to null).
+ * @param snapshotName snapshot to delete
+ * @return true if the snapshot was deleted, false if the snapshot didn't exist
+ * originally
+ * @throws ServiceException if the filesystem cannot be reached
+ */
+ @Override
+ public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
+ DeleteSnapshotRequest snapshotName) throws ServiceException;
+
+ /**
+ * Check to see if the snapshot is done.
+ * @param controller Unused (set to null).
+ * @param request name of the snapshot to check.
+   * @throws ServiceException wrapping possible exceptions:
+ *
+ * - {@link UnknownSnapshotException} if the passed snapshot name doesn't match the
+ * current snapshot or there is no previous snapshot.
+ * - {@link SnapshotCreationException} if the snapshot couldn't complete because of
+ * errors
+ *
+ */
+ @Override
+ public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
+ IsSnapshotDoneRequest request) throws ServiceException;
}
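
A minimal sketch of how a caller could drive these new RPCs directly, assuming an already-connected MasterAdminProtocol stub; the snapshot and table names are placeholders, and HBaseAdmin (next file) wraps this same exchange with retries and backoff:

```java
// Sketch only, not part of the patch: take a snapshot via the raw protocol
// and poll until the master reports it done. Names are hypothetical.
static void snapshotAndWait(MasterAdminProtocol master)
    throws ServiceException, InterruptedException {
  SnapshotDescription desc = SnapshotDescription.newBuilder()
      .setName("mySnapshot")   // placeholder snapshot name
      .setTable("myTable")     // placeholder table name
      .build();
  // Kick off the snapshot on the master.
  master.snapshot(null, TakeSnapshotRequest.newBuilder().setSnapshot(desc).build());
  // Poll for completion; HBaseAdmin uses an exponential backoff instead.
  IsSnapshotDoneRequest done = IsSnapshotDoneRequest.newBuilder().setSnapshot(desc).build();
  while (!master.isSnapshotDone(null, done).getDone()) {
    Thread.sleep(100);
  }
}
```
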
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 8946b0a..1ef678e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -72,20 +72,28 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRespo
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
@@ -93,8 +101,10 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaA
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
@@ -2037,6 +2047,188 @@ public class HBaseAdmin implements Abortable, Closeable {
}
/**
+   * Create a timestamp-consistent snapshot for the given table.
+   * @param snapshotName name of the snapshot to be created
+   * @param tableName name of the table for which the snapshot is created
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void snapshot(final String snapshotName, final String tableName) throws IOException {
+ snapshot(snapshotName, tableName, SnapshotDescription.Type.TIMESTAMP);
+ }
+
+ /**
+   * Create a timestamp-consistent snapshot for the given table.
+   * @param snapshotName name of the snapshot to be created
+   * @param tableName name of the table for which the snapshot is created
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void snapshot(final byte[] snapshotName, final byte[] tableName) throws IOException {
+ snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName));
+ }
+
+ /**
+ * Create typed snapshot of the table.
+ *
+   * Generally, you should not use this, but instead just take a {@link Type#TIMESTAMP
+   * timestamp-consistent snapshot} with {@link #snapshot(byte[], byte[])} or
+   * {@link #snapshot(String, String)}, which creates a timestamp-based snapshot, causing minimal
+   * interference with the running cluster.
+ *
+ * However, this method can be used to launch a {@link Type#GLOBAL GlobalSnapshot}. Note that a
+   * {@link Type#GLOBAL GlobalSnapshot} will block all writes to the table while taking the
+ * snapshot. This occurs so a single stable state can be achieved across all servers hosting the
+ * table - this is beyond the consistency constraints placed on an HBase table. This type of
+ * snapshot has two main implications:
+ *
+ * - all writes to the table will block while taking the snapshot
+ * - the probability of success decreases with increasing cluster size and is not recommended
+ * for clusters much greater than 500 nodes
+ *
+   * Together, the two above considerations mean that, to get a snapshot with any real load on
+   * your system, you will likely need multiple attempts and, on a large cluster, will suffer
+   * notable performance degradation.
+ *
+   * This can be suitable for a smaller cluster, but comes with the above caveats - user beware
+   * (you should really consider whether you can get by with just timestamp-consistent snapshots
+   * via {@link #snapshot(byte[], byte[])} or {@link #snapshot(String, String)}).
+   * @param snapshotName name to give the snapshot on the filesystem. Must be unique among all
+   *          other snapshots stored on the cluster
+ * @param tableName name of the table to snapshot
+ * @param type type of snapshot to take
+   * @throws IOException if we fail to reach the master
+ * @throws SnapshotCreationException if snapshot creation failed
+ */
+ public void snapshot(final String snapshotName, final String tableName,
+ SnapshotDescription.Type type) throws IOException, SnapshotCreationException {
+ SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+ builder.setTable(tableName);
+ builder.setName(snapshotName);
+ builder.setType(type);
+ snapshot(builder.build());
+ }
+
+ /**
+ * Take a snapshot and wait for the server to complete that snapshot (blocking).
+ *
+ * Only a single snapshot should be taken at a time, or results may be undefined.
+ *
+ * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
+ * unless you are sure about the type of snapshot that you want to take.
+ * @param snapshot snapshot to take
+   * @throws IOException if we lose contact with the master.
+ * @throws SnapshotCreationException if snapshot creation failed
+ */
+ public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException {
+ // make sure the snapshot name is valid
+ HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getName()));
+ HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getTable()));
+
+ // actually take the snapshot
+ TakeSnapshotResponse response = takeSnapshotAsync(snapshot);
+ final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
+ .build();
+ IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder().buildPartial();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ long max = response.getExpectedTime();
+ long maxPauseTime = max / this.numRetries;
+ int tries = 0;
+ LOG.debug("Waiting a max of " + max + " ms for snapshot to complete. (max " + maxPauseTime
+ + " ms per retry)");
+ while ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone()) {
+ try {
+ // sleep a backoff <= pauseTime amount
+ long sleep = getPauseTime(tries++);
+ LOG.debug("Found sleep:" + sleep);
+ sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
+ LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot to complete.");
+ Thread.sleep(sleep);
+
+ } catch (InterruptedException e) {
+ LOG.debug("Interrupted while waiting for snapshot " + snapshot + " to complete");
+ Thread.currentThread().interrupt();
+ }
+ LOG.debug("Getting current status of snasphot from master...");
+ done = execute(new MasterAdminCallable() {
+ @Override
+ public IsSnapshotDoneResponse call() throws ServiceException {
+ return masterAdmin.isSnapshotDone(null, request);
+ }
+ });
+ }
+ if (!done.getDone()) {
+ throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
+ + "' wasn't completed in expectedTime:" + max + " ms");
+ }
+ }
+
+ /**
+   * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
+ *
+ * Only a single snapshot should be taken at a time, or results may be undefined.
+ * @param snapshot snapshot to take
+ * @return response from the server indicating the max time to wait for the snapshot
+ * @throws IOException if the snapshot did not succeed or we lose contact with the master.
+ * @throws SnapshotCreationException if snapshot creation failed
+ */
+ public TakeSnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
+ SnapshotCreationException {
+ final TakeSnapshotRequest request = TakeSnapshotRequest.newBuilder().setSnapshot(snapshot)
+ .build();
+ // run the snapshot on the master
+    return execute(new MasterAdminCallable<TakeSnapshotResponse>() {
+ @Override
+ public TakeSnapshotResponse call() throws ServiceException {
+ return masterAdmin.snapshot(null, request);
+ }
+ });
+ }
+
+ /**
+ * List existing snapshots.
+   * @return a list of snapshot descriptors for existing snapshots
+ * @throws IOException if a network error occurs
+ */
+  public List<SnapshotDescription> listSnapshots() throws IOException {
+    return execute(new MasterAdminCallable<List<SnapshotDescription>>() {
+      @Override
+      public List<SnapshotDescription> call() throws ServiceException {
+ return masterAdmin.listSnapshots(null, ListSnapshotRequest.newBuilder().build())
+ .getSnapshotsList();
+ }
+ });
+ }
+
+ /**
+ * Delete an existing snapshot.
+ * @param snapshotName name of the snapshot
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteSnapshot(final byte[] snapshotName) throws IOException {
+ deleteSnapshot(Bytes.toString(snapshotName));
+ }
+
+ /**
+ * Delete an existing snapshot.
+ * @param snapshotName name of the snapshot
+ * @throws IOException if a remote or network exception occurs
+ */
+ public void deleteSnapshot(final String snapshotName) throws IOException {
+    // make sure the snapshot name is at least syntactically valid
+ HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshotName));
+ // do the delete
+    execute(new MasterAdminCallable<Void>() {
+ @Override
+ public Void call() throws ServiceException {
+ masterAdmin.deleteSnapshot(
+ null,
+ DeleteSnapshotRequest.newBuilder()
+ .setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build());
+ return null;
+ }
+ });
+ }
+
+ /**
* @see {@link #execute(MasterAdminCallable)}
*/
   private abstract static class MasterAdminCallable<V> implements Callable<V> {
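
Taken together, the new client-facing API can be exercised as below; a short usage sketch, assuming a Configuration pointing at a running cluster, with placeholder snapshot and table names:

```java
// Sketch only: exercise the snapshot API added to HBaseAdmin above.
// MasterNotRunningException/ZooKeeperConnectionException both extend IOException.
public static void snapshotDemo(org.apache.hadoop.conf.Configuration conf)
    throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  try {
    admin.snapshot("mySnapshot", "myTable"); // blocking, timestamp-consistent
    // Enumerate what is stored on the cluster.
    for (SnapshotDescription sd : admin.listSnapshots()) {
      System.out.println(sd.getName() + " (table: " + sd.getTable() + ")");
    }
    admin.deleteSnapshot("mySnapshot");      // clean up
  } finally {
    admin.close();
  }
}
```
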
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a3956d7..2069e58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -120,6 +120,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
@@ -130,6 +132,10 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableR
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
@@ -144,6 +150,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequ
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
@@ -164,6 +172,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -185,7 +194,6 @@ import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.net.DNS;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
-import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -2281,4 +2289,32 @@ Server {
public HFileCleaner getHFileCleaner() {
return this.hfileCleaner;
}
+
+ @Override
+ public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest request)
+ throws ServiceException {
+ throw new ServiceException(new UnsupportedOperationException(
+ "Snapshots are not implemented yet."));
+ }
+
+ @Override
+ public ListSnapshotResponse listSnapshots(RpcController controller, ListSnapshotRequest request)
+ throws ServiceException {
+ throw new ServiceException(new UnsupportedOperationException(
+ "Snapshots are not implemented yet."));
+ }
+
+ @Override
+ public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
+ DeleteSnapshotRequest request) throws ServiceException {
+ throw new ServiceException(new UnsupportedOperationException(
+ "Snapshots are not implemented yet."));
+ }
+
+ @Override
+ public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
+ IsSnapshotDoneRequest request) throws ServiceException {
+ throw new ServiceException(new UnsupportedOperationException(
+ "Snapshots are not implemented yet."));
+ }
}
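
Since the master-side methods above are still stubs, every snapshot RPC currently fails with a wrapped UnsupportedOperationException. A hedged sketch of how a client might detect that and degrade gracefully; exactly how the cause surfaces through the RPC layer is an assumption here, so this only inspects the message text:

```java
// Sketch only: tolerate a master that predates snapshot support.
// `admin` is an assumed HBaseAdmin; the snapshot/table names are placeholders.
public static void trySnapshot(HBaseAdmin admin) throws IOException {
  try {
    admin.snapshot("mySnapshot", "myTable");
  } catch (IOException e) {
    // The UnsupportedOperationException thrown by HMaster arrives wrapped in
    // a ServiceException; matching on the message is an assumption.
    if (e.getMessage() != null && e.getMessage().contains("not implemented")) {
      System.err.println("Master does not support snapshots yet; skipping.");
      return;
    }
    throw e;
  }
}
```
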
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index 25e0705..81f6cbe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -13967,7 +13967,7 @@ public final class ClientProtos {
getFamilyPathOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPathOrBuilder getFamilyPathOrBuilder(
int index);
-
+
// optional bool assignSeqNum = 3;
boolean hasAssignSeqNum();
boolean getAssignSeqNum();
@@ -14537,7 +14537,7 @@ public final class ClientProtos {
public boolean getAssignSeqNum() {
return assignSeqNum_;
}
-
+
private void initFields() {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
familyPath_ = java.util.Collections.emptyList();
@@ -15261,7 +15261,7 @@ public final class ClientProtos {
onChanged();
return this;
}
-
+
// @@protoc_insertion_point(builder_scope:BulkLoadHFileRequest)
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index bb34901..741f95d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -11054,6 +11054,735 @@ public final class HBaseProtos {
// @@protoc_insertion_point(class_scope:BytesBytesPair)
}
+ public interface SnapshotDescriptionOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string name = 1;
+ boolean hasName();
+ String getName();
+
+ // optional string table = 2;
+ boolean hasTable();
+ String getTable();
+
+ // optional int64 creationTime = 3 [default = 0];
+ boolean hasCreationTime();
+ long getCreationTime();
+
+ // optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
+ boolean hasType();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType();
+ }
+ public static final class SnapshotDescription extends
+ com.google.protobuf.GeneratedMessage
+ implements SnapshotDescriptionOrBuilder {
+ // Use SnapshotDescription.newBuilder() to construct.
+ private SnapshotDescription(Builder builder) {
+ super(builder);
+ }
+ private SnapshotDescription(boolean noInit) {}
+
+ private static final SnapshotDescription defaultInstance;
+ public static SnapshotDescription getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SnapshotDescription getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable;
+ }
+
+ public enum Type
+ implements com.google.protobuf.ProtocolMessageEnum {
+ TIMESTAMP(0, 0),
+ GLOBAL(1, 1),
+ ;
+
+ public static final int TIMESTAMP_VALUE = 0;
+ public static final int GLOBAL_VALUE = 1;
+
+
+ public final int getNumber() { return value; }
+
+ public static Type valueOf(int value) {
+ switch (value) {
+ case 0: return TIMESTAMP;
+ case 1: return GLOBAL;
+ default: return null;
+ }
+ }
+
+      public static com.google.protobuf.Internal.EnumLiteMap<Type>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<Type>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<Type>() {
+ public Type findValueByNumber(int number) {
+ return Type.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final Type[] VALUES = {
+ TIMESTAMP, GLOBAL,
+ };
+
+ public static Type valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private Type(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:SnapshotDescription.Type)
+ }
+
+ private int bitField0_;
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.lang.Object name_;
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getName() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ name_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getNameBytes() {
+ java.lang.Object ref = name_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ name_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string table = 2;
+ public static final int TABLE_FIELD_NUMBER = 2;
+ private java.lang.Object table_;
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getTable() {
+ java.lang.Object ref = table_;
+ if (ref instanceof String) {
+ return (String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ String s = bs.toStringUtf8();
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+ table_ = s;
+ }
+ return s;
+ }
+ }
+ private com.google.protobuf.ByteString getTableBytes() {
+ java.lang.Object ref = table_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+ table_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional int64 creationTime = 3 [default = 0];
+ public static final int CREATIONTIME_FIELD_NUMBER = 3;
+ private long creationTime_;
+ public boolean hasCreationTime() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getCreationTime() {
+ return creationTime_;
+ }
+
+ // optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
+ public static final int TYPE_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_;
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() {
+ return type_;
+ }
+
+ private void initFields() {
+ name_ = "";
+ table_ = "";
+ creationTime_ = 0L;
+ type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTableBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeInt64(3, creationTime_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeEnum(4, type_.getNumber());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTableBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, creationTime_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(4, type_.getNumber());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj;
+
+ boolean result = true;
+ result = result && (hasName() == other.hasName());
+ if (hasName()) {
+ result = result && getName()
+ .equals(other.getName());
+ }
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
+ result = result && (hasCreationTime() == other.hasCreationTime());
+ if (hasCreationTime()) {
+ result = result && (getCreationTime()
+ == other.getCreationTime());
+ }
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
+ result = result &&
+ (getType() == other.getType());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasName()) {
+ hash = (37 * hash) + NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getName().hashCode();
+ }
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
+ if (hasCreationTime()) {
+ hash = (37 * hash) + CREATIONTIME_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCreationTime());
+ }
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ name_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ table_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ creationTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.name_ = name_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.table_ = table_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.creationTime_ = creationTime_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.type_ = type_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasTable()) {
+ setTable(other.getTable());
+ }
+ if (other.hasCreationTime()) {
+ setCreationTime(other.getCreationTime());
+ }
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ name_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ table_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ creationTime_ = input.readInt64();
+ break;
+ }
+ case 32: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(4, rawValue);
+ } else {
+ bitField0_ |= 0x00000008;
+ type_ = value;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required string name = 1;
+ private java.lang.Object name_ = "";
+ public boolean hasName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public String getName() {
+ java.lang.Object ref = name_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ name_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setName(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ name_ = getDefaultInstance().getName();
+ onChanged();
+ return this;
+ }
+ void setName(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000001;
+ name_ = value;
+ onChanged();
+ }
+
+ // optional string table = 2;
+ private java.lang.Object table_ = "";
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public String getTable() {
+ java.lang.Object ref = table_;
+ if (!(ref instanceof String)) {
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+ table_ = s;
+ return s;
+ } else {
+ return (String) ref;
+ }
+ }
+ public Builder setTable(String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ table_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearTable() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ table_ = getDefaultInstance().getTable();
+ onChanged();
+ return this;
+ }
+ void setTable(com.google.protobuf.ByteString value) {
+ bitField0_ |= 0x00000002;
+ table_ = value;
+ onChanged();
+ }
+
+ // optional int64 creationTime = 3 [default = 0];
+ private long creationTime_ ;
+ public boolean hasCreationTime() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getCreationTime() {
+ return creationTime_;
+ }
+ public Builder setCreationTime(long value) {
+ bitField0_ |= 0x00000004;
+ creationTime_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearCreationTime() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ creationTime_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional .SnapshotDescription.Type type = 4 [default = TIMESTAMP];
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() {
+ return type_;
+ }
+ public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.TIMESTAMP;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:SnapshotDescription)
+ }
+
+ static {
+ defaultInstance = new SnapshotDescription(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:SnapshotDescription)
+ }
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_TableSchema_descriptor;
private static
@@ -11134,6 +11863,11 @@ public final class HBaseProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_BytesBytesPair_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_SnapshotDescription_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_SnapshotDescription_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -11185,14 +11919,18 @@ public final class HBaseProtos {
"or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na",
"me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
"\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
- "tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014*r" +
- "\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" +
- "\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_" +
- "OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007Ke" +
- "yType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022" +
- "\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n" +
- "\007MAXIMUM\020\377\001B>\n*org.apache.hadoop.hbase.p" +
- "rotobuf.generatedB\013HBaseProtosH\001\240\001\001"
+ "tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\"\242" +
+ "\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002(\t\022\r\n\005" +
+ "table\030\002 \001(\t\022\027\n\014creationTime\030\003 \001(\003:\0010\0222\n\004" +
+ "type\030\004 \001(\0162\031.SnapshotDescription.Type:\tT" +
+ "IMESTAMP\"!\n\004Type\022\r\n\tTIMESTAMP\020\000\022\n\n\006GLOBA" +
+ "L\020\001*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_" +
+ "EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GRE" +
+ "ATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*",
+ "_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELE" +
+ "TE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY" +
+ "\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.apache.hadoop.hb" +
+ "ase.protobuf.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11327,6 +12065,14 @@ public final class HBaseProtos {
new java.lang.String[] { "First", "Second", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class);
+ internal_static_SnapshotDescription_descriptor =
+ getDescriptor().getMessageTypes().get(14);
+ internal_static_SnapshotDescription_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_SnapshotDescription_descriptor,
+ new java.lang.String[] { "Name", "Table", "CreationTime", "Type", },
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class,
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class);
return null;
}
};
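
The generated SnapshotDescription behaves like any protobuf message; a small sketch of a wire round-trip, showing the declared defaults (creationTime = 0, type = TIMESTAMP) kicking in for unset optional fields. The snapshot name is a placeholder:

```java
// Sketch only: round-trip the new message through its wire format.
public static void roundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
  SnapshotDescription in = SnapshotDescription.newBuilder()
      .setName("mySnapshot") // `name` is the only required field
      .build();
  SnapshotDescription out = SnapshotDescription.parseFrom(in.toByteArray());
  assert out.getName().equals("mySnapshot");
  assert out.getCreationTime() == 0L;                          // proto default
  assert out.getType() == SnapshotDescription.Type.TIMESTAMP;  // proto default
}
```
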
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
index 13088d9..8979dec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
@@ -14364,6 +14364,3599 @@ public final class MasterAdminProtos {
// @@protoc_insertion_point(class_scope:IsCatalogJanitorEnabledResponse)
}
+ public interface TakeSnapshotRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .SnapshotDescription snapshot = 1;
+ boolean hasSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
+ }
+ public static final class TakeSnapshotRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements TakeSnapshotRequestOrBuilder {
+ // Use TakeSnapshotRequest.newBuilder() to construct.
+ private TakeSnapshotRequest(Builder builder) {
+ super(builder);
+ }
+ private TakeSnapshotRequest(boolean noInit) {}
+
+ private static final TakeSnapshotRequest defaultInstance;
+ public static TakeSnapshotRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TakeSnapshotRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotRequest_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .SnapshotDescription snapshot = 1;
+ public static final int SNAPSHOT_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ return snapshot_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ return snapshot_;
+ }
+
+ private void initFields() {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasSnapshot()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getSnapshot().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, snapshot_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, snapshot_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest) obj;
+
+ boolean result = true;
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (snapshotBuilder_ == null) {
+ result.snapshot_ = snapshot_;
+ } else {
+ result.snapshot_ = snapshotBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance()) return this;
+ if (other.hasSnapshot()) {
+ mergeSnapshot(other.getSnapshot());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasSnapshot()) {
+
+ return false;
+ }
+ if (!getSnapshot().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder();
+ if (hasSnapshot()) {
+ subBuilder.mergeFrom(getSnapshot());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setSnapshot(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .SnapshotDescription snapshot = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ if (snapshotBuilder_ == null) {
+ return snapshot_;
+ } else {
+ return snapshotBuilder_.getMessage();
+ }
+ }
+ public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ snapshot_ = value;
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setSnapshot(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = builderForValue.build();
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
+ snapshot_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
+ } else {
+ snapshot_ = value;
+ }
+ onChanged();
+ } else {
+ snapshotBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearSnapshot() {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ onChanged();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getSnapshotFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ if (snapshotBuilder_ != null) {
+ return snapshotBuilder_.getMessageOrBuilder();
+ } else {
+ return snapshot_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotFieldBuilder() {
+ if (snapshotBuilder_ == null) {
+ snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
+ snapshot_,
+ getParentForChildren(),
+ isClean());
+ snapshot_ = null;
+ }
+ return snapshotBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TakeSnapshotRequest)
+ }
+
+ static {
+ defaultInstance = new TakeSnapshotRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TakeSnapshotRequest)
+ }
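+
+ // Usage sketch (illustrative only, kept as a comment so the generated API
+ // surface is untouched): build, serialize, and re-parse a TakeSnapshotRequest
+ // using only methods defined above. Assumes the SnapshotDescription's own
+ // required fields are populated where indicated.
+ //
+ // HBaseProtos.SnapshotDescription description =
+ // HBaseProtos.SnapshotDescription.newBuilder()
+ // // ... set the description's required fields here ...
+ // .build();
+ // TakeSnapshotRequest request =
+ // TakeSnapshotRequest.newBuilder().setSnapshot(description).build();
+ // byte[] wire = request.toByteArray();
+ // TakeSnapshotRequest parsed = TakeSnapshotRequest.parseFrom(wire);
+ // assert parsed.hasSnapshot() && parsed.isInitialized();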
+
+ public interface TakeSnapshotResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int64 expectedTime = 1;
+ boolean hasExpectedTime();
+ long getExpectedTime();
+ }
+ public static final class TakeSnapshotResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements TakeSnapshotResponseOrBuilder {
+ // Use TakeSnapshotResponse.newBuilder() to construct.
+ private TakeSnapshotResponse(Builder builder) {
+ super(builder);
+ }
+ private TakeSnapshotResponse(boolean noInit) {}
+
+ private static final TakeSnapshotResponse defaultInstance;
+ public static TakeSnapshotResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TakeSnapshotResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required int64 expectedTime = 1;
+ public static final int EXPECTEDTIME_FIELD_NUMBER = 1;
+ private long expectedTime_;
+ public boolean hasExpectedTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getExpectedTime() {
+ return expectedTime_;
+ }
+
+ private void initFields() {
+ expectedTime_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasExpectedTime()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(1, expectedTime_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(1, expectedTime_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse) obj;
+
+ boolean result = true;
+ result = result && (hasExpectedTime() == other.hasExpectedTime());
+ if (hasExpectedTime()) {
+ result = result && (getExpectedTime()
+ == other.getExpectedTime());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasExpectedTime()) {
+ hash = (37 * hash) + EXPECTEDTIME_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getExpectedTime());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_TakeSnapshotResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ expectedTime_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.expectedTime_ = expectedTime_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance()) return this;
+ if (other.hasExpectedTime()) {
+ setExpectedTime(other.getExpectedTime());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasExpectedTime()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ expectedTime_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required int64 expectedTime = 1;
+ private long expectedTime_ ;
+ public boolean hasExpectedTime() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getExpectedTime() {
+ return expectedTime_;
+ }
+ public Builder setExpectedTime(long value) {
+ bitField0_ |= 0x00000001;
+ expectedTime_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearExpectedTime() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ expectedTime_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:TakeSnapshotResponse)
+ }
+
+ static {
+ defaultInstance = new TakeSnapshotResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:TakeSnapshotResponse)
+ }
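+
+ // Usage sketch (illustrative only): the response carries a single required
+ // int64, so producing and consuming it is one call each. The 3000L value is
+ // an arbitrary placeholder, not a protocol constant, and the field's units
+ // are whatever the master reports.
+ //
+ // TakeSnapshotResponse response =
+ // TakeSnapshotResponse.newBuilder().setExpectedTime(3000L).build();
+ // long expected = response.getExpectedTime(); // 3000L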
+
+ public interface ListSnapshotRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ public static final class ListSnapshotRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements ListSnapshotRequestOrBuilder {
+ // Use ListSnapshotRequest.newBuilder() to construct.
+ private ListSnapshotRequest(Builder builder) {
+ super(builder);
+ }
+ private ListSnapshotRequest(boolean noInit) {}
+
+ private static final ListSnapshotRequest defaultInstance;
+ public static ListSnapshotRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ListSnapshotRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotRequest_fieldAccessorTable;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:ListSnapshotRequest)
+ }
+
+ static {
+ defaultInstance = new ListSnapshotRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ListSnapshotRequest)
+ }
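+
+ // Usage sketch (illustrative only): ListSnapshotRequest declares no fields,
+ // so callers can reuse the shared default instance instead of allocating:
+ //
+ // ListSnapshotRequest request = ListSnapshotRequest.getDefaultInstance();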
+
+ public interface ListSnapshotResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .SnapshotDescription snapshots = 1;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription>
+ getSnapshotsList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshots(int index);
+ int getSnapshotsCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotsOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotsOrBuilder(
+ int index);
+ }
+ public static final class ListSnapshotResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements ListSnapshotResponseOrBuilder {
+ // Use ListSnapshotResponse.newBuilder() to construct.
+ private ListSnapshotResponse(Builder builder) {
+ super(builder);
+ }
+ private ListSnapshotResponse(boolean noInit) {}
+
+ private static final ListSnapshotResponse defaultInstance;
+ public static ListSnapshotResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ListSnapshotResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotResponse_fieldAccessorTable;
+ }
+
+ // repeated .SnapshotDescription snapshots = 1;
+ public static final int SNAPSHOTS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription> snapshots_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription> getSnapshotsList() {
+ return snapshots_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotsOrBuilderList() {
+ return snapshots_;
+ }
+ public int getSnapshotsCount() {
+ return snapshots_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshots(int index) {
+ return snapshots_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotsOrBuilder(
+ int index) {
+ return snapshots_.get(index);
+ }
+
+ private void initFields() {
+ snapshots_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getSnapshotsCount(); i++) {
+ if (!getSnapshots(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < snapshots_.size(); i++) {
+ output.writeMessage(1, snapshots_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < snapshots_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, snapshots_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse) obj;
+
+ boolean result = true;
+ result = result && getSnapshotsList()
+ .equals(other.getSnapshotsList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getSnapshotsCount() > 0) {
+ hash = (37 * hash) + SNAPSHOTS_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshotsList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListSnapshotResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (snapshotsBuilder_ == null) {
+ snapshots_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ snapshotsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse(this);
+ int from_bitField0_ = bitField0_;
+ if (snapshotsBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ snapshots_ = java.util.Collections.unmodifiableList(snapshots_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.snapshots_ = snapshots_;
+ } else {
+ result.snapshots_ = snapshotsBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance()) return this;
+ if (snapshotsBuilder_ == null) {
+ if (!other.snapshots_.isEmpty()) {
+ if (snapshots_.isEmpty()) {
+ snapshots_ = other.snapshots_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureSnapshotsIsMutable();
+ snapshots_.addAll(other.snapshots_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.snapshots_.isEmpty()) {
+ if (snapshotsBuilder_.isEmpty()) {
+ snapshotsBuilder_.dispose();
+ snapshotsBuilder_ = null;
+ snapshots_ = other.snapshots_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ snapshotsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getSnapshotsFieldBuilder() : null;
+ } else {
+ snapshotsBuilder_.addAllMessages(other.snapshots_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getSnapshotsCount(); i++) {
+ if (!getSnapshots(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addSnapshots(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // repeated .SnapshotDescription snapshots = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription> snapshots_ =
+ java.util.Collections.emptyList();
+ private void ensureSnapshotsIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ snapshots_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription>(snapshots_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotsBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription> getSnapshotsList() {
+ if (snapshotsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(snapshots_);
+ } else {
+ return snapshotsBuilder_.getMessageList();
+ }
+ }
+ public int getSnapshotsCount() {
+ if (snapshotsBuilder_ == null) {
+ return snapshots_.size();
+ } else {
+ return snapshotsBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshots(int index) {
+ if (snapshotsBuilder_ == null) {
+ return snapshots_.get(index);
+ } else {
+ return snapshotsBuilder_.getMessage(index);
+ }
+ }
+ public Builder setSnapshots(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSnapshotsIsMutable();
+ snapshots_.set(index, value);
+ onChanged();
+ } else {
+ snapshotsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setSnapshots(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotsBuilder_ == null) {
+ ensureSnapshotsIsMutable();
+ snapshots_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ snapshotsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addSnapshots(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSnapshotsIsMutable();
+ snapshots_.add(value);
+ onChanged();
+ } else {
+ snapshotsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addSnapshots(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSnapshotsIsMutable();
+ snapshots_.add(index, value);
+ onChanged();
+ } else {
+ snapshotsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addSnapshots(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotsBuilder_ == null) {
+ ensureSnapshotsIsMutable();
+ snapshots_.add(builderForValue.build());
+ onChanged();
+ } else {
+ snapshotsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addSnapshots(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotsBuilder_ == null) {
+ ensureSnapshotsIsMutable();
+ snapshots_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ snapshotsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllSnapshots(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription> values) {
+ if (snapshotsBuilder_ == null) {
+ ensureSnapshotsIsMutable();
+ super.addAll(values, snapshots_);
+ onChanged();
+ } else {
+ snapshotsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearSnapshots() {
+ if (snapshotsBuilder_ == null) {
+ snapshots_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ snapshotsBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeSnapshots(int index) {
+ if (snapshotsBuilder_ == null) {
+ ensureSnapshotsIsMutable();
+ snapshots_.remove(index);
+ onChanged();
+ } else {
+ snapshotsBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotsBuilder(
+ int index) {
+ return getSnapshotsFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotsOrBuilder(
+ int index) {
+ if (snapshotsBuilder_ == null) {
+ return snapshots_.get(index); } else {
+ return snapshotsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotsOrBuilderList() {
+ if (snapshotsBuilder_ != null) {
+ return snapshotsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(snapshots_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder addSnapshotsBuilder() {
+ return getSnapshotsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder addSnapshotsBuilder(
+ int index) {
+ return getSnapshotsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder>
+ getSnapshotsBuilderList() {
+ return getSnapshotsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotsFieldBuilder() {
+ if (snapshotsBuilder_ == null) {
+ snapshotsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
+ snapshots_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ snapshots_ = null;
+ }
+ return snapshotsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:ListSnapshotResponse)
+ }
+
+ static {
+ defaultInstance = new ListSnapshotResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:ListSnapshotResponse)
+ }
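
Editor's note (illustrative sketch, not part of the generated patch): the snapshots field handled above is a standard repeated-message field, so the generated builder exposes the usual add/addAll/clear/remove surface. A minimal Java usage sketch, assuming the generated classes are imported and that SnapshotDescription declares a required string name (its setName and the single-value addSnapshots overload follow the standard protoc pattern but are not shown in this hunk):

    // Sketch only. Assumes:
    //   import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse;
    //   import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
    ListSnapshotResponse response = ListSnapshotResponse.newBuilder()
        .addSnapshots(SnapshotDescription.newBuilder().setName("snap1").build())
        .addSnapshots(SnapshotDescription.newBuilder().setName("snap2").build())
        .build();
    // Read the repeated field back; the built message returns an unmodifiable list.
    for (SnapshotDescription snapshot : response.getSnapshotsList()) {
      System.out.println(snapshot.getName());
    }
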
+
+ public interface DeleteSnapshotRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .SnapshotDescription snapshot = 1;
+ boolean hasSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
+ }
+ public static final class DeleteSnapshotRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteSnapshotRequestOrBuilder {
+ // Use DeleteSnapshotRequest.newBuilder() to construct.
+ private DeleteSnapshotRequest(Builder builder) {
+ super(builder);
+ }
+ private DeleteSnapshotRequest(boolean noInit) {}
+
+ private static final DeleteSnapshotRequest defaultInstance;
+ public static DeleteSnapshotRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteSnapshotRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotRequest_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .SnapshotDescription snapshot = 1;
+ public static final int SNAPSHOT_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ return snapshot_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ return snapshot_;
+ }
+
+ private void initFields() {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasSnapshot()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getSnapshot().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, snapshot_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, snapshot_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest) obj;
+
+ boolean result = true;
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (snapshotBuilder_ == null) {
+ result.snapshot_ = snapshot_;
+ } else {
+ result.snapshot_ = snapshotBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance()) return this;
+ if (other.hasSnapshot()) {
+ mergeSnapshot(other.getSnapshot());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasSnapshot()) {
+
+ return false;
+ }
+ if (!getSnapshot().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder();
+ if (hasSnapshot()) {
+ subBuilder.mergeFrom(getSnapshot());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setSnapshot(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .SnapshotDescription snapshot = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ if (snapshotBuilder_ == null) {
+ return snapshot_;
+ } else {
+ return snapshotBuilder_.getMessage();
+ }
+ }
+ public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ snapshot_ = value;
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setSnapshot(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = builderForValue.build();
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
+ snapshot_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
+ } else {
+ snapshot_ = value;
+ }
+ onChanged();
+ } else {
+ snapshotBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearSnapshot() {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ onChanged();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getSnapshotFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ if (snapshotBuilder_ != null) {
+ return snapshotBuilder_.getMessageOrBuilder();
+ } else {
+ return snapshot_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotFieldBuilder() {
+ if (snapshotBuilder_ == null) {
+ snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
+ snapshot_,
+ getParentForChildren(),
+ isClean());
+ snapshot_ = null;
+ }
+ return snapshotBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DeleteSnapshotRequest)
+ }
+
+ static {
+ defaultInstance = new DeleteSnapshotRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DeleteSnapshotRequest)
+ }
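
Editor's note (illustrative sketch, not part of the generated patch): snapshot is a required field of DeleteSnapshotRequest, which is why the isInitialized() implementations above reject a message without it. A hedged sketch of the resulting build() behavior, again assuming SnapshotDescription's required name setter:

    // Sketch only: build() enforces the required field, buildPartial() does not.
    DeleteSnapshotRequest.Builder builder = DeleteSnapshotRequest.newBuilder();
    DeleteSnapshotRequest partial = builder.buildPartial(); // legal, but partial.isInitialized() == false
    try {
      builder.build(); // throws: required 'snapshot' is unset
    } catch (com.google.protobuf.UninitializedMessageException expected) {
      builder.setSnapshot(SnapshotDescription.newBuilder().setName("snap1").build());
    }
    DeleteSnapshotRequest request = builder.build(); // now initialized, succeeds
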
+
+ public interface DeleteSnapshotResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bool success = 1 [default = true];
+ boolean hasSuccess();
+ boolean getSuccess();
+ }
+ public static final class DeleteSnapshotResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteSnapshotResponseOrBuilder {
+ // Use DeleteSnapshotResponse.newBuilder() to construct.
+ private DeleteSnapshotResponse(Builder builder) {
+ super(builder);
+ }
+ private DeleteSnapshotResponse(boolean noInit) {}
+
+ private static final DeleteSnapshotResponse defaultInstance;
+ public static DeleteSnapshotResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteSnapshotResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // optional bool success = 1 [default = true];
+ public static final int SUCCESS_FIELD_NUMBER = 1;
+ private boolean success_;
+ public boolean hasSuccess() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public boolean getSuccess() {
+ return success_;
+ }
+
+ private void initFields() {
+ success_ = true;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, success_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, success_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse) obj;
+
+ boolean result = true;
+ result = result && (hasSuccess() == other.hasSuccess());
+ if (hasSuccess()) {
+ result = result && (getSuccess()
+ == other.getSuccess());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSuccess()) {
+ hash = (37 * hash) + SUCCESS_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getSuccess());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteSnapshotResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ success_ = true;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.success_ = success_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance()) return this;
+ if (other.hasSuccess()) {
+ setSuccess(other.getSuccess());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ success_ = input.readBool();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // optional bool success = 1 [default = true];
+ private boolean success_ = true;
+ public boolean hasSuccess() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public boolean getSuccess() {
+ return success_;
+ }
+ public Builder setSuccess(boolean value) {
+ bitField0_ |= 0x00000001;
+ success_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearSuccess() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ success_ = true;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DeleteSnapshotResponse)
+ }
+
+ static {
+ defaultInstance = new DeleteSnapshotResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DeleteSnapshotResponse)
+ }
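
Editor's note (illustrative sketch, not part of the generated patch): success defaults to true, so a DeleteSnapshotResponse that never sets the field still reads as successful; only an explicit setSuccess(false) changes what getSuccess() returns. A short round-trip sketch (toByteArray() comes from the protobuf runtime, not this hunk; the caller would declare InvalidProtocolBufferException):

    // Sketch only: an unset field with a default is not written on the wire (see writeTo above).
    DeleteSnapshotResponse empty = DeleteSnapshotResponse.getDefaultInstance();
    assert !empty.hasSuccess() && empty.getSuccess(); // default of true kicks in

    byte[] wire = DeleteSnapshotResponse.newBuilder().setSuccess(false).build().toByteArray();
    DeleteSnapshotResponse parsed = DeleteSnapshotResponse.parseFrom(wire);
    assert parsed.hasSuccess() && !parsed.getSuccess(); // explicit value survives the round trip
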
+
+ public interface IsSnapshotDoneRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional .SnapshotDescription snapshot = 1;
+ boolean hasSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
+ }
+ public static final class IsSnapshotDoneRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements IsSnapshotDoneRequestOrBuilder {
+ // Use IsSnapshotDoneRequest.newBuilder() to construct.
+ private IsSnapshotDoneRequest(Builder builder) {
+ super(builder);
+ }
+ private IsSnapshotDoneRequest(boolean noInit) {}
+
+ private static final IsSnapshotDoneRequest defaultInstance;
+ public static IsSnapshotDoneRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public IsSnapshotDoneRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneRequest_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // optional .SnapshotDescription snapshot = 1;
+ public static final int SNAPSHOT_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ return snapshot_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ return snapshot_;
+ }
+
+ private void initFields() {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, snapshot_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, snapshot_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest) obj;
+
+ boolean result = true;
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (snapshotBuilder_ == null) {
+ result.snapshot_ = snapshot_;
+ } else {
+ result.snapshot_ = snapshotBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance()) return this;
+ if (other.hasSnapshot()) {
+ mergeSnapshot(other.getSnapshot());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder();
+ if (hasSnapshot()) {
+ subBuilder.mergeFrom(getSnapshot());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setSnapshot(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // optional .SnapshotDescription snapshot = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ if (snapshotBuilder_ == null) {
+ return snapshot_;
+ } else {
+ return snapshotBuilder_.getMessage();
+ }
+ }
+ public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ snapshot_ = value;
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setSnapshot(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = builderForValue.build();
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
+ snapshot_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
+ } else {
+ snapshot_ = value;
+ }
+ onChanged();
+ } else {
+ snapshotBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearSnapshot() {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ onChanged();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getSnapshotFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ if (snapshotBuilder_ != null) {
+ return snapshotBuilder_.getMessageOrBuilder();
+ } else {
+ return snapshot_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotFieldBuilder() {
+ if (snapshotBuilder_ == null) {
+ snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
+ snapshot_,
+ getParentForChildren(),
+ isClean());
+ snapshot_ = null;
+ }
+ return snapshotBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:IsSnapshotDoneRequest)
+ }
+
+ static {
+ defaultInstance = new IsSnapshotDoneRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:IsSnapshotDoneRequest)
+ }
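
Editor's note (illustrative sketch, not part of the generated patch): here snapshot is optional, in contrast with DeleteSnapshotRequest above, so isInitialized() only validates the sub-message when it is present and an empty request builds cleanly. A sketch of the optional-field and merge semantics:

    // Sketch only: optional sub-message, so build() succeeds without it.
    IsSnapshotDoneRequest emptyRequest = IsSnapshotDoneRequest.newBuilder().build();
    assert emptyRequest.isInitialized() && !emptyRequest.hasSnapshot();

    // mergeFrom copies only the fields that are set on the source message.
    IsSnapshotDoneRequest withSnapshot = IsSnapshotDoneRequest.newBuilder()
        .setSnapshot(SnapshotDescription.newBuilder().setName("snap1").build())
        .build();
    IsSnapshotDoneRequest merged =
        IsSnapshotDoneRequest.newBuilder(emptyRequest).mergeFrom(withSnapshot).build();
    assert merged.hasSnapshot();
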
+
+ public interface IsSnapshotDoneResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // optional bool done = 1 [default = false];
+ boolean hasDone();
+ boolean getDone();
+
+ // optional .SnapshotDescription snapshot = 2;
+ boolean hasSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
+ }
+ public static final class IsSnapshotDoneResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements IsSnapshotDoneResponseOrBuilder {
+ // Use IsSnapshotDoneResponse.newBuilder() to construct.
+ private IsSnapshotDoneResponse(Builder builder) {
+ super(builder);
+ }
+ private IsSnapshotDoneResponse(boolean noInit) {}
+
+ private static final IsSnapshotDoneResponse defaultInstance;
+ public static IsSnapshotDoneResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public IsSnapshotDoneResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // optional bool done = 1 [default = false];
+ public static final int DONE_FIELD_NUMBER = 1;
+ private boolean done_;
+ public boolean hasDone() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public boolean getDone() {
+ return done_;
+ }
+
+ // optional .SnapshotDescription snapshot = 2;
+ public static final int SNAPSHOT_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ return snapshot_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ return snapshot_;
+ }
+
+ private void initFields() {
+ done_ = false;
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, done_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, snapshot_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, done_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, snapshot_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse) obj;
+
+ boolean result = true;
+ result = result && (hasDone() == other.hasDone());
+ if (hasDone()) {
+ result = result && (getDone()
+ == other.getDone());
+ }
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasDone()) {
+ hash = (37 * hash) + DONE_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getDone());
+ }
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_IsSnapshotDoneResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSnapshotFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ done_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.done_ = done_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (snapshotBuilder_ == null) {
+ result.snapshot_ = snapshot_;
+ } else {
+ result.snapshot_ = snapshotBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance()) return this;
+ if (other.hasDone()) {
+ setDone(other.getDone());
+ }
+ if (other.hasSnapshot()) {
+ mergeSnapshot(other.getSnapshot());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (hasSnapshot()) {
+ if (!getSnapshot().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ done_ = input.readBool();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder();
+ if (hasSnapshot()) {
+ subBuilder.mergeFrom(getSnapshot());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setSnapshot(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // optional bool done = 1 [default = false];
+ private boolean done_ ;
+ public boolean hasDone() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public boolean getDone() {
+ return done_;
+ }
+ public Builder setDone(boolean value) {
+ bitField0_ |= 0x00000001;
+ done_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearDone() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ done_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional .SnapshotDescription snapshot = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
+ if (snapshotBuilder_ == null) {
+ return snapshot_;
+ } else {
+ return snapshotBuilder_.getMessage();
+ }
+ }
+ public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ snapshot_ = value;
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder setSnapshot(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = builderForValue.build();
+ onChanged();
+ } else {
+ snapshotBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) {
+ if (snapshotBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
+ snapshot_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
+ } else {
+ snapshot_ = value;
+ }
+ onChanged();
+ } else {
+ snapshotBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder clearSnapshot() {
+ if (snapshotBuilder_ == null) {
+ snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+ onChanged();
+ } else {
+ snapshotBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getSnapshotFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
+ if (snapshotBuilder_ != null) {
+ return snapshotBuilder_.getMessageOrBuilder();
+ } else {
+ return snapshot_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
+ getSnapshotFieldBuilder() {
+ if (snapshotBuilder_ == null) {
+ snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
+ snapshot_,
+ getParentForChildren(),
+ isClean());
+ snapshot_ = null;
+ }
+ return snapshotBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:IsSnapshotDoneResponse)
+ }
+
+ static {
+ defaultInstance = new IsSnapshotDoneResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:IsSnapshotDoneResponse)
+ }
+
public static abstract class MasterAdminService
implements com.google.protobuf.Service {
protected MasterAdminService() {}
@@ -14464,6 +18057,26 @@ public final class MasterAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request,
      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse> done);
+ public abstract void snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse> done);
+
+ public abstract void listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse> done);
+
+ public abstract void deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse> done);
+
+ public abstract void isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -14621,6 +18234,38 @@ public final class MasterAdminProtos {
impl.isCatalogJanitorEnabled(controller, request, done);
}
+ @java.lang.Override
+ public void snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse> done) {
+ impl.snapshot(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse> done) {
+ impl.listSnapshots(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse> done) {
+ impl.deleteSnapshot(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse> done) {
+ impl.isSnapshotDone(controller, request, done);
+ }
+
};
}
@@ -14681,6 +18326,14 @@ public final class MasterAdminProtos {
return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request);
case 18:
return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request);
+ case 19:
+ return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)request);
+ case 20:
+ return impl.listSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)request);
+ case 21:
+ return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)request);
+ case 22:
+ return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -14733,6 +18386,14 @@ public final class MasterAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance();
case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+ case 19:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance();
+ case 20:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance();
+ case 21:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance();
+ case 22:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -14785,6 +18446,14 @@ public final class MasterAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance();
case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+ case 19:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance();
+ case 20:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance();
+ case 21:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance();
+ case 22:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -14888,6 +18557,26 @@ public final class MasterAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request,
      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse> done);
+ public abstract void snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse> done);
+
+ public abstract void listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse> done);
+
+ public abstract void deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse> done);
+
+ public abstract void isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request,
+      com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -15005,6 +18694,26 @@ public final class MasterAdminProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 19:
+ this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 20:
+ this.listSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 21:
+ this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 22:
+ this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -15057,6 +18766,14 @@ public final class MasterAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance();
case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+ case 19:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance();
+ case 20:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance();
+ case 21:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance();
+ case 22:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -15109,6 +18826,14 @@ public final class MasterAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance();
case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+ case 19:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance();
+ case 20:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance();
+ case 21:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance();
+ case 22:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -15414,6 +19139,66 @@ public final class MasterAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()));
}
+
+ public void snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(19),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance()));
+ }
+
+ public void listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(20),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance()));
+ }
+
+ public void deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(21),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance()));
+ }
+
+ public void isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(22),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -15516,6 +19301,26 @@ public final class MasterAdminProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -15752,6 +19557,54 @@ public final class MasterAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse snapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(19),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse listSnapshots(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(20),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse deleteSnapshot(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(21),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse isSnapshotDone(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(22),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance());
+ }
+
}
}
@@ -15945,6 +19798,46 @@ public final class MasterAdminProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TakeSnapshotRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TakeSnapshotRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_TakeSnapshotResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_TakeSnapshotResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ListSnapshotRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ListSnapshotRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ListSnapshotResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ListSnapshotResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteSnapshotRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteSnapshotRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteSnapshotResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteSnapshotResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_IsSnapshotDoneRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_IsSnapshotDoneRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_IsSnapshotDoneResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_IsSnapshotDoneResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -15994,39 +19887,57 @@ public final class MasterAdminProtos {
"\010\"1\n\034EnableCatalogJanitorResponse\022\021\n\tpre" +
"vValue\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledR" +
"equest\"0\n\037IsCatalogJanitorEnabledRespons",
- "e\022\r\n\005value\030\001 \002(\0102\263\t\n\022MasterAdminService\022" +
- "2\n\taddColumn\022\021.AddColumnRequest\032\022.AddCol" +
- "umnResponse\022;\n\014deleteColumn\022\024.DeleteColu" +
- "mnRequest\032\025.DeleteColumnResponse\022;\n\014modi" +
- "fyColumn\022\024.ModifyColumnRequest\032\025.ModifyC" +
- "olumnResponse\0225\n\nmoveRegion\022\022.MoveRegion" +
- "Request\032\023.MoveRegionResponse\022;\n\014assignRe" +
- "gion\022\024.AssignRegionRequest\032\025.AssignRegio" +
- "nResponse\022A\n\016unassignRegion\022\026.UnassignRe" +
- "gionRequest\032\027.UnassignRegionResponse\022>\n\r",
- "offlineRegion\022\025.OfflineRegionRequest\032\026.O" +
- "fflineRegionResponse\0228\n\013deleteTable\022\023.De" +
- "leteTableRequest\032\024.DeleteTableResponse\0228" +
- "\n\013enableTable\022\023.EnableTableRequest\032\024.Ena" +
- "bleTableResponse\022;\n\014disableTable\022\024.Disab" +
- "leTableRequest\032\025.DisableTableResponse\0228\n" +
- "\013modifyTable\022\023.ModifyTableRequest\032\024.Modi" +
- "fyTableResponse\0228\n\013createTable\022\023.CreateT" +
- "ableRequest\032\024.CreateTableResponse\022/\n\010shu" +
- "tdown\022\020.ShutdownRequest\032\021.ShutdownRespon",
- "se\0225\n\nstopMaster\022\022.StopMasterRequest\032\023.S" +
- "topMasterResponse\022,\n\007balance\022\017.BalanceRe" +
- "quest\032\020.BalanceResponse\022M\n\022setBalancerRu" +
- "nning\022\032.SetBalancerRunningRequest\032\033.SetB" +
- "alancerRunningResponse\022;\n\016runCatalogScan" +
- "\022\023.CatalogScanRequest\032\024.CatalogScanRespo" +
- "nse\022S\n\024enableCatalogJanitor\022\034.EnableCata" +
- "logJanitorRequest\032\035.EnableCatalogJanitor" +
- "Response\022\\\n\027isCatalogJanitorEnabled\022\037.Is" +
- "CatalogJanitorEnabledRequest\032 .IsCatalog",
- "JanitorEnabledResponseBG\n*org.apache.had" +
- "oop.hbase.protobuf.generatedB\021MasterAdmi" +
- "nProtosH\001\210\001\001\240\001\001"
+ "e\022\r\n\005value\030\001 \002(\010\"=\n\023TakeSnapshotRequest\022" +
+ "&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescription\"" +
+ ",\n\024TakeSnapshotResponse\022\024\n\014expectedTime\030" +
+ "\001 \002(\003\"\025\n\023ListSnapshotRequest\"?\n\024ListSnap" +
+ "shotResponse\022\'\n\tsnapshots\030\001 \003(\0132\024.Snapsh" +
+ "otDescription\"?\n\025DeleteSnapshotRequest\022&" +
+ "\n\010snapshot\030\001 \002(\0132\024.SnapshotDescription\"/" +
+ "\n\026DeleteSnapshotResponse\022\025\n\007success\030\001 \001(" +
+ "\010:\004true\"?\n\025IsSnapshotDoneRequest\022&\n\010snap" +
+ "shot\030\001 \001(\0132\024.SnapshotDescription\"U\n\026IsSn",
+ "apshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022" +
+ "&\n\010snapshot\030\002 \001(\0132\024.SnapshotDescription2" +
+ "\260\013\n\022MasterAdminService\0222\n\taddColumn\022\021.Ad" +
+ "dColumnRequest\032\022.AddColumnResponse\022;\n\014de" +
+ "leteColumn\022\024.DeleteColumnRequest\032\025.Delet" +
+ "eColumnResponse\022;\n\014modifyColumn\022\024.Modify" +
+ "ColumnRequest\032\025.ModifyColumnResponse\0225\n\n" +
+ "moveRegion\022\022.MoveRegionRequest\032\023.MoveReg" +
+ "ionResponse\022;\n\014assignRegion\022\024.AssignRegi" +
+ "onRequest\032\025.AssignRegionResponse\022A\n\016unas",
+ "signRegion\022\026.UnassignRegionRequest\032\027.Una" +
+ "ssignRegionResponse\022>\n\rofflineRegion\022\025.O" +
+ "fflineRegionRequest\032\026.OfflineRegionRespo" +
+ "nse\0228\n\013deleteTable\022\023.DeleteTableRequest\032" +
+ "\024.DeleteTableResponse\0228\n\013enableTable\022\023.E" +
+ "nableTableRequest\032\024.EnableTableResponse\022" +
+ ";\n\014disableTable\022\024.DisableTableRequest\032\025." +
+ "DisableTableResponse\0228\n\013modifyTable\022\023.Mo" +
+ "difyTableRequest\032\024.ModifyTableResponse\0228" +
+ "\n\013createTable\022\023.CreateTableRequest\032\024.Cre",
+ "ateTableResponse\022/\n\010shutdown\022\020.ShutdownR" +
+ "equest\032\021.ShutdownResponse\0225\n\nstopMaster\022" +
+ "\022.StopMasterRequest\032\023.StopMasterResponse" +
+ "\022,\n\007balance\022\017.BalanceRequest\032\020.BalanceRe" +
+ "sponse\022M\n\022setBalancerRunning\022\032.SetBalanc" +
+ "erRunningRequest\032\033.SetBalancerRunningRes" +
+ "ponse\022;\n\016runCatalogScan\022\023.CatalogScanReq" +
+ "uest\032\024.CatalogScanResponse\022S\n\024enableCata" +
+ "logJanitor\022\034.EnableCatalogJanitorRequest" +
+ "\032\035.EnableCatalogJanitorResponse\022\\\n\027isCat",
+ "alogJanitorEnabled\022\037.IsCatalogJanitorEna" +
+ "bledRequest\032 .IsCatalogJanitorEnabledRes" +
+ "ponse\0227\n\010snapshot\022\024.TakeSnapshotRequest\032" +
+ "\025.TakeSnapshotResponse\022<\n\rlistSnapshots\022" +
+ "\024.ListSnapshotRequest\032\025.ListSnapshotResp" +
+ "onse\022A\n\016deleteSnapshot\022\026.DeleteSnapshotR" +
+ "equest\032\027.DeleteSnapshotResponse\022A\n\016isSna" +
+ "pshotDone\022\026.IsSnapshotDoneRequest\032\027.IsSn" +
+ "apshotDoneResponseBG\n*org.apache.hadoop." +
+ "hbase.protobuf.generatedB\021MasterAdminPro",
+ "tosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16337,6 +20248,70 @@ public final class MasterAdminProtos {
new java.lang.String[] { "Value", },
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.Builder.class);
+ internal_static_TakeSnapshotRequest_descriptor =
+ getDescriptor().getMessageTypes().get(38);
+ internal_static_TakeSnapshotRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TakeSnapshotRequest_descriptor,
+ new java.lang.String[] { "Snapshot", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.Builder.class);
+ internal_static_TakeSnapshotResponse_descriptor =
+ getDescriptor().getMessageTypes().get(39);
+ internal_static_TakeSnapshotResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_TakeSnapshotResponse_descriptor,
+ new java.lang.String[] { "ExpectedTime", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.Builder.class);
+ internal_static_ListSnapshotRequest_descriptor =
+ getDescriptor().getMessageTypes().get(40);
+ internal_static_ListSnapshotRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ListSnapshotRequest_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.Builder.class);
+ internal_static_ListSnapshotResponse_descriptor =
+ getDescriptor().getMessageTypes().get(41);
+ internal_static_ListSnapshotResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ListSnapshotResponse_descriptor,
+ new java.lang.String[] { "Snapshots", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.Builder.class);
+ internal_static_DeleteSnapshotRequest_descriptor =
+ getDescriptor().getMessageTypes().get(42);
+ internal_static_DeleteSnapshotRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DeleteSnapshotRequest_descriptor,
+ new java.lang.String[] { "Snapshot", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.Builder.class);
+ internal_static_DeleteSnapshotResponse_descriptor =
+ getDescriptor().getMessageTypes().get(43);
+ internal_static_DeleteSnapshotResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DeleteSnapshotResponse_descriptor,
+ new java.lang.String[] { "Success", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.Builder.class);
+ internal_static_IsSnapshotDoneRequest_descriptor =
+ getDescriptor().getMessageTypes().get(44);
+ internal_static_IsSnapshotDoneRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_IsSnapshotDoneRequest_descriptor,
+ new java.lang.String[] { "Snapshot", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.Builder.class);
+ internal_static_IsSnapshotDoneResponse_descriptor =
+ getDescriptor().getMessageTypes().get(45);
+ internal_static_IsSnapshotDoneResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_IsSnapshotDoneResponse_descriptor,
+ new java.lang.String[] { "Done", "Snapshot", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.Builder.class);
return null;
}
};
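
The generated IsSnapshotDoneResponse above exposes the standard protobuf-java 2.x surface: a Builder, parseFrom overloads for byte arrays and streams, and delimited variants that return null at a clean end-of-stream. A minimal round-trip sketch against the generated classes follows (the snapshot name is an arbitrary example, not part of this patch):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;

public class IsSnapshotDoneRoundTrip {
  public static void main(String[] args) throws Exception {
    // name is the only required SnapshotDescription field, so build() succeeds here.
    IsSnapshotDoneResponse resp = IsSnapshotDoneResponse.newBuilder()
        .setDone(true)
        .setSnapshot(SnapshotDescription.newBuilder().setName("demo").build())
        .build();

    // Round trip through the wire format.
    IsSnapshotDoneResponse copy = IsSnapshotDoneResponse.parseFrom(resp.toByteArray());
    System.out.println("done=" + copy.getDone() + " snapshot=" + copy.getSnapshot().getName());

    // Delimited framing: writeDelimitedTo length-prefixes each message, and
    // parseDelimitedFrom returns null once the stream is exhausted.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    resp.writeDelimitedTo(out);
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    IsSnapshotDoneResponse next;
    while ((next = IsSnapshotDoneResponse.parseDelimitedFrom(in)) != null) {
      System.out.println("framed message, done=" + next.getDone());
    }
  }
}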
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
new file mode 100644
index 0000000..cf80de7
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * General exception when a snapshot fails.
+ */
+@SuppressWarnings("serial")
+public class HBaseSnapshotException extends IOException {
+
+ private SnapshotDescription description;
+
+ public HBaseSnapshotException(String msg) {
+ super(msg);
+ }
+
+ public HBaseSnapshotException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ public HBaseSnapshotException(Throwable cause) {
+ super(cause);
+ }
+
+ public HBaseSnapshotException(String msg, SnapshotDescription desc) {
+ super(msg);
+ this.description = desc;
+ }
+
+ public HBaseSnapshotException(Throwable cause, SnapshotDescription desc) {
+ super(cause);
+ this.description = desc;
+ }
+
+ public HBaseSnapshotException(String msg, Throwable cause, SnapshotDescription desc) {
+ super(msg, cause);
+ this.description = desc;
+ }
+
+ public SnapshotDescription getSnapshotDescription() {
+ return this.description;
+ }
+}
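
Because HBaseSnapshotException carries the SnapshotDescription, server-side code can attach the failing snapshot and callers can recover it for error reporting. A small sketch under that assumption; ensureTableIsSet is a hypothetical validation helper invented for illustration, not part of this patch:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;

public class SnapshotErrorHandling {
  // Hypothetical validation: reject a snapshot request that names no table.
  static void ensureTableIsSet(SnapshotDescription desc) throws HBaseSnapshotException {
    if (!desc.hasTable()) {
      throw new HBaseSnapshotException("snapshot must name a table", desc);
    }
  }

  public static void main(String[] args) {
    SnapshotDescription desc = SnapshotDescription.newBuilder().setName("nightly").build();
    try {
      ensureTableIsSet(desc);
    } catch (HBaseSnapshotException e) {
      // The failing description travels with the exception.
      System.err.println("failed snapshot: " + e.getSnapshotDescription().getName());
    }
  }
}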
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java
new file mode 100644
index 0000000..c6cb71c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCreationException.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+/**
+ * Thrown when a snapshot could not be created due to a server-side error while taking the snapshot.
+ */
+@SuppressWarnings("serial")
+public class SnapshotCreationException extends HBaseSnapshotException {
+
+ public SnapshotCreationException(String msg, SnapshotDescription desc) {
+ super(msg, desc);
+ }
+
+ public SnapshotCreationException(String msg, Throwable cause, SnapshotDescription desc) {
+ super(msg, cause, desc);
+ }
+
+ public SnapshotCreationException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+
+ public SnapshotCreationException(String msg) {
+ super(msg);
+ }
+
+ public SnapshotCreationException(Throwable cause, SnapshotDescription desc) {
+ super(cause, desc);
+ }
+
+ public SnapshotCreationException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/hbase-server/src/main/protobuf/MasterAdmin.proto b/hbase-server/src/main/protobuf/MasterAdmin.proto
index f3e193e..ece40c6 100644
--- a/hbase-server/src/main/protobuf/MasterAdmin.proto
+++ b/hbase-server/src/main/protobuf/MasterAdmin.proto
@@ -176,6 +176,41 @@ message IsCatalogJanitorEnabledResponse {
required bool value = 1;
}
+message TakeSnapshotRequest{
+ required SnapshotDescription snapshot = 1;
+}
+
+message TakeSnapshotResponse{
+ required int64 expectedTime = 1;
+}
+
+message ListSnapshotRequest{
+}
+
+message ListSnapshotResponse{
+ repeated SnapshotDescription snapshots = 1;
+}
+
+message DeleteSnapshotRequest{
+ required SnapshotDescription snapshot = 1;
+}
+
+message DeleteSnapshotResponse{
+ optional bool success = 1 [default = true];
+}
+
+/* If the request does not include the snapshot, the response returns the
+ * description of the snapshot that is done, so the caller can check which
+ * snapshot completed.
+ */
+message IsSnapshotDoneRequest{
+ optional SnapshotDescription snapshot = 1;
+}
+
+message IsSnapshotDoneResponse{
+ optional bool done = 1 [default = false];
+ optional SnapshotDescription snapshot = 2;
+}
+
service MasterAdminService {
/** Adds a column to the specified table. */
rpc addColumn(AddColumnRequest)
@@ -273,4 +308,27 @@ service MasterAdminService {
*/
rpc isCatalogJanitorEnabled(IsCatalogJanitorEnabledRequest)
returns(IsCatalogJanitorEnabledResponse);
-}
+
+ /**
+ * Create a snapshot for the given table.
+ * @param snapshot description of the snapshot to take
+ */
+ rpc snapshot(TakeSnapshotRequest) returns(TakeSnapshotResponse);
+
+ /**
+ * List existing snapshots.
+ * @return a list of snapshot descriptors
+ */
+ rpc listSnapshots(ListSnapshotRequest) returns(ListSnapshotResponse);
+
+ /**
+   * Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
+   * @param snapshot description of the snapshot to delete
+ */
+ rpc deleteSnapshot(DeleteSnapshotRequest) returns(DeleteSnapshotResponse);
+
+ /**
+ * Determine if the snapshot is done yet.
+ */
+ rpc isSnapshotDone(IsSnapshotDoneRequest) returns(IsSnapshotDoneResponse);
+}
\ No newline at end of file
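
Taken together, the new RPCs form a start-then-poll protocol: snapshot returns an expected completion time, and isSnapshotDone is polled until done flips to true. A sketch of that sequence against the generated blocking stub; how the stub is obtained from an RPC channel is elided, and the sleep pacing is an assumption of this sketch, not something the patch prescribes:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;

public class SnapshotClientFlow {
  static void takeAndWait(MasterAdminService.BlockingInterface master) throws Exception {
    SnapshotDescription snap = SnapshotDescription.newBuilder()
        .setName("demo").setTable("usertable").build();

    // Start the snapshot; the response carries the master's expected completion time.
    TakeSnapshotResponse started = master.snapshot(null,
        TakeSnapshotRequest.newBuilder().setSnapshot(snap).build());
    long expectedMillis = started.getExpectedTime();

    // Poll until the master reports the snapshot as done.
    IsSnapshotDoneRequest poll = IsSnapshotDoneRequest.newBuilder().setSnapshot(snap).build();
    while (!master.isSnapshotDone(null, poll).getDone()) {
      Thread.sleep(Math.max(100, expectedMillis / 10)); // crude pacing, illustration only
    }
  }
}

Passing null as the RpcController matches how the accompanying test mocks these calls.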
diff --git a/hbase-server/src/main/protobuf/hbase.proto b/hbase-server/src/main/protobuf/hbase.proto
index b953e47..c5fff74 100644
--- a/hbase-server/src/main/protobuf/hbase.proto
+++ b/hbase-server/src/main/protobuf/hbase.proto
@@ -261,3 +261,17 @@ message BytesBytesPair {
required bytes first = 1;
required bytes second = 2;
}
+
+/**
+ * Description of the snapshot to take
+ */
+message SnapshotDescription {
+ required string name = 1;
+  optional string table = 2; // not needed for delete, but checked when taking a snapshot
+ optional int64 creationTime = 3 [default = 0];
+ enum Type {
+ TIMESTAMP = 0;
+ GLOBAL = 1;
+ }
+ optional Type type = 4 [default = TIMESTAMP];
+}
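
Only name is required above, so the generated builder enforces just that; the declared defaults (creationTime = 0, type = TIMESTAMP) apply whenever the optional fields are left unset. A short sketch of the generated builder API (table and snapshot names are arbitrary examples):

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class BuildSnapshotDescription {
  public static void main(String[] args) {
    // Minimal description: only the required name is set; defaults fill the rest.
    SnapshotDescription minimal = SnapshotDescription.newBuilder().setName("snap1").build();
    System.out.println(minimal.getCreationTime()); // 0 (declared default)
    System.out.println(minimal.getType());         // TIMESTAMP (declared default)

    // Fully specified description for a global snapshot of a table.
    SnapshotDescription full = SnapshotDescription.newBuilder()
        .setName("snap2")
        .setTable("usertable")
        .setCreationTime(System.currentTimeMillis())
        .setType(SnapshotDescription.Type.GLOBAL)
        .build();
    System.out.println(full);
  }
}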
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java
new file mode 100644
index 0000000..05ddb57
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotsFromAdmin.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.protobuf.RpcController;
+
+/**
+ * Test snapshot logic from the client
+ */
+@Category(SmallTests.class)
+public class TestSnapshotsFromAdmin {
+
+ private static final Log LOG = LogFactory.getLog(TestSnapshotsFromAdmin.class);
+
+ /**
+   * Test that the back-off logic, which combines exponential increase with the max completion
+   * time returned by the server, gives the correct overall wait for the snapshot to finish.
+ * @throws Exception
+ */
+ @Test(timeout = 10000)
+ public void testBackoffLogic() throws Exception {
+ final int maxWaitTime = 7500;
+ final int numRetries = 10;
+ final int pauseTime = 500;
+    // calculate the wait time if we just did straight backoff (ignoring the expected time
+    // from the master)
+ long ignoreExpectedTime = 0;
+ for (int i = 0; i < 6; i++) {
+ ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
+ }
+    // the correct wait time, capped at maxWaitTime/numRetries per attempt, plus fudge room
+ final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
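+    // With pauseTime = 500 and maxWaitTime/numRetries = 7500/10 = 750, this works out to
+    // 3*500 + 3*750 + 300 = 4050 ms, while six rounds of straight backoff (multipliers
+    // 1+1+1+2+2+4 = 11, assuming the usual HConstants.RETRY_BACKOFF table) would be
+    // 11 * 500 = 5500 ms, so the assertion below is meaningful.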
+    assertTrue("Capped snapshot wait time isn't less than the uncapped backoff time "
+ + "- further testing won't prove anything.", time < ignoreExpectedTime);
+
+ // setup the mocks
+ HConnectionManager.HConnectionImplementation mockConnection = Mockito
+ .mock(HConnectionManager.HConnectionImplementation.class);
+ Configuration conf = HBaseConfiguration.create();
+ // setup the conf to match the expected properties
+ conf.setInt("hbase.client.retries.number", numRetries);
+ conf.setLong("hbase.client.pause", pauseTime);
+    // point the connection's master admin at our mock
+ MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class);
+ Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+ Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(mockMaster);
+ // set the max wait time for the snapshot to complete
+ TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder().setExpectedTime(maxWaitTime)
+ .build();
+ Mockito
+ .when(
+ mockMaster.snapshot((RpcController) Mockito.isNull(),
+ Mockito.any(TakeSnapshotRequest.class))).thenReturn(response);
+ // setup the response
+ IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
+ builder.setDone(false);
+    // return false for the first five polls, then report success on the last
+ Mockito.when(
+ mockMaster.isSnapshotDone((RpcController) Mockito.isNull(),
+ Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(builder.build(), builder.build(),
+ builder.build(), builder.build(), builder.build(), builder.setDone(true).build());
+
+ // setup the admin and run the test
+ HBaseAdmin admin = new HBaseAdmin(mockConnection);
+    String snapshot = "snapshot";
+ String table = "table";
+ // get start time
+ long start = System.currentTimeMillis();
+ admin.snapshot(snapshot, table);
+ long finish = System.currentTimeMillis();
+ long elapsed = (finish - start);
+ assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
+ }
+
+ /**
+ * Make sure that we validate the snapshot name and the table name before we pass anything across
+   * the wire.
+ * @throws IOException on failure
+ */
+ @Test
+ public void testValidateSnapshotName() throws IOException {
+ HConnectionManager.HConnectionImplementation mockConnection = Mockito
+ .mock(HConnectionManager.HConnectionImplementation.class);
+ Configuration conf = HBaseConfiguration.create();
+ Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
+ HBaseAdmin admin = new HBaseAdmin(mockConnection);
+ SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
+ // check that invalid snapshot names fail
+ failSnapshotStart(admin, builder.setName(".snapshot").build());
+ failSnapshotStart(admin, builder.setName("-snapshot").build());
+ failSnapshotStart(admin, builder.setName("snapshot fails").build());
+ failSnapshotStart(admin, builder.setName("snap$hot").build());
+    // check that the table name also gets verified
+ failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build());
+ failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build());
+ failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build());
+ failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
+ }
+
+ private void failSnapshotStart(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException {
+ try {
+ admin.snapshot(snapshot);
+      fail("Snapshot should not have succeeded with name:" + snapshot.getName());
+ } catch (IllegalArgumentException e) {
+ LOG.debug("Correctly failed to start snapshot:" + e.getMessage());
+ }
+ }
+}
\ No newline at end of file