diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java deleted file mode 100644 index be5ffea..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ /dev/null @@ -1,504 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup; - -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; -import org.apache.hadoop.hbase.util.Bytes; - - -/** - * An object to encapsulate the information for each backup request - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupInfo implements Comparable { - private static final Log LOG = LogFactory.getLog(BackupInfo.class); - - public static interface Filter { - - /** - * Filter interface - * @param info: backup info - * @return true if info passes filter, false otherwise - */ - public boolean apply(BackupInfo info); - } - // backup status flag - public static enum BackupState { - WAITING, RUNNING, COMPLETE, FAILED, ANY; - } - - // backup phase - public static enum BackupPhase { - SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; - } - - // backup id: a timestamp when we request the backup - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // target root directory for storing the backup files - private String targetRootDir; - - // overall backup state - private BackupState state; - - // overall backup phase - private BackupPhase phase; - - // overall backup failure message - private String failedMsg; - - // backup status map for all tables - private Map backupStatusMap; - - // actual start timestamp of the backup process - private long startTs; - - // actual end timestamp of the backup process, could be fail or complete - private long endTs; - - // the total bytes of incremental logs copied - private long 
totalBytesCopied; - - // for incremental backup, the location of the backed-up hlogs - private String hlogTargetDir = null; - - // incremental backup file list - transient private List incrBackupFileList; - - // new region server log timestamps for table set after distributed log roll - // key - table name, value - map of RegionServer hostname -> last log rolled timestamp - transient private HashMap> tableSetTimestampMap; - - // backup progress in %% (0-100) - private int progress; - - // distributed job id - private String jobId; - - // Number of parallel workers. -1 - system defined - private int workers = -1; - - // Bandwidth per worker in MB per sec. -1 - unlimited - private long bandwidth = -1; - - public BackupInfo() { - backupStatusMap = new HashMap(); - } - - public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { - this(); - this.backupId = backupId; - this.type = type; - this.targetRootDir = targetRootDir; - if (LOG.isDebugEnabled()) { - LOG.debug("CreateBackupContext: " + tables.length + " " + tables[0]); - } - this.addTables(tables); - - if (type == BackupType.INCREMENTAL) { - setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); - } - - this.startTs = 0; - this.endTs = 0; - } - - public String getJobId() { - return jobId; - } - - public void setJobId(String jobId) { - this.jobId = jobId; - } - - public int getWorkers() { - return workers; - } - - public void setWorkers(int workers) { - this.workers = workers; - } - - public long getBandwidth() { - return bandwidth; - } - - public void setBandwidth(long bandwidth) { - this.bandwidth = bandwidth; - } - - public void setBackupStatusMap(Map backupStatusMap) { - this.backupStatusMap = backupStatusMap; - } - - public HashMap> getTableSetTimestampMap() { - return tableSetTimestampMap; - } - - public void - setTableSetTimestampMap(HashMap> tableSetTimestampMap) { - this.tableSetTimestampMap = tableSetTimestampMap; - } - - public String getHlogTargetDir() { - return hlogTargetDir; - } - - public void setType(BackupType type) { - this.type = type; - } - - public void setTargetRootDir(String targetRootDir) { - this.targetRootDir = targetRootDir; - } - - public void setTotalBytesCopied(long totalBytesCopied) { - this.totalBytesCopied = totalBytesCopied; - } - - /** - * Set progress (0-100%) - * @param msg progress value - */ - - public void setProgress(int p) { - this.progress = p; - } - - /** - * Get current progress - */ - public int getProgress() { - return progress; - } - - public String getBackupId() { - return backupId; - } - - public void setBackupId(String backupId) { - this.backupId = backupId; - } - - public BackupStatus getBackupStatus(TableName table) { - return this.backupStatusMap.get(table); - } - - public String getFailedMsg() { - return failedMsg; - } - - public void setFailedMsg(String failedMsg) { - this.failedMsg = failedMsg; - } - - public long getStartTs() { - return startTs; - } - - public void setStartTs(long startTs) { - this.startTs = startTs; - } - - public long getEndTs() { - return endTs; - } - - public void setEndTs(long endTs) { - this.endTs = endTs; - } - - public long getTotalBytesCopied() { - return totalBytesCopied; - } - - public BackupState getState() { - return state; - } - - public void setState(BackupState flag) { - this.state = flag; - } - - public BackupPhase getPhase() { - return phase; - } - - public void setPhase(BackupPhase phase) { - this.phase = phase; - } - - public BackupType getType() { - return type; - } - - public void 
setSnapshotName(TableName table, String snapshotName) { - this.backupStatusMap.get(table).setSnapshotName(snapshotName); - } - - public String getSnapshotName(TableName table) { - return this.backupStatusMap.get(table).getSnapshotName(); - } - - public List getSnapshotNames() { - List snapshotNames = new ArrayList(); - for (BackupStatus backupStatus : this.backupStatusMap.values()) { - snapshotNames.add(backupStatus.getSnapshotName()); - } - return snapshotNames; - } - - public Set getTables() { - return this.backupStatusMap.keySet(); - } - - public List getTableNames() { - return new ArrayList(backupStatusMap.keySet()); - } - - public void addTables(TableName[] tables) { - for (TableName table : tables) { - BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); - this.backupStatusMap.put(table, backupStatus); - } - } - - public void setTables(List tables) { - this.backupStatusMap.clear(); - for (TableName table : tables) { - BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); - this.backupStatusMap.put(table, backupStatus); - } - } - - public String getTargetRootDir() { - return targetRootDir; - } - - public void setHlogTargetDir(String hlogTagetDir) { - this.hlogTargetDir = hlogTagetDir; - } - - public String getHLogTargetDir() { - return hlogTargetDir; - } - - public List getIncrBackupFileList() { - return incrBackupFileList; - } - - public void setIncrBackupFileList(List incrBackupFileList) { - this.incrBackupFileList = incrBackupFileList; - } - - /** - * Set the new region server log timestamps after distributed log roll - * @param newTableSetTimestampMap table timestamp map - */ - public void - setIncrTimestampMap(HashMap> newTableSetTimestampMap) { - this.tableSetTimestampMap = newTableSetTimestampMap; - } - - /** - * Get new region server log timestamps after distributed log roll - * @return new region server log timestamps - */ - public HashMap> getIncrTimestampMap() { - return this.tableSetTimestampMap; - } - - public TableName getTableBySnapshot(String snapshotName) { - for (Entry entry : this.backupStatusMap.entrySet()) { - if (snapshotName.equals(entry.getValue().getSnapshotName())) { - return entry.getKey(); - } - } - return null; - } - - public BackupProtos.BackupInfo toProtosBackupInfo() { - BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder(); - builder.setBackupId(getBackupId()); - setBackupStatusMap(builder); - builder.setEndTs(getEndTs()); - if (getFailedMsg() != null) { - builder.setFailedMessage(getFailedMsg()); - } - if (getState() != null) { - builder.setState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name())); - } - if (getPhase() != null) { - builder.setPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name())); - } - - builder.setProgress(getProgress()); - builder.setStartTs(getStartTs()); - builder.setTargetRootDir(getTargetRootDir()); - builder.setType(BackupProtos.BackupType.valueOf(getType().name())); - builder.setWorkersNumber(workers); - builder.setBandwidth(bandwidth); - if (jobId != null) { - builder.setJobId(jobId); - } - return builder.build(); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof BackupInfo) { - BackupInfo other = (BackupInfo) obj; - try { - return Bytes.equals(toByteArray(), other.toByteArray()); - } catch (IOException e) { - LOG.error(e); - return false; - } - } else { - return false; - } - } - - public byte[] toByteArray() throws IOException { - return toProtosBackupInfo().toByteArray(); - } - 
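// Illustrative sketch only (not part of the patch): round-trip a BackupInfo through its
// protobuf wire form using toByteArray() above and fromByteArray(), defined a few lines below.
// The table name and backup root are hypothetical placeholders; imports as in the class above.
static BackupInfo roundTripExample() throws IOException {
  BackupInfo info = new BackupInfo("backup_" + System.currentTimeMillis(), BackupType.FULL,
      new TableName[] { TableName.valueOf("test_table") }, "hdfs://nn:8020/backup");
  info.setState(BackupInfo.BackupState.RUNNING);
  byte[] bytes = info.toByteArray();            // serialized BackupProtos.BackupInfo
  return BackupInfo.fromByteArray(bytes);       // compares equal to info (byte-wise equals)
}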
- private void setBackupStatusMap(Builder builder) { - for (Entry entry : backupStatusMap.entrySet()) { - builder.addTableBackupStatus(entry.getValue().toProto()); - } - } - - public static BackupInfo fromByteArray(byte[] data) throws IOException { - return fromProto(BackupProtos.BackupInfo.parseFrom(data)); - } - - public static BackupInfo fromStream(final InputStream stream) throws IOException { - return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); - } - - public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { - BackupInfo context = new BackupInfo(); - context.setBackupId(proto.getBackupId()); - context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); - context.setEndTs(proto.getEndTs()); - if (proto.hasFailedMessage()) { - context.setFailedMsg(proto.getFailedMessage()); - } - if (proto.hasState()) { - context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); - } - - context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), - proto.getBackupId())); - - if (proto.hasPhase()) { - context.setPhase(BackupPhase.valueOf(proto.getPhase().name())); - } - if (proto.hasProgress()) { - context.setProgress(proto.getProgress()); - } - context.setStartTs(proto.getStartTs()); - context.setTargetRootDir(proto.getTargetRootDir()); - context.setType(BackupType.valueOf(proto.getType().name())); - context.setWorkers(proto.getWorkersNumber()); - context.setBandwidth(proto.getBandwidth()); - if (proto.hasJobId()) { - context.setJobId(proto.getJobId()); - } - return context; - } - - private static Map toMap(List list) { - HashMap map = new HashMap<>(); - for (TableBackupStatus tbs : list) { - map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); - } - return map; - } - - public String getShortDescription() { - StringBuilder sb = new StringBuilder(); - sb.append("ID : " + backupId).append("\n"); - sb.append("Type : " + getType()).append("\n"); - sb.append("Tables : " + getTableListAsString()).append("\n"); - sb.append("State : " + getState()).append("\n"); - Date date = null; - Calendar cal = Calendar.getInstance(); - cal.setTimeInMillis(getStartTs()); - date = cal.getTime(); - sb.append("Start time : " + date).append("\n"); - if (state == BackupState.FAILED) { - sb.append("Failed message : " + getFailedMsg()).append("\n"); - } else if (state == BackupState.RUNNING) { - sb.append("Phase : " + getPhase()).append("\n"); - } else if (state == BackupState.COMPLETE) { - cal = Calendar.getInstance(); - cal.setTimeInMillis(getEndTs()); - date = cal.getTime(); - sb.append("End time : " + date).append("\n"); - } - sb.append("Progress : " + getProgress()).append("\n"); - return sb.toString(); - } - - public String getStatusAndProgressAsString() { - StringBuilder sb = new StringBuilder(); - sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) - .append(" progress: ").append(getProgress()); - return sb.toString(); - } - - public String getTableListAsString() { - return StringUtils.join(backupStatusMap.keySet(), ","); - } - - @Override - public int compareTo(BackupInfo o) { - Long thisTS = new Long(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); - Long otherTS = new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); - return thisTS.compareTo(otherTS); - } - -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java deleted file mode 100644 
index d141239..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup; - -import java.util.List; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * POJO class for backup request - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public final class BackupRequest { - private BackupType type; - private List tableList; - private String targetRootDir; - private int workers = -1; - private long bandwidth = -1L; - private String backupSetName; - - public BackupRequest() { - } - - public BackupRequest setBackupType(BackupType type) { - this.type = type; - return this; - } - public BackupType getBackupType() { - return this.type; - } - - public BackupRequest setTableList(List tableList) { - this.tableList = tableList; - return this; - } - public List getTableList() { - return this.tableList; - } - - public BackupRequest setTargetRootDir(String targetRootDir) { - this.targetRootDir = targetRootDir; - return this; - } - public String getTargetRootDir() { - return this.targetRootDir; - } - - public BackupRequest setWorkers(int workers) { - this.workers = workers; - return this; - } - public int getWorkers() { - return this.workers; - } - - public BackupRequest setBandwidth(long bandwidth) { - this.bandwidth = bandwidth; - return this; - } - public long getBandwidth() { - return this.bandwidth; - } - - public String getBackupSetName() { - return backupSetName; - } - - public void setBackupSetName(String backupSetName) { - this.backupSetName = backupSetName; - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java deleted file mode 100644 index c82e05a..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup; - -import java.io.Serializable; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; - -/** - * Backup status and related information encapsulated for a table. - * At this moment only TargetDir and SnapshotName is encapsulated here. - */ - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupStatus implements Serializable { - - private static final long serialVersionUID = -5968397963548535982L; - - // table name for backup - private TableName table; - - // target directory of the backup image for this table - private String targetDir; - - // snapshot name for offline/online snapshot - private String snapshotName = null; - - public BackupStatus() { - - } - - public BackupStatus(TableName table, String targetRootDir, String backupId) { - this.table = table; - this.targetDir = BackupClientUtil.getTableBackupDir(targetRootDir, backupId, table); - } - - public String getSnapshotName() { - return snapshotName; - } - - public void setSnapshotName(String snapshotName) { - this.snapshotName = snapshotName; - } - - public String getTargetDir() { - return targetDir; - } - - public TableName getTable() { - return table; - } - - public void setTable(TableName table) { - this.table = table; - } - - public void setTargetDir(String targetDir) { - this.targetDir = targetDir; - } - - public static BackupStatus convert(BackupProtos.TableBackupStatus proto) - { - BackupStatus bs = new BackupStatus(); - bs.setTable(ProtobufUtil.toTableName(proto.getTable())); - bs.setTargetDir(proto.getTargetDir()); - if(proto.hasSnapshot()){ - bs.setSnapshotName(proto.getSnapshot()); - } - return bs; - } - - public BackupProtos.TableBackupStatus toProto() { - BackupProtos.TableBackupStatus.Builder builder = - BackupProtos.TableBackupStatus.newBuilder(); - if(snapshotName != null) { - builder.setSnapshot(snapshotName); - } - builder.setTable(ProtobufUtil.toProtoTableName(table)); - builder.setTargetDir(targetDir); - return builder.build(); - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java deleted file mode 100644 index 7490d20..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.backup; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * POJO class for restore request - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class RestoreRequest { - - private String backupRootDir; - private String backupId; - private boolean check = false; - private TableName[] fromTables; - private TableName[] toTables; - private boolean overwrite = false; - - public RestoreRequest() { - } - - public String getBackupRootDir() { - return backupRootDir; - } - - public RestoreRequest setBackupRootDir(String backupRootDir) { - this.backupRootDir = backupRootDir; - return this; - } - - public String getBackupId() { - return backupId; - } - - public RestoreRequest setBackupId(String backupId) { - this.backupId = backupId; - return this; - } - - public boolean isCheck() { - return check; - } - - public RestoreRequest setCheck(boolean check) { - this.check = check; - return this; - } - - public TableName[] getFromTables() { - return fromTables; - } - - public RestoreRequest setFromTables(TableName[] fromTables) { - this.fromTables = fromTables; - return this; - } - - public TableName[] getToTables() { - return toTables; - } - - public RestoreRequest setToTables(TableName[] toTables) { - this.toTables = toTables; - return this; - } - - public boolean isOverwrite() { - return overwrite; - } - - public RestoreRequest setOverwrite(boolean overwrite) { - this.overwrite = overwrite; - return this; - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java deleted file mode 100644 index 2ff5756..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ /dev/null @@ -1,717 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
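// Illustrative sketch only (not part of the patch): the BackupRequest and RestoreRequest POJOs
// above are assembled with chained setters. Paths, table names and the backup id below are
// hypothetical placeholders; imports as in the surrounding classes, plus java.util.Arrays.
static void buildRequestsExample() {
  BackupRequest backup = new BackupRequest()
      .setBackupType(BackupType.FULL)
      .setTableList(Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")))
      .setTargetRootDir("hdfs://nn:8020/backup")
      .setWorkers(3)              // -1 lets the system decide
      .setBandwidth(100);         // MB/s per worker, -1 means unlimited

  RestoreRequest restore = new RestoreRequest()
      .setBackupRootDir("hdfs://nn:8020/backup")
      .setBackupId("backup_1474054000000")
      .setFromTables(new TableName[] { TableName.valueOf("t1") })
      .setToTables(new TableName[] { TableName.valueOf("t1_restored") })
      .setOverwrite(false);
  // The requests are then handed to the backup/restore admin API (see BackupCommands below).
}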
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.List; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.BackupSet; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.BackupAdmin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; - -import com.google.common.collect.Lists; - -/** - * General backup commands, options and usage messages - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class BackupCommands { - - public final static String INCORRECT_USAGE = "Incorrect usage"; - - public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n" - + "where COMMAND is one of:\n" - + " create create a new backup image\n" - + " delete delete an existing backup image\n" - + " describe show the detailed information of a backup image\n" - + " history show history of all successful backups\n" - + " progress show the progress of the latest backup request\n" - + " set backup set management\n" - + "Run \'hbase backup COMMAND -h\' to see help message for each command\n"; - - public static final String CREATE_CMD_USAGE = - "Usage: hbase backup create <type> <BACKUP_ROOT> [tables] [-set name] " - + "[-w workers][-b bandwidth]\n" - + " type \"full\" to create a full backup image\n" - + " \"incremental\" to create an incremental backup image\n" - + " BACKUP_ROOT The full root path to store the backup image,\n" - + " the prefix can be hdfs, webhdfs or gpfs\n" - + "Options:\n" - + " tables If no tables (\"\") are specified, all tables are backed up.\n" - + " Otherwise it is a comma separated list of tables.\n" - + " -w number of parallel workers (MapReduce tasks).\n" - + " -b bandwidth per one worker (MapReduce task) in MBs per sec\n" - + " -set name of backup set to use (mutually exclusive with [tables])" ; - - public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress <backupId>\n" - + " backupId backup image id\n"; - public static final String NO_INFO_FOUND = "No info was found for backup id: "; - - public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe <backupId>\n" - + " backupId backup image id\n"; - - public static final String HISTORY_CMD_USAGE = - "Usage: hbase backup history [-path BACKUP_ROOT] [-n N] [-t table]\n" - + " -n N show up to N last backup sessions, default - 10\n" - + " -path backup root path\n" - + " -t table table name. If specified, only backup images which contain this table\n" - + " will be listed."
; - - - public static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n" - + " backupId backup image id\n"; - - public static final String CANCEL_CMD_USAGE = "Usage: hbase backup cancel \n" - + " backupId backup image id\n"; - - public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" - + " name Backup set name\n" - + " tables If no tables (\"\") are specified, all tables will belong to the set.\n" - + " Otherwise it is a comma separated list of tables.\n" - + "COMMAND is one of:\n" - + " add add tables to a set, create a set if needed\n" - + " remove remove tables from a set\n" - + " list list all backup sets in the system\n" - + " describe describe set\n" - + " delete delete backup set\n"; - - public static abstract class Command extends Configured { - CommandLine cmdline; - - Command(Configuration conf) { - super(conf); - } - - public void execute() throws IOException - { - if (cmdline.hasOption("h") || cmdline.hasOption("help")) { - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } - - protected abstract void printUsage(); - } - - private BackupCommands() { - throw new AssertionError("Instantiating utility class..."); - } - - public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) { - Command cmd = null; - switch (type) { - case CREATE: - cmd = new CreateCommand(conf, cmdline); - break; - case DESCRIBE: - cmd = new DescribeCommand(conf, cmdline); - break; - case PROGRESS: - cmd = new ProgressCommand(conf, cmdline); - break; - case DELETE: - cmd = new DeleteCommand(conf, cmdline); - break; - case CANCEL: - cmd = new CancelCommand(conf, cmdline); - break; - case HISTORY: - cmd = new HistoryCommand(conf, cmdline); - break; - case SET: - cmd = new BackupSetCommand(conf, cmdline); - break; - case HELP: - default: - cmd = new HelpCommand(conf, cmdline); - break; - } - return cmd; - } - - static int numOfArgs(String[] args) { - if (args == null) return 0; - return args.length; - } - - public static class CreateCommand extends Command { - - CreateCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - if (cmdline == null || cmdline.getArgs() == null) { - System.err.println("ERROR: missing arguments"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - String[] args = cmdline.getArgs(); - if (args.length < 3 || args.length > 4) { - System.err.println("ERROR: wrong number of arguments: "+ args.length); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - if (!BackupType.FULL.toString().equalsIgnoreCase(args[1]) - && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) { - System.err.println("ERROR: invalid backup type: "+ args[1]); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String tables = null; - Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - - // Check backup set - String setName = null; - if (cmdline.hasOption("set")) { - setName = cmdline.getOptionValue("set"); - tables = getTablesForSet(setName, conf); - - if (tables == null) { - System.err.println("ERROR: Backup set '" + setName+ "' is either empty or does not exist"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } else { - tables = (args.length == 4) ? args[3] : null; - } - int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1; - int workers = cmdline.hasOption('w') ? 
Integer.parseInt(cmdline.getOptionValue('w')) : -1; - - try (Connection conn = ConnectionFactory.createConnection(getConf()); - Admin admin = conn.getAdmin(); - BackupAdmin backupAdmin = admin.getBackupAdmin();) { - BackupRequest request = new BackupRequest(); - request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) - .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null) - .setTargetRootDir(args[2]).setWorkers(workers).setBandwidth(bandwidth) - .setBackupSetName(setName); - String backupId = backupAdmin.backupTables(request); - System.out.println("Backup session "+ backupId+" finished. Status: SUCCESS"); - } catch (IOException e) { - System.err.println("Backup session finished. Status: FAILURE"); - throw e; - } - } - private String getTablesForSet(String name, Configuration conf) - throws IOException { - try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupSystemTable table = new BackupSystemTable(conn)) { - List tables = table.describeBackupSet(name); - if (tables == null) return null; - return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); - } - } - - @Override - protected void printUsage() { - System.err.println(CREATE_CMD_USAGE); - } - } - - private static class HelpCommand extends Command { - - HelpCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - if (cmdline == null) { - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String[] args = cmdline.getArgs(); - if (args == null || args.length == 0) { - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - if (args.length != 2) { - System.err.println("Only supports help message of a single command type"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String type = args[1]; - - if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) { - System.out.println(CREATE_CMD_USAGE); - } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(type)) { - System.out.println(DESCRIBE_CMD_USAGE); - } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(type)) { - System.out.println(HISTORY_CMD_USAGE); - } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(type)) { - System.out.println(PROGRESS_CMD_USAGE); - } else if (BackupCommand.DELETE.name().equalsIgnoreCase(type)) { - System.out.println(DELETE_CMD_USAGE); - } else if (BackupCommand.CANCEL.name().equalsIgnoreCase(type)) { - System.out.println(CANCEL_CMD_USAGE); - } else if (BackupCommand.SET.name().equalsIgnoreCase(type)) { - System.out.println(SET_CMD_USAGE); - } else { - System.out.println("Unknown command : " + type); - printUsage(); - } - } - - @Override - protected void printUsage() { - System.err.println(USAGE); - } - } - - private static class DescribeCommand extends Command { - - DescribeCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - if (cmdline == null || cmdline.getArgs() == null) { - System.err.println("ERROR: missing arguments"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - String[] args = cmdline.getArgs(); - if (args.length != 2) { - System.err.println("ERROR: wrong number of arguments"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String backupId = args[1]; - Configuration conf = getConf() != null ? 
getConf() : HBaseConfiguration.create(); - try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { - BackupInfo info = admin.getBackupInfo(backupId); - if (info == null) { - System.err.println("ERROR: " + backupId + " does not exist"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - System.out.println(info.getShortDescription()); - } - } - - @Override - protected void printUsage() { - System.err.println(DESCRIBE_CMD_USAGE); - } - } - - private static class ProgressCommand extends Command { - - ProgressCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - - if (cmdline == null || cmdline.getArgs() == null || - cmdline.getArgs().length == 1) { - System.err.println("No backup id was specified, " - + "will retrieve the most recent (ongoing) sessions"); - } - String[] args = cmdline.getArgs(); - if (args.length > 2) { - System.err.println("ERROR: wrong number of arguments: " + args.length); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String backupId = (args == null || args.length <= 1) ? null : args[1]; - Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - int progress = admin.getProgress(backupId); - if(progress < 0){ - System.err.println(NO_INFO_FOUND + backupId); - } else{ - System.out.println(backupId+" progress=" + progress+"%"); - } - } - } - - @Override - protected void printUsage() { - System.err.println(PROGRESS_CMD_USAGE); - } - } - - private static class DeleteCommand extends Command { - - DeleteCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { - System.err.println("No backup id(s) was specified"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String[] args = cmdline.getArgs(); - - String[] backupIds = new String[args.length - 1]; - System.arraycopy(args, 1, backupIds, 0, backupIds.length); - Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); - try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { - int deleted = admin.deleteBackups(args); - System.out.println("Deleted " + deleted + " backups. Total requested: " + args.length); - } - - } - - @Override - protected void printUsage() { - System.err.println(DELETE_CMD_USAGE); - } - } - -// TODO Cancel command - - private static class CancelCommand extends Command { - - CancelCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { - System.out.println("No backup id(s) was specified, will use the most recent one"); - } - String[] args = cmdline.getArgs(); - String backupId = args == null || args.length == 0 ? null : args[1]; - Configuration conf = getConf() != null ? 
getConf() : HBaseConfiguration.create(); - try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { - // TODO cancel backup - } - } - - @Override - protected void printUsage() { - } - } - - private static class HistoryCommand extends Command { - - private final static int DEFAULT_HISTORY_LENGTH = 10; - - HistoryCommand(Configuration conf, CommandLine cmdline) { - super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - - super.execute(); - - int n = parseHistoryLength(); - final TableName tableName = getTableName(); - final String setName = getTableSetName(); - BackupInfo.Filter tableNameFilter = new BackupInfo.Filter() { - @Override - public boolean apply(BackupInfo info) { - if (tableName == null) return true; - List names = info.getTableNames(); - return names.contains(tableName); - } - }; - BackupInfo.Filter tableSetFilter = new BackupInfo.Filter() { - @Override - public boolean apply(BackupInfo info) { - if (setName == null) return true; - String backupId = info.getBackupId(); - return backupId.startsWith(setName); - } - }; - Path backupRootPath = getBackupRootPath(); - List history = null; - Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); - if (backupRootPath == null) { - // Load from hbase:backup - try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { - - history = admin.getHistory(n, tableNameFilter, tableSetFilter); - } - } else { - // load from backup FS - history = BackupClientUtil.getHistory(conf, n, backupRootPath, - tableNameFilter, tableSetFilter); - } - for (BackupInfo info : history) { - System.out.println(info.getShortDescription()); - } - } - - private Path getBackupRootPath() throws IOException { - String value = null; - try{ - value = cmdline.getOptionValue("path"); - if (value == null) return null; - return new Path(value); - } catch (IllegalArgumentException e) { - System.err.println("ERROR: Illegal argument for backup root path: "+ value); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } - - private TableName getTableName() throws IOException { - String value = cmdline.getOptionValue("t"); - if (value == null) return null; - try{ - return TableName.valueOf(value); - } catch (IllegalArgumentException e){ - System.err.println("Illegal argument for table name: "+ value); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } - - private String getTableSetName() throws IOException { - String value = cmdline.getOptionValue("set"); - return value; - } - - private int parseHistoryLength() throws IOException { - String value = cmdline.getOptionValue("n"); - try{ - if (value == null) return DEFAULT_HISTORY_LENGTH; - return Integer.parseInt(value); - } catch(NumberFormatException e) { - System.err.println("Illegal argument for history length: "+ value); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } - - @Override - protected void printUsage() { - System.err.println(HISTORY_CMD_USAGE); - } - } - - private static class BackupSetCommand extends Command { - private final static String SET_ADD_CMD = "add"; - private final static String SET_REMOVE_CMD = "remove"; - private final static String SET_DELETE_CMD = "delete"; - private final static String SET_DESCRIBE_CMD = "describe"; - private final static String SET_LIST_CMD = "list"; - - BackupSetCommand(Configuration conf, CommandLine cmdline) { - 
super(conf); - this.cmdline = cmdline; - } - - @Override - public void execute() throws IOException { - super.execute(); - // Command-line must have at least one element - if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { - System.err.println("ERROR: Command line format"); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String[] args = cmdline.getArgs(); - String cmdStr = args[1]; - BackupCommand cmd = getCommand(cmdStr); - - switch (cmd) { - case SET_ADD: - processSetAdd(args); - break; - case SET_REMOVE: - processSetRemove(args); - break; - case SET_DELETE: - processSetDelete(args); - break; - case SET_DESCRIBE: - processSetDescribe(args); - break; - case SET_LIST: - processSetList(args); - break; - default: - break; - - } - } - - private void processSetList(String[] args) throws IOException { - // List all backup set names - // does not expect any args - Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - List list = admin.listBackupSets(); - for(BackupSet bs: list){ - System.out.println(bs); - } - } - } - - private void processSetDescribe(String[] args) throws IOException { - if (args == null || args.length != 3) { - System.err.println("ERROR: Wrong number of args for 'set describe' command: " - + numOfArgs(args)); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - String setName = args[2]; - Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - BackupSet set = admin.getBackupSet(setName); - if(set == null) { - System.out.println("Set '"+setName+"' does not exist."); - } else{ - System.out.println(set); - } - } - } - - private void processSetDelete(String[] args) throws IOException { - if (args == null || args.length != 3) { - System.err.println("ERROR: Wrong number of args for 'set delete' command: " - + numOfArgs(args)); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - String setName = args[2]; - Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - boolean result = admin.deleteBackupSet(setName); - if(result){ - System.out.println("Delete set "+setName+" OK."); - } else{ - System.out.println("Set "+setName+" does not exist"); - } - } - } - - private void processSetRemove(String[] args) throws IOException { - if (args == null || args.length != 4) { - System.err.println("ERROR: Wrong number of args for 'set remove' command: " - + numOfArgs(args)); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - - String setName = args[2]; - String[] tables = args[3].split(","); - Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - admin.removeFromBackupSet(setName, tables); - } - } - - private void processSetAdd(String[] args) throws IOException { - if (args == null || args.length != 4) { - System.err.println("ERROR: Wrong number of args for 'set add' command: " - + numOfArgs(args)); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - String setName = args[2]; - String[] tables = args[3].split(","); - TableName[] tableNames = new TableName[tables.length]; - for(int i=0; i < tables.length; i++){ - tableNames[i] = TableName.valueOf(tables[i]); - } - Configuration conf = getConf() != null? getConf():HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); - final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - admin.addToBackupSet(setName, tableNames); - } - - } - - private BackupCommand getCommand(String cmdStr) throws IOException { - if (cmdStr.equals(SET_ADD_CMD)) { - return BackupCommand.SET_ADD; - } else if (cmdStr.equals(SET_REMOVE_CMD)) { - return BackupCommand.SET_REMOVE; - } else if (cmdStr.equals(SET_DELETE_CMD)) { - return BackupCommand.SET_DELETE; - } else if (cmdStr.equals(SET_DESCRIBE_CMD)) { - return BackupCommand.SET_DESCRIBE; - } else if (cmdStr.equals(SET_LIST_CMD)) { - return BackupCommand.SET_LIST; - } else { - System.err.println("ERROR: Unknown command for 'set' :" + cmdStr); - printUsage(); - throw new IOException(INCORRECT_USAGE); - } - } - - @Override - protected void printUsage() { - System.err.println(SET_CMD_USAGE); - } - - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java deleted file mode 100644 index ca204b4..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
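// Illustrative sketch only (not part of the patch): the CLI commands above are thin wrappers
// around the BackupAdmin API reached through Admin.getBackupAdmin(), as CreateCommand,
// ProgressCommand and DescribeCommand do. Configuration, table and path are hypothetical;
// imports as in BackupCommands above, plus java.util.Arrays.
static void runBackupProgrammatically(Configuration conf) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin();
       BackupAdmin backupAdmin = admin.getBackupAdmin()) {
    BackupRequest request = new BackupRequest()
        .setBackupType(BackupType.FULL)
        .setTableList(Arrays.asList(TableName.valueOf("t1")))
        .setTargetRootDir("hdfs://nn:8020/backup");
    String backupId = backupAdmin.backupTables(request);       // what "hbase backup create" runs
    int progress = backupAdmin.getProgress(backupId);          // what "hbase backup progress" prints
    BackupInfo info = backupAdmin.getBackupInfo(backupId);     // what "hbase backup describe" prints
    System.out.println(progress + "% " + info.getShortDescription());
  }
}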
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * Backup exception - */ -@SuppressWarnings("serial") -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupException extends HBaseIOException { - private BackupInfo description; - - /** - * Some exception happened for a backup and don't even know the backup that it was about - * @param msg Full description of the failure - */ - public BackupException(String msg) { - super(msg); - } - - /** - * Some exception happened for a backup with a cause - * @param cause the cause - */ - public BackupException(Throwable cause) { - super(cause); - } - - /** - * Exception for the given backup that has no previous root cause - * @param msg reason why the backup failed - * @param desc description of the backup that is being failed - */ - public BackupException(String msg, BackupInfo desc) { - super(msg); - this.description = desc; - } - - /** - * Exception for the given backup due to another exception - * @param msg reason why the backup failed - * @param cause root cause of the failure - * @param desc description of the backup that is being failed - */ - public BackupException(String msg, Throwable cause, BackupInfo desc) { - super(msg, cause); - this.description = desc; - } - - /** - * Exception when the description of the backup cannot be determined, due to some other root - * cause - * @param message description of what caused the failure - * @param e root cause - */ - public BackupException(String message, Exception e) { - super(message, e); - } - - public BackupInfo getBackupContext() { - return this.description; - } - -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java deleted file mode 100644 index d10713d..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ /dev/null @@ -1,791 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.TreeMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - - -/** - * Backup manifest Contains all the meta data of a backup image. The manifest info will be bundled - * as manifest file together with data. So that each backup image will contain all the info needed - * for restore. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupManifest { - - private static final Log LOG = LogFactory.getLog(BackupManifest.class); - - // manifest file name - public static final String MANIFEST_FILE_NAME = ".backup.manifest"; - - // manifest file version, current is 1.0 - public static final String MANIFEST_VERSION = "1.0"; - - // backup image, the dependency graph is made up by series of backup images - - public static class BackupImage implements Comparable { - - private String backupId; - private BackupType type; - private String rootDir; - private List tableList; - private long startTs; - private long completeTs; - private ArrayList ancestors; - - public BackupImage() { - super(); - } - - public BackupImage(String backupId, BackupType type, String rootDir, - List tableList, long startTs, long completeTs) { - this.backupId = backupId; - this.type = type; - this.rootDir = rootDir; - this.tableList = tableList; - this.startTs = startTs; - this.completeTs = completeTs; - } - - static BackupImage fromProto(BackupProtos.BackupImage im) { - String backupId = im.getBackupId(); - String rootDir = im.getRootDir(); - long startTs = im.getStartTs(); - long completeTs = im.getCompleteTs(); - List tableListList = im.getTableListList(); - List tableList = new ArrayList(); - for(HBaseProtos.TableName tn : tableListList) { - tableList.add(ProtobufUtil.toTableName(tn)); - } - - List ancestorList = im.getAncestorsList(); - - BackupType type = - im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: - BackupType.INCREMENTAL; - - BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); - for(BackupProtos.BackupImage img: ancestorList) { - image.addAncestor(fromProto(img)); - } - return image; - } - - BackupProtos.BackupImage toProto() { - BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder(); - builder.setBackupId(backupId); - builder.setCompleteTs(completeTs); - builder.setStartTs(startTs); - builder.setRootDir(rootDir); - if (type == BackupType.FULL) { - builder.setBackupType(BackupProtos.BackupType.FULL); - } else{ - builder.setBackupType(BackupProtos.BackupType.INCREMENTAL); - } - - for (TableName name: tableList) { - builder.addTableList(ProtobufUtil.toProtoTableName(name)); - } - - if (ancestors != null){ - for (BackupImage im: ancestors){ - builder.addAncestors(im.toProto()); - } - } - - return builder.build(); - } - - public String getBackupId() { - return backupId; - } - - public void setBackupId(String backupId) { - this.backupId = backupId; - } - - public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - public String getRootDir() { - return rootDir; - } - - public void setRootDir(String rootDir) { - this.rootDir = rootDir; - } - - public List getTableNames() { - return tableList; - } - - public void setTableList(List tableList) { - this.tableList = tableList; - } - - public long getStartTs() { - return startTs; - } - - public void setStartTs(long startTs) { - this.startTs = startTs; - } - - public long getCompleteTs() { - return completeTs; - } - - public void setCompleteTs(long completeTs) { - this.completeTs = completeTs; - } - - public ArrayList getAncestors() { - if (this.ancestors == null) { - this.ancestors = new ArrayList(); - } - return this.ancestors; - } - - public void addAncestor(BackupImage backupImage) { - this.getAncestors().add(backupImage); - } - - public boolean hasAncestor(String token) { - for (BackupImage image : this.getAncestors()) { - if (image.getBackupId().equals(token)) { - return true; - } - } - return false; - } - - public boolean hasTable(TableName table) { - for (TableName t : tableList) { - if (t.equals(table)) { - return true; - } - } - return false; - } - - @Override - public int compareTo(BackupImage other) { - String thisBackupId = this.getBackupId(); - String otherBackupId = other.getBackupId(); - int index1 = thisBackupId.lastIndexOf("_"); - int index2 = otherBackupId.lastIndexOf("_"); - String name1 = thisBackupId.substring(0, index1); - String name2 = otherBackupId.substring(0, index2); - if(name1.equals(name2)) { - Long thisTS = new Long(thisBackupId.substring(index1 + 1)); - Long otherTS = new Long(otherBackupId.substring(index2 + 1)); - return thisTS.compareTo(otherTS); - } else { - return name1.compareTo(name2); - } - } - } - - // manifest version - private String version = MANIFEST_VERSION; - - // hadoop hbase configuration - protected Configuration config = null; - - // backup root directory - private String rootDir = null; - - // backup image directory - private String tableBackupDir = null; - - // backup log directory if this is an incremental backup - private String logBackupDir = null; - - // backup token - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // the table list for the backup - private ArrayList tableList; - - // actual start timestamp of the backup process - private long startTs; - - // actual complete timestamp of the 
backup process - private long completeTs; - - // the region server timestamps for tables: - // {table -> {region server -> timestamp}} - private Map> incrTimeRanges; - - // dependency of this backup, including all the dependent images to do PIT recovery - private Map dependency; - - /** - * Construct manifest for an ongoing backup. - * @param backupCtx The ongoing backup context - */ - public BackupManifest(BackupInfo backupCtx) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - this.loadTableList(backupCtx.getTableNames()); - } - - - /** - * Construct a table level manifest for a backup of the named table. - * @param backupCtx The ongoing backup context - * @param table the table covered by this manifest - */ - public BackupManifest(BackupInfo backupCtx, TableName table) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - List tables = new ArrayList(); - tables.add(table); - this.loadTableList(tables); - } - - /** - * Construct manifest from a backup directory. - * @param conf configuration - * @param backupPath backup path - * @throws IOException exception - */ - - public BackupManifest(Configuration conf, Path backupPath) throws IOException { - this(backupPath.getFileSystem(conf), backupPath); - } - - /** - * Construct manifest from a backup directory. - * @param fs file system of the backup destination - * @param backupPath backup path - * @throws BackupException exception - */ - - public BackupManifest(FileSystem fs, Path backupPath) throws BackupException { - if (LOG.isDebugEnabled()) { - LOG.debug("Loading manifest from: " + backupPath.toString()); - } - // The input backupDir may not exactly be the backup table dir. - // It could be the backup log dir where there is also a manifest file stored. - // This variable's purpose is to keep the correct and original location so - // that we can store/persist it. - this.tableBackupDir = backupPath.toString(); - this.config = fs.getConf(); - try { - - FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); - if (subFiles == null) { - String errorMsg = backupPath.toString() + " does not exist"; - LOG.error(errorMsg); - throw new IOException(errorMsg); - } - for (FileStatus subFile : subFiles) { - if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { - - // load and set manifest field from file content - FSDataInputStream in = fs.open(subFile.getPath()); - long len = subFile.getLen(); - byte[] pbBytes = new byte[(int) len]; - in.readFully(pbBytes); - BackupProtos.BackupManifest proto = null; - try{ - proto = parseFrom(pbBytes); - } catch(Exception e){ - throw new BackupException(e); - } - this.version = proto.getVersion(); - this.backupId = proto.getBackupId(); - this.type = BackupType.valueOf(proto.getType().name()); - // Here the parameter backupDir is where the manifest file is.
- // There should always be a manifest file under: - // backupRootDir/namespace/table/backupId/.backup.manifest - this.rootDir = backupPath.getParent().getParent().getParent().toString(); - - Path p = backupPath.getParent(); - if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - this.rootDir = p.getParent().toString(); - } else { - this.rootDir = p.getParent().getParent().toString(); - } - - loadTableList(proto); - this.startTs = proto.getStartTs(); - this.completeTs = proto.getCompleteTs(); - loadIncrementalTimestampMap(proto); - loadDependency(proto); - //TODO: merge will be implemented by future jira - LOG.debug("Loaded manifest instance from manifest file: " - + BackupClientUtil.getPath(subFile.getPath())); - return; - } - } - String errorMsg = "No manifest file found in: " + backupPath.toString(); - throw new IOException(errorMsg); - - } catch (IOException e) { - throw new BackupException(e.getMessage()); - } - } - - private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { - List list = proto.getTstMapList(); - if(list == null || list.size() == 0) return; - this.incrTimeRanges = new HashMap>(); - for(BackupProtos.TableServerTimestamp tst: list){ - TableName tn = ProtobufUtil.toTableName(tst.getTable()); - HashMap map = this.incrTimeRanges.get(tn); - if(map == null){ - map = new HashMap(); - this.incrTimeRanges.put(tn, map); - } - List listSt = tst.getServerTimestampList(); - for(BackupProtos.ServerTimestamp stm: listSt) { - map.put(stm.getServer(), stm.getTimestamp()); - } - } - } - - private void loadDependency(BackupProtos.BackupManifest proto) { - if(LOG.isDebugEnabled()) { - LOG.debug("load dependency for: "+proto.getBackupId()); - } - - dependency = new HashMap(); - List list = proto.getDependentBackupImageList(); - for (BackupProtos.BackupImage im : list) { - BackupImage bim = BackupImage.fromProto(im); - if(im.getBackupId() != null){ - dependency.put(im.getBackupId(), bim); - } else{ - LOG.warn("Load dependency for backup manifest: "+ backupId+ - ". Null backup id in dependent image"); - } - } - } - - private void loadTableList(BackupProtos.BackupManifest proto) { - this.tableList = new ArrayList(); - List list = proto.getTableListList(); - for (HBaseProtos.TableName name: list) { - this.tableList.add(ProtobufUtil.toTableName(name)); - } - } - - public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - /** - * Loads table list. - * @param tableList Table list - */ - private void loadTableList(List tableList) { - - this.tableList = this.getTableList(); - if (this.tableList.size() > 0) { - this.tableList.clear(); - } - for (int i = 0; i < tableList.size(); i++) { - this.tableList.add(tableList.get(i)); - } - - LOG.debug(tableList.size() + " tables exist in table set."); - } - - /** - * Get the table set of this image. - * @return The table set list - */ - public ArrayList getTableList() { - if (this.tableList == null) { - this.tableList = new ArrayList(); - } - return this.tableList; - } - - /** - * Persist the manifest file. - * @throws IOException IOException when storing the manifest file. - */ - - public void store(Configuration conf) throws BackupException { - byte[] data = toByteArray(); - - // write the file, overwrite if already exist - Path manifestFilePath = - new Path(new Path((this.tableBackupDir != null ? 
this.tableBackupDir : this.logBackupDir)) - ,MANIFEST_FILE_NAME); - try { - FSDataOutputStream out = - manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); - out.write(data); - out.close(); - } catch (IOException e) { - throw new BackupException(e.getMessage()); - } - - LOG.info("Manifest file stored to " + manifestFilePath); - } - - /** - * Protobuf serialization - * @return The filter serialized using pb - */ - public byte[] toByteArray() { - BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder(); - builder.setVersion(this.version); - builder.setBackupId(this.backupId); - builder.setType(BackupProtos.BackupType.valueOf(this.type.name())); - setTableList(builder); - builder.setStartTs(this.startTs); - builder.setCompleteTs(this.completeTs); - setIncrementalTimestampMap(builder); - setDependencyMap(builder); - return builder.build().toByteArray(); - } - - private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { - if (this.incrTimeRanges == null) { - return; - } - for (Entry> entry: this.incrTimeRanges.entrySet()) { - TableName key = entry.getKey(); - HashMap value = entry.getValue(); - BackupProtos.TableServerTimestamp.Builder tstBuilder = - BackupProtos.TableServerTimestamp.newBuilder(); - tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); - - for (String s : value.keySet()) { - BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder(); - stBuilder.setServer(s); - stBuilder.setTimestamp(value.get(s)); - tstBuilder.addServerTimestamp(stBuilder.build()); - } - builder.addTstMap(tstBuilder.build()); - } - } - - private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) { - for (BackupImage image: getDependency().values()) { - builder.addDependentBackupImage(image.toProto()); - } - } - - private void setTableList(BackupProtos.BackupManifest.Builder builder) { - for(TableName name: tableList){ - builder.addTableList(ProtobufUtil.toProtoTableName(name)); - } - } - - /** - * Parse protobuf from byte array - * @param pbBytes A pb serialized BackupManifest instance - * @return An instance of made from bytes - * @throws DeserializationException - */ - private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes) - throws DeserializationException { - BackupProtos.BackupManifest proto; - try { - proto = BackupProtos.BackupManifest.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return proto; - } - - /** - * Get manifest file version - * @return version - */ - public String getVersion() { - return version; - } - - /** - * Get this backup image. - * @return the backup image. - */ - public BackupImage getBackupImage() { - return this.getDependency().get(this.backupId); - } - - /** - * Add dependent backup image for this backup. - * @param image The direct dependent backup image - */ - public void addDependentImage(BackupImage image) { - this.getDependency().get(this.backupId).addAncestor(image); - this.setDependencyMap(this.getDependency(), image); - } - - - - /** - * Get all dependent backup images. The image of this backup is also contained. 
- * @return The dependent backup images map - */ - public Map getDependency() { - if (this.dependency == null) { - this.dependency = new HashMap(); - LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); - this.dependency.put(this.backupId, - new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs)); - } - return this.dependency; - } - - /** - * Set the incremental timestamp map directly. - * @param incrTimestampMap timestamp map - */ - public void setIncrTimestampMap(HashMap> incrTimestampMap) { - this.incrTimeRanges = incrTimestampMap; - } - - - public Map> getIncrTimestampMap() { - if (this.incrTimeRanges == null) { - this.incrTimeRanges = new HashMap>(); - } - return this.incrTimeRanges; - } - - - /** - * Get the image list of this backup for restore in time order. - * @param reverse If true, then output in reverse order, otherwise in time order from old to new - * @return the backup image list for restore in time order - */ - public ArrayList getRestoreDependentList(boolean reverse) { - TreeMap restoreImages = new TreeMap(); - for (BackupImage image : this.getDependency().values()) { - restoreImages.put(Long.valueOf(image.startTs), image); - } - return new ArrayList(reverse ? (restoreImages.descendingMap().values()) - : (restoreImages.values())); - } - - /** - * Get the dependent image list for a specific table of this backup in time order from old to new - * if want to restore to this backup image level. - * @param table table - * @return the backup image list for a table in time order - */ - public ArrayList getDependentListByTable(TableName table) { - ArrayList tableImageList = new ArrayList(); - ArrayList imageList = getRestoreDependentList(true); - for (BackupImage image : imageList) { - if (image.hasTable(table)) { - tableImageList.add(image); - if (image.getType() == BackupType.FULL) { - break; - } - } - } - Collections.reverse(tableImageList); - return tableImageList; - } - - /** - * Get the full dependent image list in the whole dependency scope for a specific table of this - * backup in time order from old to new. - * @param table table - * @return the full backup image list for a table in time order in the whole scope of the - * dependency of this image - */ - public ArrayList getAllDependentListByTable(TableName table) { - ArrayList tableImageList = new ArrayList(); - ArrayList imageList = getRestoreDependentList(false); - for (BackupImage image : imageList) { - if (image.hasTable(table)) { - tableImageList.add(image); - } - } - return tableImageList; - } - - - /** - * Recursively set the dependency map of the backup images. - * @param map The dependency map - * @param image The backup image - */ - private void setDependencyMap(Map map, BackupImage image) { - if (image == null) { - return; - } else { - map.put(image.getBackupId(), image); - for (BackupImage img : image.getAncestors()) { - setDependencyMap(map, img); - } - } - } - - /** - * Check whether backup image1 could cover backup image2 or not. - * @param image1 backup image 1 - * @param image2 backup image 2 - * @return true if image1 can cover image2, otherwise false - */ - public static boolean canCoverImage(BackupImage image1, BackupImage image2) { - // image1 can cover image2 only when the following conditions are satisfied: - // - image1 must not be an incremental image; - // - image1 must be taken after image2 has been taken; - // - table set of image1 must cover the table set of image2. 
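 - // For example (hypothetical values): a FULL image over tables {t1, t2} with startTs=200 - // can cover an image over {t1} with startTs=100, since it is not incremental, is not older, - // and its table set is a superset; an INCREMENTAL image can never cover another image.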
- if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image2.getStartTs()) { - return false; - } - List image1TableList = image1.getTableNames(); - List image2TableList = image2.getTableNames(); - boolean found = false; - for (int i = 0; i < image2TableList.size(); i++) { - found = false; - for (int j = 0; j < image1TableList.size(); j++) { - if (image2TableList.get(i).equals(image1TableList.get(j))) { - found = true; - break; - } - } - if (!found) { - return false; - } - } - - LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); - return true; - } - - /** - * Check whether backup image set could cover a backup image or not. - * @param fullImages The backup image set - * @param image The target backup image - * @return true if fullImages can cover image, otherwise false - */ - public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { - // fullImages can cover image only when the following conditions are satisfied: - // - each image of fullImages must not be an incremental image; - // - each image of fullImages must be taken after image has been taken; - // - sum table set of fullImages must cover the table set of image. - for (BackupImage image1 : fullImages) { - if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image.getStartTs()) { - return false; - } - } - - ArrayList image1TableList = new ArrayList(); - for (BackupImage image1 : fullImages) { - List tableList = image1.getTableNames(); - for (TableName table : tableList) { - image1TableList.add(table.getNameAsString()); - } - } - ArrayList image2TableList = new ArrayList(); - List tableList = image.getTableNames(); - for (TableName table : tableList) { - image2TableList.add(table.getNameAsString()); - } - - for (int i = 0; i < image2TableList.size(); i++) { - if (image1TableList.contains(image2TableList.get(i)) == false) { - return false; - } - } - - LOG.debug("Full image set can cover image " + image.getBackupId()); - return true; - } - - public BackupInfo toBackupInfo() - { - BackupInfo info = new BackupInfo(); - info.setType(type); - TableName[] tables = new TableName[tableList.size()]; - info.addTables(getTableList().toArray(tables)); - info.setBackupId(backupId); - info.setStartTs(startTs); - info.setTargetRootDir(rootDir); - if(type == BackupType.INCREMENTAL) { - info.setHlogTargetDir(logBackupDir); - } - return info; - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java deleted file mode 100644 index ac1d2bc..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.impl; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * BackupRestoreConstants holds a bunch of HBase Backup and Restore constants - */ -@InterfaceAudience.Private -@InterfaceStability.Stable -public final class BackupRestoreConstants { - - - // delimiter in tablename list in restore command - public static final String TABLENAME_DELIMITER_IN_COMMAND = ","; - - public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root"; - - public static final String BACKUPID_PREFIX = "backup_"; - - public static enum BackupCommand { - CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET, - SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST - } - - private BackupRestoreConstants() { - // Can't be instantiated with this ctor. - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java deleted file mode 100644 index 3066282..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ /dev/null @@ -1,873 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.backup.impl; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; - -/** - * This class provides 'hbase:backup' table API - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class BackupSystemTable implements Closeable { - - static class WALItem { - String backupId; - String walFile; - String backupRoot; - - WALItem(String backupId, String walFile, String backupRoot) { - this.backupId = backupId; - this.walFile = walFile; - this.backupRoot = backupRoot; - } - - public String getBackupId() { - return backupId; - } - - public String getWalFile() { - return walFile; - } - - public String getBackupRoot() { - return backupRoot; - } - - public String toString() { - return "/" + backupRoot + "/" + backupId + "/" + walFile; - } - - } - - private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); - private final static TableName tableName = TableName.BACKUP_TABLE_NAME; - // Stores backup sessions (contexts) - final static byte[] SESSIONS_FAMILY = "session".getBytes(); - // Stores other meta - final static byte[] META_FAMILY = "meta".getBytes(); - // Connection to HBase cluster, shared - // among all instances - private final Connection connection; - - public BackupSystemTable(Connection conn) throws IOException { - this.connection = conn; - } - - public void close() { - // do nothing - } - - /** - * Updates status (state) of a backup session in hbase:backup table - * @param context context - * @throws IOException exception - */ - public void updateBackupInfo(BackupInfo context) throws IOException { - - if (LOG.isDebugEnabled()) { - LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() - + " set status=" + context.getState()); - } - try (Table table = connection.getTable(tableName)) { - Put put = BackupSystemTableHelper.createPutForBackupContext(context); - table.put(put); - } - } - - /** - * Deletes backup status from hbase:backup table - * @param backupId backup id - * @throws IOException exception - */ 
- - public void deleteBackupInfo(String backupId) throws IOException { - - if (LOG.isDebugEnabled()) { - LOG.debug("delete backup status in hbase:backup for " + backupId); - } - try (Table table = connection.getTable(tableName)) { - Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId); - table.delete(del); - } - } - - /** - * Reads backup status object (instance of BackupContext) from hbase:backup table - * @param backupId - backupId - * @return Current status of backup session or null - */ - - public BackupInfo readBackupInfo(String backupId) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("read backup status from hbase:backup for: " + backupId); - } - - try (Table table = connection.getTable(tableName)) { - Get get = BackupSystemTableHelper.createGetForBackupContext(backupId); - Result res = table.get(get); - if (res.isEmpty()) { - return null; - } - return BackupSystemTableHelper.resultToBackupInfo(res); - } - } - - /** - * Read the last backup start code (timestamp) of last successful backup. Will return null if - * there is no start code stored on hbase or the value is of length 0. These two cases indicate - * there is no successful backup completed so far. - * @param backupRoot root directory path to backup - * @return the timestamp of last successful backup - * @throws IOException exception - */ - public String readBackupStartCode(String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("read backup start code from hbase:backup"); - } - try (Table table = connection.getTable(tableName)) { - Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot); - Result res = table.get(get); - if (res.isEmpty()) { - return null; - } - Cell cell = res.listCells().get(0); - byte[] val = CellUtil.cloneValue(cell); - if (val.length == 0) { - return null; - } - return new String(val); - } - } - - /** - * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. - * @param startCode start code - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("write backup start code to hbase:backup " + startCode); - } - try (Table table = connection.getTable(tableName)) { - Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot); - table.put(put); - } - } - - /** - * Get the Region Servers log information after the last log roll from hbase:backup. 
- * @param backupRoot root directory path to backup - * @return RS log info - * @throws IOException exception - */ - public HashMap readRegionServerLastLogRollResult(String backupRoot) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("read region server last roll log result to hbase:backup"); - } - - Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot); - - try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { - Result res = null; - HashMap rsTimestampMap = new HashMap(); - while ((res = scanner.next()) != null) { - res.advance(); - Cell cell = res.current(); - byte[] row = CellUtil.cloneRow(cell); - String server = - BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); - byte[] data = CellUtil.cloneValue(cell); - rsTimestampMap.put(server, Long.parseLong(new String(data))); - } - return rsTimestampMap; - } - } - - /** - * Writes Region Server last roll log result (timestamp) to hbase:backup table - * @param server - Region Server name - * @param ts- last log timestamp - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("write region server last roll log result to hbase:backup"); - } - try (Table table = connection.getTable(tableName)) { - Put put = - BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, ts, backupRoot); - table.put(put); - } - } - - /** - * Get all completed backup information (in desc order by time) - * @param onlyCompeleted, true, if only successfully completed sessions - * @return history info of BackupCompleteData - * @throws IOException exception - */ - public ArrayList getBackupHistory(boolean onlyCompleted) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("get backup history from hbase:backup"); - } - ArrayList list; - BackupState state = onlyCompleted ? 
BackupState.COMPLETE : BackupState.ANY; - list = getBackupContexts(state); - return BackupClientUtil.sortHistoryListDesc(list); - } - - public List getBackupHistory() throws IOException { - return getBackupHistory(false); - } - - /** - * Get history for backup destination - * @param backupRoot - backup destination - * @return List of backup info - * @throws IOException - */ - public List getBackupHistory(String backupRoot) throws IOException { - ArrayList history = getBackupHistory(false); - for (Iterator iterator = history.iterator(); iterator.hasNext();) { - BackupInfo info = iterator.next(); - if (!backupRoot.equals(info.getTargetRootDir())) { - iterator.remove(); - } - } - return history; - } - - /** - * Get history for a table - * @param name - table name - * @return history for a table - * @throws IOException - */ - public List getBackupHistoryForTable(TableName name) throws IOException { - List history = getBackupHistory(); - List tableHistory = new ArrayList(); - for (BackupInfo info : history) { - List tables = info.getTableNames(); - if (tables.contains(name)) { - tableHistory.add(info); - } - } - return tableHistory; - } - - public Map> - getBackupHistoryForTableSet(Set set, String backupRoot) throws IOException { - List history = getBackupHistory(backupRoot); - Map> tableHistoryMap = - new HashMap>(); - for (Iterator iterator = history.iterator(); iterator.hasNext();) { - BackupInfo info = iterator.next(); - if (!backupRoot.equals(info.getTargetRootDir())) { - continue; - } - List tables = info.getTableNames(); - for (TableName tableName: tables) { - if (set.contains(tableName)) { - ArrayList list = tableHistoryMap.get(tableName); - if (list == null) { - list = new ArrayList(); - tableHistoryMap.put(tableName, list); - } - list.add(info); - } - } - } - return tableHistoryMap; - } - - /** - * Get all backup session with a given status (in desc order by time) - * @param status status - * @return history info of backup contexts - * @throws IOException exception - */ - public ArrayList getBackupContexts(BackupState status) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("get backup contexts from hbase:backup"); - } - - Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); - ArrayList list = new ArrayList(); - - try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { - Result res = null; - while ((res = scanner.next()) != null) { - res.advance(); - BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); - if (status != BackupState.ANY && context.getState() != status) { - continue; - } - list.add(context); - } - return list; - } - } - - /** - * Write the current timestamps for each regionserver to hbase:backup after a successful full or - * incremental backup. The saved timestamp is of the last log file that was backed up already. 
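 - * For example (hypothetical values), if region server rs1's newest backed-up WAL carries - * timestamp 1500000300000, the map {rs1 -> 1500000300000} is persisted for every table in - * the backup, so that a later incremental backup can start from that point.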
- * @param tables tables - * @param newTimestamps timestamps - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public void writeRegionServerLogTimestamp(Set tables, - HashMap newTimestamps, String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("write RS log time stamps to hbase:backup for tables [" - + StringUtils.join(tables, ",") + "]"); - } - List puts = new ArrayList(); - for (TableName table : tables) { - byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray(); - Put put = - BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData, - backupRoot); - puts.add(put); - } - try (Table table = connection.getTable(tableName)) { - table.put(puts); - } - } - - /** - * Read the timestamp for each region server log after the last successful backup. Each table has - * its own set of the timestamps. The info is stored for each table as a concatenated string of - * rs->timestapmp - * @param backupRoot root directory path to backup - * @return the timestamp for each region server. key: tableName value: - * RegionServer,PreviousTimeStamp - * @throws IOException exception - */ - public HashMap> readLogTimestampMap(String backupRoot) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("read RS log ts from hbase:backup for root=" + backupRoot); - } - - HashMap> tableTimestampMap = - new HashMap>(); - - Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot); - try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { - Result res = null; - while ((res = scanner.next()) != null) { - res.advance(); - Cell cell = res.current(); - byte[] row = CellUtil.cloneRow(cell); - String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row); - TableName tn = TableName.valueOf(tabName); - byte[] data = CellUtil.cloneValue(cell); - if (data == null) { - throw new IOException("Data of last backup data from hbase:backup " - + "is empty. Create a backup first."); - } - if (data != null && data.length > 0) { - HashMap lastBackup = - fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); - tableTimestampMap.put(tn, lastBackup); - } - } - return tableTimestampMap; - } - } - - private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, - Map map) { - BackupProtos.TableServerTimestamp.Builder tstBuilder = - BackupProtos.TableServerTimestamp.newBuilder(); - tstBuilder.setTable(ProtobufUtil.toProtoTableName(table)); - - for (Entry entry : map.entrySet()) { - BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); - builder.setServer(entry.getKey()); - builder.setTimestamp(entry.getValue()); - tstBuilder.addServerTimestamp(builder.build()); - } - - return tstBuilder.build(); - } - - private HashMap fromTableServerTimestampProto( - BackupProtos.TableServerTimestamp proto) { - HashMap map = new HashMap(); - List list = proto.getServerTimestampList(); - for (BackupProtos.ServerTimestamp st : list) { - map.put(st.getServer(), st.getTimestamp()); - } - return map; - } - - /** - * Return the current tables covered by incremental backup. 
- * @param backupRoot root directory path to backup - * @return set of tableNames - * @throws IOException exception - */ - public Set getIncrementalBackupTableSet(String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("get incr backup table set from hbase:backup"); - } - TreeSet set = new TreeSet<>(); - - try (Table table = connection.getTable(tableName)) { - Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot); - Result res = table.get(get); - if (res.isEmpty()) { - return set; - } - List cells = res.listCells(); - for (Cell cell : cells) { - // qualifier = table name - we use table names as qualifiers - set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); - } - return set; - } - } - - /** - * Add tables to global incremental backup set - * @param tables - set of tables - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public void addIncrementalBackupTableSet(Set tables, String backupRoot) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot - + " tables [" + StringUtils.join(tables, " ") + "]"); - for (TableName table : tables) { - LOG.debug(table); - } - } - try (Table table = connection.getTable(tableName)) { - Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot); - table.put(put); - } - } - - /** - * Removes incremental backup set - * @param backupRoot backup root - */ - - public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Delete incremental backup table set to hbase:backup. ROOT=" + backupRoot); - } - try (Table table = connection.getTable(tableName)) { - Delete delete = BackupSystemTableHelper.createDeleteForIncrBackupTableSet(backupRoot); - table.delete(delete); - } - } - - /** - * Register WAL files as eligible for deletion - * @param files files - * @param backupId backup id - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public void addWALFiles(List files, String backupId, String backupRoot) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files [" - + StringUtils.join(files, ",") + "]"); - for (String f : files) { - LOG.debug("add :" + f); - } - } - try (Table table = connection.getTable(tableName)) { - List puts = - BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot); - table.put(puts); - } - } - - /** - * Register WAL files as eligible for deletion - * @param backupRoot root directory path to backup - * @throws IOException exception - */ - public Iterator getWALFilesIterator(String backupRoot) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("get WAL files from hbase:backup"); - } - final Table table = connection.getTable(tableName); - Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot); - final ResultScanner scanner = table.getScanner(scan); - final Iterator it = scanner.iterator(); - return new Iterator() { - - @Override - public boolean hasNext() { - boolean next = it.hasNext(); - if (!next) { - // close all - try { - scanner.close(); - table.close(); - } catch (IOException e) { - LOG.error("Close WAL Iterator", e); - } - } - return next; - } - - @Override - public WALItem next() { - Result next = it.next(); - List cells = next.listCells(); - byte[] buf = cells.get(0).getValueArray(); - int len = 
cells.get(0).getValueLength(); - int offset = cells.get(0).getValueOffset(); - String backupId = new String(buf, offset, len); - buf = cells.get(1).getValueArray(); - len = cells.get(1).getValueLength(); - offset = cells.get(1).getValueOffset(); - String walFile = new String(buf, offset, len); - buf = cells.get(2).getValueArray(); - len = cells.get(2).getValueLength(); - offset = cells.get(2).getValueOffset(); - String backupRoot = new String(buf, offset, len); - return new WALItem(backupId, walFile, backupRoot); - } - - @Override - public void remove() { - // not implemented - throw new RuntimeException("remove is not supported"); - } - }; - - } - - /** - * Check if WAL file is eligible for deletion Future: to support all backup destinations - * @param file file - * @return true, if - yes. - * @throws IOException exception - */ - public boolean isWALFileDeletable(String file) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Check if WAL file has been already backed up in hbase:backup " + file); - } - try (Table table = connection.getTable(tableName)) { - Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); - Result res = table.get(get); - if (res.isEmpty()) { - return false; - } - return true; - } - } - - /** - * Checks if we have at least one backup session in hbase:backup This API is used by - * BackupLogCleaner - * @return true, if - at least one session exists in hbase:backup table - * @throws IOException exception - */ - public boolean hasBackupSessions() throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Has backup sessions from hbase:backup"); - } - boolean result = false; - Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); - scan.setCaching(1); - try (Table table = connection.getTable(tableName); - ResultScanner scanner = table.getScanner(scan)) { - if (scanner.next() != null) { - result = true; - } - return result; - } - } - - /** - * BACKUP SETS - */ - - /** - * Get backup set list - * @return backup set list - * @throws IOException - */ - public List listBackupSets() throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set list"); - } - List list = new ArrayList(); - Table table = null; - ResultScanner scanner = null; - try { - table = connection.getTable(tableName); - Scan scan = BackupSystemTableHelper.createScanForBackupSetList(); - scan.setMaxVersions(1); - scanner = table.getScanner(scan); - Result res = null; - while ((res = scanner.next()) != null) { - res.advance(); - list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); - } - return list; - } finally { - if (scanner != null) { - scanner.close(); - } - if (table != null) { - table.close(); - } - } - } - - /** - * Get backup set description (list of tables) - * @param name - set's name - * @return list of tables in a backup set - * @throws IOException - */ - public List describeBackupSet(String name) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set describe: " + name); - } - Table table = null; - try { - table = connection.getTable(tableName); - Get get = BackupSystemTableHelper.createGetForBackupSet(name); - Result res = table.get(get); - if (res.isEmpty()) return null; - res.advance(); - String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); - return toList(tables); - } finally { - if (table != null) { - table.close(); - } - } - } - - private List toList(String[] tables) { - List list = new ArrayList(tables.length); - for (String name : tables) { - 
list.add(TableName.valueOf(name)); - } - return list; - } - - /** - * Add backup set (list of tables) - * @param name - set name - * @param tables - list of tables, comma-separated - * @throws IOException - */ - public void addToBackupSet(String name, String[] newTables) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]"); - } - Table table = null; - String[] union = null; - try { - table = connection.getTable(tableName); - Get get = BackupSystemTableHelper.createGetForBackupSet(name); - Result res = table.get(get); - if (res.isEmpty()) { - union = newTables; - } else { - res.advance(); - String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); - union = merge(tables, newTables); - } - Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); - table.put(put); - } finally { - if (table != null) { - table.close(); - } - } - } - - private String[] merge(String[] tables, String[] newTables) { - List list = new ArrayList(); - // Add all from tables - for (String t : tables) { - list.add(t); - } - for (String nt : newTables) { - if (list.contains(nt)) continue; - list.add(nt); - } - String[] arr = new String[list.size()]; - list.toArray(arr); - return arr; - } - - /** - * Remove tables from backup set (list of tables) - * @param name - set name - * @param tables - list of tables, comma-separated - * @throws IOException - */ - public void removeFromBackupSet(String name, String[] toRemove) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ") - + "]"); - } - Table table = null; - String[] disjoint = null; - try { - table = connection.getTable(tableName); - Get get = BackupSystemTableHelper.createGetForBackupSet(name); - Result res = table.get(get); - if (res.isEmpty()) { - LOG.warn("Backup set '" + name + "' not found."); - return; - } else { - res.advance(); - String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); - disjoint = disjoin(tables, toRemove); - } - if (disjoint.length > 0) { - Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint); - table.put(put); - } else { - // Delete - // describeBackupSet(name); - LOG.warn("Backup set '" + name + "' does not contain tables [" - + StringUtils.join(toRemove, " ") + "]"); - } - } finally { - if (table != null) { - table.close(); - } - } - } - - private String[] disjoin(String[] tables, String[] toRemove) { - List list = new ArrayList(); - // Add all from tables - for (String t : tables) { - list.add(t); - } - for (String nt : toRemove) { - if (list.contains(nt)) { - list.remove(nt); - } - } - String[] arr = new String[list.size()]; - list.toArray(arr); - return arr; - } - - /** - * Delete backup set - * @param name set's name - * @throws IOException - */ - public void deleteBackupSet(String name) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set delete: " + name); - } - Table table = null; - try { - table = connection.getTable(tableName); - Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name); - table.delete(del); - } finally { - if (table != null) { - table.close(); - } - } - } - - /** - * Get backup system table descriptor - * @return descriptor - */ - public static HTableDescriptor getSystemTableDescriptor() { - HTableDescriptor tableDesc = new HTableDescriptor(tableName); - HColumnDescriptor colSessionsDesc = new 
HColumnDescriptor(SESSIONS_FAMILY); - colSessionsDesc.setMaxVersions(1); - // Time to keep backup sessions (secs) - Configuration config = HBaseConfiguration.create(); - int ttl = config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); - colSessionsDesc.setTimeToLive(ttl); - tableDesc.addFamily(colSessionsDesc); - HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); - // colDesc.setMaxVersions(1); - tableDesc.addFamily(colMetaDesc); - return tableDesc; - } - - public static String getTableNameAsString() { - return tableName.getNameAsString(); - } - - public static TableName getTableName() { - return tableName; - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java deleted file mode 100644 index 37f29f8..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java +++ /dev/null @@ -1,433 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; - - -/** - * A collection for methods used by BackupSystemTable. - */ - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class BackupSystemTableHelper { - - /** - * hbase:backup schema: - * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext - * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode - * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] - * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> last WAL - * timestamp] - * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp - * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name - */ - - private final static String BACKUP_INFO_PREFIX = "session:"; - private final static String START_CODE_ROW = "startcode:"; - private final static String INCR_BACKUP_SET = "incrbackupset:"; - private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:"; - private final static String RS_LOG_TS_PREFIX = "rslogts:"; - private final static String WALS_PREFIX = "wals:"; - private final static String SET_KEY_PREFIX = "backupset:"; - - private final static byte[] EMPTY_VALUE = new byte[] {}; - - // Safe delimiter in a string - private final static String NULL = "\u0000"; - - private BackupSystemTableHelper() { - throw new AssertionError("Instantiating utility class..."); - } - - /** - * Creates Put operation for a given backup context object - * @param context backup context - * @return put operation - * @throws IOException exception - */ - static Put createPutForBackupContext(BackupInfo context) throws IOException { - Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); - put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray()); - return put; - } - - /** - * Creates Get operation for a given backup id - * @param backupId - backup's ID - * @return get operation - * @throws IOException exception - */ - static Get createGetForBackupContext(String backupId) throws IOException { - Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId)); - get.addFamily(BackupSystemTable.SESSIONS_FAMILY); - get.setMaxVersions(1); - return get; - } - - /** - * Creates Delete operation for a given backup id - * @param backupId - backup's ID - * @return delete operation - * @throws IOException exception - */ - public static Delete createDeleteForBackupInfo(String backupId) { - Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId)); - del.addFamily(BackupSystemTable.SESSIONS_FAMILY); - return del; - } - - /** - * Converts Result to BackupContext - * @param res - HBase result - * @return backup context instance - * @throws IOException exception - */ - static BackupInfo resultToBackupInfo(Result res) throws IOException { - res.advance(); - Cell cell = res.current(); - return cellToBackupInfo(cell); - } - - /** - * Creates Get operation to retrieve start code from hbase:backup - * @return get operation - * @throws IOException exception - */ - static Get createGetForStartCode(String rootPath) throws IOException { - Get get = new Get(rowkey(START_CODE_ROW, rootPath)); - get.addFamily(BackupSystemTable.META_FAMILY); - get.setMaxVersions(1); - return get; - } - - /** - * Creates Put operation to store start code to hbase:backup - * @return put operation - * @throws IOException exception - */ - static Put createPutForStartCode(String startCode, String rootPath) { - Put put = new Put(rowkey(START_CODE_ROW, rootPath)); - put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes()); - return put; - } - - /** - * Creates Get to retrieve incremental backup table set from hbase:backup - * @return get operation - * @throws IOException exception - */ - static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException { - Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot)); - get.addFamily(BackupSystemTable.META_FAMILY); - get.setMaxVersions(1); - return get; - } - - /** - * Creates Put to store incremental backup table set - * @param tables tables - * @return put operation - */ - static Put createPutForIncrBackupTableSet(Set tables, 
String backupRoot) { - Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot)); - for (TableName table : tables) { - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), - EMPTY_VALUE); - } - return put; - } - - /** - * Creates Delete for incremental backup table set - * @param backupRoot backup root - * @return delete operation - */ - static Delete createDeleteForIncrBackupTableSet(String backupRoot) { - Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot)); - delete.addFamily(BackupSystemTable.META_FAMILY); - return delete; - } - - /** - * Creates Scan operation to load backup history - * @return scan operation - */ - static Scan createScanForBackupHistory() { - Scan scan = new Scan(); - byte[] startRow = BACKUP_INFO_PREFIX.getBytes(); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.SESSIONS_FAMILY); - scan.setMaxVersions(1); - return scan; - } - - /** - * Converts cell to backup context instance. - * @param current - cell - * @return backup context instance - * @throws IOException exception - */ - static BackupInfo cellToBackupInfo(Cell current) throws IOException { - byte[] data = CellUtil.cloneValue(current); - return BackupInfo.fromByteArray(data); - } - - /** - * Creates Put to write RS last roll log timestamp map - * @param table - table - * @param smap - map, containing RS:ts - * @return put operation - */ - static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, - String backupRoot) { - Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); - put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); - return put; - } - - /** - * Creates Scan to load table-> { RS -> ts} map of maps - * @return scan operation - */ - static Scan createScanForReadLogTimestampMap(String backupRoot) { - Scan scan = new Scan(); - byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); - - return scan; - } - - /** - * Get table name from rowkey - * @param cloneRow rowkey - * @return table name - */ - static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { - String s = new String(cloneRow); - int index = s.lastIndexOf(NULL); - return s.substring(index + 1); - } - - /** - * Creates Put to store RS last log result - * @param server - server name - * @param timestamp - log roll result (timestamp) - * @return put operation - */ - static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, - String backupRoot) { - Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); - put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), timestamp.toString() - .getBytes()); - return put; - } - - /** - * Creates Scan operation to load last RS log roll results - * @return scan operation - */ - static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) { - Scan scan = new Scan(); - byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - 
scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); - scan.setMaxVersions(1); - - return scan; - } - - /** - * Get server's name from rowkey - * @param row - rowkey - * @return server's name - */ - static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { - String s = new String(row); - int index = s.lastIndexOf(NULL); - return s.substring(index + 1); - } - - /** - * Creates put list for list of WAL files - * @param files list of WAL file paths - * @param backupId backup id - * @return put list - * @throws IOException exception - */ - public static List createPutsForAddWALFiles(List files, String backupId, - String backupRoot) throws IOException { - - List puts = new ArrayList(); - for (String file : files) { - Put put = new Put(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); - put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes()); - puts.add(put); - } - return puts; - } - - /** - * Creates Scan operation to load WALs TODO: support for backupRoot - * @param backupRoot - path to backup destination - * @return scan operation - */ - public static Scan createScanForGetWALs(String backupRoot) { - Scan scan = new Scan(); - byte[] startRow = WALS_PREFIX.getBytes(); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); - return scan; - } - - /** - * Creates Get operation for a given wal file name TODO: support for backup destination - * @param file file - * @return get operation - * @throws IOException exception - */ - public static Get createGetForCheckWALFile(String file) throws IOException { - Get get = new Get(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); - // add backup root column - get.addFamily(BackupSystemTable.META_FAMILY); - return get; - } - - /** - * Creates Scan operation to load backup set list - * @return scan operation - */ - static Scan createScanForBackupSetList() { - Scan scan = new Scan(); - byte[] startRow = SET_KEY_PREFIX.getBytes(); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); - return scan; - } - - /** - * Creates Get operation to load backup set content - * @return get operation - */ - static Get createGetForBackupSet(String name) { - Get get = new Get(rowkey(SET_KEY_PREFIX, name)); - get.addFamily(BackupSystemTable.META_FAMILY); - return get; - } - - /** - * Creates Delete operation to delete backup set content - * @param name - backup set's name - * @return delete operation - */ - static Delete createDeleteForBackupSet(String name) { - Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); - del.addFamily(BackupSystemTable.META_FAMILY); - return del; - } - - /** - * Creates Put operation to update backup set content - * @param name - backup set's name - * @param tables - list of tables - * @return put operation - */ - static Put createPutForBackupSet(String name, String[] tables) { - Put put = new Put(rowkey(SET_KEY_PREFIX, name)); - byte[] value = 
convertToByteArray(tables); - put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); - return put; - } - - private static byte[] convertToByteArray(String[] tables) { - return StringUtils.join(tables, ",").getBytes(); - } - - /** - * Converts cell to backup set list. - * @param current - cell - * @return backup set - * @throws IOException - */ - static String[] cellValueToBackupSet(Cell current) throws IOException { - byte[] data = CellUtil.cloneValue(current); - if (data != null && data.length > 0) { - return new String(data).split(","); - } else { - return new String[0]; - } - } - - /** - * Converts cell key to backup set name. - * @param current - cell - * @return backup set name - * @throws IOException - */ - static String cellKeyToBackupSetName(Cell current) throws IOException { - byte[] data = CellUtil.cloneRow(current); - return new String(data).substring(SET_KEY_PREFIX.length()); - } - - static byte[] rowkey(String s, String... other) { - StringBuilder sb = new StringBuilder(s); - for (String ss : other) { - sb.append(ss); - } - return sb.toString().getBytes(); - } - -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java deleted file mode 100644 index c22f51b..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java +++ /dev/null @@ -1,437 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.util; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.TreeMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.impl.BackupManifest; -import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * A collection of methods used by multiple classes to backup HBase tables. 
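The BackupSystemTable scan builders above all follow one prefix-scan pattern: the start row is a string prefix (optionally joined with the backup root and a NULL separator), and the stop row is the same bytes with the last byte incremented, giving an exclusive upper bound that covers every key beginning with that prefix. A minimal, self-contained sketch of the pattern against the HBase 1.x Scan API; the prefix string and column family below are illustrative placeholders, not the real BackupSystemTable constants:

import java.util.Arrays;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixScanSketch {

  /** Builds a scan over every row that starts with the given prefix. */
  static Scan scanForPrefix(String prefix, byte[] family) {
    byte[] startRow = Bytes.toBytes(prefix);
    // Exclusive stop row: copy the prefix and bump its last byte, so the range
    // [startRow, stopRow) contains exactly the rows sharing the prefix.
    // Like the deleted helpers, this assumes the prefix does not end in 0xFF.
    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    scan.addFamily(family);
    scan.setMaxVersions(1); // the latest cell per column is enough for metadata reads
    return scan;
  }

  public static void main(String[] args) {
    Scan scan = scanForPrefix("session:", Bytes.toBytes("meta")); // illustrative names
    System.out.println(Bytes.toStringBinary(scan.getStartRow()) + " .. "
        + Bytes.toStringBinary(scan.getStopRow()));
  }
}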
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class BackupClientUtil { - protected static final Log LOG = LogFactory.getLog(BackupClientUtil.class); - public static final String LOGNAME_SEPARATOR = "."; - - private BackupClientUtil() { - throw new AssertionError("Instantiating utility class..."); - } - - /** - * Check whether the backup path exist - * @param backupStr backup - * @param conf configuration - * @return Yes if path exists - * @throws IOException exception - */ - public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException { - boolean isExist = false; - Path backupPath = new Path(backupStr); - FileSystem fileSys = backupPath.getFileSystem(conf); - String targetFsScheme = fileSys.getUri().getScheme(); - if (LOG.isTraceEnabled()) { - LOG.trace("Schema of given url: " + backupStr + " is: " + targetFsScheme); - } - if (fileSys.exists(backupPath)) { - isExist = true; - } - return isExist; - } - - // check target path first, confirm it doesn't exist before backup - public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException { - boolean targetExists = false; - try { - targetExists = checkPathExist(backupRootPath, conf); - } catch (IOException e) { - String expMsg = e.getMessage(); - String newMsg = null; - if (expMsg.contains("No FileSystem for scheme")) { - newMsg = - "Unsupported filesystem scheme found in the backup target url. Error Message: " - + newMsg; - LOG.error(newMsg); - throw new IOException(newMsg); - } else { - throw e; - } - } - - if (targetExists) { - LOG.info("Using existing backup root dir: " + backupRootPath); - } else { - LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created."); - } - } - - /** - * Get the min value for all the Values a map. - * @param map map - * @return the min value - */ - public static Long getMinValue(HashMap map) { - Long minTimestamp = null; - if (map != null) { - ArrayList timestampList = new ArrayList(map.values()); - Collections.sort(timestampList); - // The min among all the RS log timestamps will be kept in hbase:backup table. - minTimestamp = timestampList.get(0); - } - return minTimestamp; - } - - /** - * Parses host name:port from archived WAL path - * @param p path - * @return host name - * @throws IOException exception - */ - public static String parseHostFromOldLog(Path p) { - try { - String n = p.getName(); - int idx = n.lastIndexOf(LOGNAME_SEPARATOR); - String s = URLDecoder.decode(n.substring(0, idx), "UTF8"); - return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s); - } catch (Exception e) { - LOG.warn("Skip log file (can't parse): " + p); - return null; - } - } - - /** - * Given the log file, parse the timestamp from the file name. The timestamp is the last number. 
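parseHostFromOldLog above splits an archived WAL file name at its last '.' separator, URL-decodes the server-name part, and rebuilds host:port from it; the trailing number is the roll timestamp. A small sketch of that parsing, using a made-up file name of the host%2Cport%2Cstartcode.timestamp shape the helper expects:

import java.net.URLDecoder;

import org.apache.hadoop.hbase.ServerName;

public class WalNameParseSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical archived WAL name: URL-encoded ServerName, a '.', then the roll timestamp.
    String walName = "rs1.example.com%2C16020%2C1396650000000.1396650096738";
    int idx = walName.lastIndexOf('.');
    String serverPart = URLDecoder.decode(walName.substring(0, idx), "UTF-8");
    String hostPort =
        ServerName.parseHostname(serverPart) + ":" + ServerName.parsePort(serverPart);
    long rollTs = Long.parseLong(walName.substring(idx + 1));
    System.out.println(hostPort + " rolled at " + rollTs); // rs1.example.com:16020 rolled at 1396650096738
  }
}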
- * @param p a path to the log file - * @return the timestamp - * @throws IOException exception - */ - public static Long getCreationTime(Path p) throws IOException { - int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR); - if (idx < 0) { - throw new IOException("Cannot parse timestamp from path " + p); - } - String ts = p.getName().substring(idx + 1); - return Long.parseLong(ts); - } - - public static List getFiles(FileSystem fs, Path rootDir, List files, - PathFilter filter) throws FileNotFoundException, IOException { - RemoteIterator it = fs.listFiles(rootDir, true); - - while (it.hasNext()) { - LocatedFileStatus lfs = it.next(); - if (lfs.isDirectory()) { - continue; - } - // apply filter - if (filter.accept(lfs.getPath())) { - files.add(lfs.getPath().toString()); - } - } - return files; - } - - public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException { - cleanupHLogDir(context, conf); - cleanupTargetDir(context, conf); - } - - /** - * Clean up directories which are generated when DistCp copying hlogs. - * @throws IOException - */ - private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf) - throws IOException { - - String logDir = backupContext.getHLogTargetDir(); - if (logDir == null) { - LOG.warn("No log directory specified for " + backupContext.getBackupId()); - return; - } - - Path rootPath = new Path(logDir).getParent(); - FileSystem fs = FileSystem.get(rootPath.toUri(), conf); - FileStatus[] files = listStatus(fs, rootPath, null); - if (files == null) { - return; - } - for (FileStatus file : files) { - LOG.debug("Delete log files: " + file.getPath().getName()); - fs.delete(file.getPath(), true); - } - } - - /** - * Clean up the data at target directory - */ - private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { - try { - // clean up the data at target directory - LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId()); - String targetDir = backupInfo.getTargetRootDir(); - if (targetDir == null) { - LOG.warn("No target directory specified for " + backupInfo.getBackupId()); - return; - } - - FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); - - for (TableName table : backupInfo.getTables()) { - Path targetDirPath = - new Path(getTableBackupDir(backupInfo.getTargetRootDir(), backupInfo.getBackupId(), - table)); - if (outputFs.delete(targetDirPath, true)) { - LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); - } else { - LOG.info("No data has been found in " + targetDirPath.toString() + "."); - } - - Path tableDir = targetDirPath.getParent(); - FileStatus[] backups = listStatus(outputFs, tableDir, null); - if (backups == null || backups.length == 0) { - outputFs.delete(tableDir, true); - LOG.debug(tableDir.toString() + " is empty, remove it."); - } - } - outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true); - } catch (IOException e1) { - LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at " - + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); - } - } - - /** - * Given the backup root dir, backup id and the table name, return the backup image location, - * which is also where the backup manifest file is. 
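getFiles above walks a directory tree with FileSystem.listFiles(path, true) and keeps only the paths accepted by a PathFilter. A self-contained sketch of the same traversal; the root path and the suffix filter are placeholders chosen for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;

public class ListFilesSketch {

  /** Recursively collects every file under rootDir whose path passes the filter. */
  static List<String> collectFiles(FileSystem fs, Path rootDir, PathFilter filter)
      throws IOException {
    List<String> files = new ArrayList<String>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
    while (it.hasNext()) {
      LocatedFileStatus lfs = it.next();
      if (lfs.isDirectory()) {
        continue; // defensive: listFiles(recursive=true) normally yields files only
      }
      if (filter.accept(lfs.getPath())) {
        files.add(lfs.getPath().toString());
      }
    }
    return files;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder root; keep only names that end in a numeric suffix, like archived WALs.
    PathFilter filter = new PathFilter() {
      @Override
      public boolean accept(Path p) {
        int idx = p.getName().lastIndexOf('.');
        return idx > 0 && p.getName().substring(idx + 1).matches("\\d+");
      }
    };
    for (String f : collectFiles(fs, new Path("/tmp/oldWALs"), filter)) {
      System.out.println(f);
    }
  }
}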
return value look like: - * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" - * @param backupRootDir backup root directory - * @param backupId backup id - * @param table table name - * @return backupPath String for the particular table - */ - public static String - getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { - return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() - + Path.SEPARATOR; - } - - public static TableName[] parseTableNames(String tables) { - if (tables == null) { - return null; - } - String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); - - TableName[] ret = new TableName[tableArray.length]; - for (int i = 0; i < tableArray.length; i++) { - ret[i] = TableName.valueOf(tableArray[i]); - } - return ret; - } - - /** - * Sort history list by start time in descending order. - * @param historyList history list - * @return sorted list of BackupCompleteData - */ - public static ArrayList sortHistoryListDesc(ArrayList historyList) { - ArrayList list = new ArrayList(); - TreeMap map = new TreeMap(); - for (BackupInfo h : historyList) { - map.put(Long.toString(h.getStartTs()), h); - } - Iterator i = map.descendingKeySet().iterator(); - while (i.hasNext()) { - list.add(map.get(i.next())); - } - return list; - } - - /** - * Returns WAL file name - * @param walFileName WAL file name - * @return WAL file name - * @throws IOException exception - * @throws IllegalArgumentException exception - */ - public static String getUniqueWALFileNamePart(String walFileName) throws IOException { - return getUniqueWALFileNamePart(new Path(walFileName)); - } - - /** - * Returns WAL file name - * @param p - WAL file path - * @return WAL file name - * @throws IOException exception - */ - public static String getUniqueWALFileNamePart(Path p) throws IOException { - return p.getName(); - } - - /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates - * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and - * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. - * @param fs file system - * @param dir directory - * @param filter path filter - * @return null if dir is empty or doesn't exist, otherwise FileStatus array - */ - public static FileStatus[] - listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException { - FileStatus[] status = null; - try { - status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); - } catch (FileNotFoundException fnfe) { - // if directory doesn't exist, return null - if (LOG.isTraceEnabled()) { - LOG.trace(dir + " doesn't exist"); - } - } - if (status == null || status.length < 1) return null; - return status; - } - - /** - * Return the 'path' component of a Path. In Hadoop, Path is an URI. This method returns the - * 'path' component of a Path's URI: e.g. If a Path is - * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, this method returns - * /hbase_trunk/TestTable/compaction.dir. This method is useful if you want to print - * out a Path without qualifying Filesystem instance. - * @param p Filesystem Path whose 'path' component we are to return. 
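getTableBackupDir above is pure string assembly: root, backup id, namespace, and table qualifier joined by Path.SEPARATOR. A tiny sketch that reproduces the layout from the javadoc example; the helper name here is hypothetical:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;

public class BackupLayoutSketch {

  /** Mirrors the root/backupId/namespace/qualifier/ layout of a table backup image. */
  static String tableBackupDir(String backupRootDir, String backupId, TableName table) {
    return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
        + table.getNamespaceAsString() + Path.SEPARATOR + table.getQualifierAsString()
        + Path.SEPARATOR;
  }

  public static void main(String[] args) {
    // Prints: hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/
    System.out.println(tableBackupDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1",
        "backup_1396650096738", TableName.valueOf("default", "t1_dn")));
  }
}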
- * @return Path portion of the Filesystem - */ - public static String getPath(Path p) { - return p.toUri().getPath(); - } - - /** - * Given the backup root dir and the backup id, return the log file location for an incremental - * backup. - * @param backupRootDir backup root directory - * @param backupId backup id - * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" - */ - public static String getLogBackupDir(String backupRootDir, String backupId) { - return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR - + HConstants.HREGION_LOGDIR_NAME; - } - - private static List getHistory(Configuration conf, Path backupRootPath) - throws IOException { - // Get all (n) history from backup root destination - FileSystem fs = FileSystem.get(conf); - RemoteIterator it = fs.listLocatedStatus(backupRootPath); - - List infos = new ArrayList(); - while (it.hasNext()) { - LocatedFileStatus lfs = it.next(); - if (!lfs.isDirectory()) continue; - String backupId = lfs.getPath().getName(); - try { - BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs); - infos.add(info); - } catch(IOException e) { - LOG.error("Can not load backup info from: "+ lfs.getPath(), e); - } - } - // Sort - Collections.sort(infos, new Comparator() { - - @Override - public int compare(BackupInfo o1, BackupInfo o2) { - long ts1 = getTimestamp(o1.getBackupId()); - long ts2 = getTimestamp(o2.getBackupId()); - if (ts1 == ts2) return 0; - return ts1 < ts2 ? 1 : -1; - } - - private long getTimestamp(String backupId) { - String[] split = backupId.split("_"); - return Long.parseLong(split[1]); - } - }); - return infos; - } - - public static List getHistory(Configuration conf, int n, Path backupRootPath, - BackupInfo.Filter... filters) throws IOException { - List infos = getHistory(conf, backupRootPath); - List ret = new ArrayList(); - for (BackupInfo info : infos) { - if (ret.size() == n) { - break; - } - boolean passed = true; - for (int i = 0; i < filters.length; i++) { - if (!filters[i].apply(info)) { - passed = false; - break; - } - } - if (passed) { - ret.add(info); - } - } - return ret; - } - - public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs) - throws IOException { - Path backupPath = new Path(backupRootPath, backupId); - - RemoteIterator it = fs.listFiles(backupPath, true); - while (it.hasNext()) { - LocatedFileStatus lfs = it.next(); - if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) { - // Load BackupManifest - BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent()); - BackupInfo info = manifest.toBackupInfo(); - return info; - } - } - return null; - } -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java deleted file mode 100644 index 76402c7..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
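getHistory above orders sessions newest-first by pulling the numeric suffix out of each backup id. A minimal sketch of that ordering, assuming ids always follow the backup_<startTimestamp> convention the code relies on:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class HistoryOrderSketch {

  /** Extracts the start timestamp from an id of the form "backup_1396650096738". */
  static long timestampOf(String backupId) {
    return Long.parseLong(backupId.split("_")[1]);
  }

  public static void main(String[] args) {
    List<String> ids = new ArrayList<String>(Arrays.asList(
        "backup_1396650096738", "backup_1396650300000", "backup_1396650000001"));
    // Descending by timestamp, i.e. the most recent session first.
    Collections.sort(ids, new Comparator<String>() {
      @Override
      public int compare(String a, String b) {
        return Long.compare(timestampOf(b), timestampOf(a));
      }
    });
    System.out.println(ids); // [backup_1396650300000, backup_1396650096738, backup_1396650000001]
  }
}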
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.backup.util; -import java.util.List; - -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -/** - * Backup set is a named group of HBase tables, - * which are managed together by Backup/Restore - * framework. Instead of using list of tables in backup or restore - * operation, one can use set's name instead. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class BackupSet { - private final String name; - private final List tables; - - public BackupSet(String name, List tables) { - this.name = name; - this.tables = tables; - } - - public String getName() { - return name; - } - - public List getTables() { - return tables; - } - - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(name).append("={"); - for (int i = 0; i < tables.size(); i++) { - sb.append(tables.get(i)); - if (i < tables.size() - 1) { - sb.append(","); - } - } - sb.append("}"); - return sb.toString(); - } - -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 343dad4..17d5e78 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -37,8 +37,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -1696,13 +1694,6 @@ public interface Admin extends Abortable, Closeable { * @return true if the switch is enabled, false otherwise. */ boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException; - - /** - * Get Backup Admin interface - * @return backup admin object - * @throws IOException exception - */ - BackupAdmin getBackupAdmin() throws IOException; /** * Currently, there are only two compact types: diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java deleted file mode 100644 index 2e5ca2a..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
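BackupSet above is a plain named group of tables whose toString() renders as name={t1,t2}. A trivial construction sketch, assuming the constructor takes a List<TableName> as the field declarations suggest:

import java.util.Arrays;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupSet;

public class BackupSetSketch {
  public static void main(String[] args) {
    BackupSet nightly = new BackupSet("nightly",
        Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")));
    System.out.println(nightly); // nightly={t1,t2}
  }
}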
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Future; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.RestoreRequest; -import org.apache.hadoop.hbase.backup.util.BackupSet; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -/** - * The administrative API for HBase Backup. Obtain an instance from - * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. - *

BackupAdmin can be used to create backups, restore data from backups and for - * other backup-related operations. - * - * @see Admin - * @since 2.0 - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving - -public interface BackupAdmin extends Closeable{ - - /** - * Backs up given list of tables fully. Synchronous operation. - * - * @param request BackupRequest instance which contains the following members: - * type whether the backup is full or incremental - * tableList list of tables to backup - * targetRootDir root directory for saving the backup - * workers number of parallel workers. -1 - system defined - * bandwidth bandwidth per worker in MB per second. -1 - unlimited - * @return the backup Id - */ - - public String backupTables(final BackupRequest userRequest) throws IOException; - - /** - * Backs up given list of tables fully. Asynchronous operation. - * - * @param request BackupRequest instance which contains the following members: - * type whether the backup is full or incremental - * tableList list of tables to backup - * targetRootDir root dir for saving the backup - * workers number of paralle workers. -1 - system defined - * bandwidth bandwidth per worker in MB per sec. -1 - unlimited - * @return the backup Id future - */ - public Future backupTablesAsync(final BackupRequest userRequest) throws IOException; - - /** - * Restore backup - * @param request - restore request - * @throws IOException exception - */ - public void restore(RestoreRequest request) throws IOException; - - /** - * Restore backup - * @param request - restore request - * @return Future which client can wait on - * @throws IOException exception - */ - public Future restoreAsync(RestoreRequest request) throws IOException; - - /** - * Describe backup image command - * @param backupId - backup id - * @return backup info - * @throws IOException exception - */ - public BackupInfo getBackupInfo(String backupId) throws IOException; - - /** - * Show backup progress command - * @param backupId - backup id (may be null) - * @return backup progress (0-100%), -1 if no active sessions - * or session not found - * @throws IOException exception - */ - public int getProgress(String backupId) throws IOException; - - /** - * Delete backup image command - * @param backupIds - backup id - * @return total number of deleted sessions - * @throws IOException exception - */ - public int deleteBackups(String[] backupIds) throws IOException; - - /** - * Show backup history command - * @param n - last n backup sessions - * @return list of backup infos - * @throws IOException exception - */ - public List getHistory(int n) throws IOException; - - - /** - * Show backup history command with filters - * @param n - last n backup sessions - * @param f - list of filters - * @return list of backup infos - * @throws IOException exception - */ - public List getHistory(int n, BackupInfo.Filter ... f) throws IOException; - - - /** - * Backup sets list command - list all backup sets. Backup set is - * a named group of tables. - * @return all registered backup sets - * @throws IOException exception - */ - public List listBackupSets() throws IOException; - - /** - * Backup set describe command. Shows list of tables in - * this particular backup set. 
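The monitoring and history methods declared above lend themselves to a short usage sketch. Everything below is illustrative: it assumes an already-constructed BackupAdmin (the pre-removal org.apache.hadoop.hbase.client package is used), a hypothetical backup id, and only the calls declared in this interface (getBackupInfo, getProgress, getHistory with a BackupInfo.Filter, deleteBackups):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.client.BackupAdmin;

public class BackupMonitoringSketch {

  static void inspect(BackupAdmin admin, String backupId, final long cutoffTs) throws IOException {
    BackupInfo info = admin.getBackupInfo(backupId); // null when the id is unknown
    if (info != null) {
      // getProgress returns 0-100, or -1 when the session cannot be found.
      System.out.println(info.getBackupId() + " progress=" + admin.getProgress(backupId) + "%");
    }

    // Last 10 sessions that started after cutoffTs, selected via the BackupInfo.Filter hook.
    List<BackupInfo> recent = admin.getHistory(10, new BackupInfo.Filter() {
      @Override
      public boolean apply(BackupInfo bi) {
        return bi.getStartTs() > cutoffTs;
      }
    });
    for (BackupInfo bi : recent) {
      System.out.println(bi.getBackupId());
    }

    // Stale sessions can be dropped by id; the return value is the number actually deleted.
    int deleted = admin.deleteBackups(new String[] { "backup_1396650000001" }); // hypothetical id
    System.out.println(deleted + " backup(s) deleted");
  }
}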
- * @param name set name - * @return backup set description or null - * @throws IOException exception - */ - public BackupSet getBackupSet(String name) throws IOException; - - /** - * Delete backup set command - * @param name - backup set name - * @return true, if success, false - otherwise - * @throws IOException exception - */ - public boolean deleteBackupSet(String name) throws IOException; - - /** - * Add tables to backup set command - * @param name - name of backup set. - * @param tables - list of tables to be added to this set. - * @throws IOException exception - */ - public void addToBackupSet(String name, TableName[] tables) throws IOException; - - /** - * Remove tables from backup set - * @param name - name of backup set. - * @param tables - list of tables to be removed from this set. - * @throws IOException exception - */ - public void removeFromBackupSet(String name, String[] tables) throws IOException; -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 1ac43f9..c8367b9 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1400,19 +1400,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return stub.listProcedures(controller, request); } - @Override - public MasterProtos.BackupTablesResponse backupTables( - RpcController controller, - MasterProtos.BackupTablesRequest request) throws ServiceException { - return stub.backupTables(controller, request); - } - - @Override - public MasterProtos.RestoreTablesResponse restoreTables( - RpcController controller, - MasterProtos.RestoreTablesRequest request) throws ServiceException { - return stub.restoreTables(controller, request); - } @Override public MasterProtos.AddColumnResponse addColumn( diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index aa4b3f6..f6ee79a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -61,9 +61,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.RestoreRequest; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -86,8 +83,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; @@ -149,8 +144,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRespon import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -219,9 +212,7 @@ public class HBaseAdmin implements Admin { // numRetries is for 'normal' stuff... Multiply by this factor when // want to wait a long time. private final int retryLongerMultiplier; - private final int syncWaitTimeout; - private final long backupWaitTimeout; - private final long restoreWaitTimeout; + private final int syncWaitTimeout; private boolean aborted; private int operationTimeout; @@ -248,10 +239,6 @@ public class HBaseAdmin implements Admin { HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.syncWaitTimeout = this.conf.getInt( "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min - this.backupWaitTimeout = this.conf.getInt( - "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h - this.restoreWaitTimeout = this.conf.getInt( - "hbase.client.restore.wait.timeout.sec", 24 * 3600); // 24 h this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); this.ng = this.connection.getNonceGenerator(); @@ -1571,112 +1558,112 @@ public class HBaseAdmin implements Admin { ProtobufUtil.split(admin, hri, splitPoint); } - Future backupTablesAsync(final BackupRequest userRequest) throws IOException { - BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf); - if (userRequest.getTableList() != null) { - for (TableName table : userRequest.getTableList()) { - if (!tableExists(table)) { - throw new DoNotRetryIOException(table + "does not exist"); - } - } - } - BackupTablesResponse response = executeCallable( - new MasterCallable(getConnection()) { - @Override - public BackupTablesResponse call(int callTimeout) throws ServiceException { - BackupTablesRequest request = RequestConverter.buildBackupTablesRequest( - userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(), - userRequest.getWorkers(), userRequest.getBandwidth(), - userRequest.getBackupSetName(), ng.getNonceGroup(),ng.newNonce()); - return master.backupTables(null, request); - } - }, (int) backupWaitTimeout); - return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response); - } - - String backupTables(final BackupRequest userRequest) throws IOException { - return get( - backupTablesAsync(userRequest), - backupWaitTimeout, - TimeUnit.SECONDS); - } - - public static class TableBackupFuture extends TableFuture { - String backupId; - public TableBackupFuture(final HBaseAdmin admin, final TableName tableName, - final BackupTablesResponse response) { - super(admin, tableName, - (response != null && response.hasProcId()) ? 
response.getProcId() : null); - backupId = response.getBackupId(); - } - - String getBackupId() { - return backupId; - } - - @Override - public String getOperationType() { - return "BACKUP"; - } - - @Override - protected String convertResult(final GetProcedureResultResponse response) throws IOException { - if (response.hasException()) { - throw ForeignExceptionUtil.toIOException(response.getException()); - } - ByteString result = response.getResult(); - if (result == null) return null; - return Bytes.toStringBinary(result.toByteArray()); - } - - @Override - protected String postOperationResult(final String result, - final long deadlineTs) throws IOException, TimeoutException { - return result; - } - } - - /** - * Restore operation. - * @param request RestoreRequest instance - * @throws IOException - */ - public Future restoreTablesAsync(final RestoreRequest userRequest) throws IOException { - RestoreTablesResponse response = executeCallable( - new MasterCallable(getConnection()) { - @Override - public RestoreTablesResponse call(int callTimeout) throws ServiceException { - try { - RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest( - userRequest.getBackupRootDir(), userRequest.getBackupId(), - userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(), - userRequest.isOverwrite(), ng.getNonceGroup(), ng.newNonce()); - return master.restoreTables(null, request); - } catch (IOException ioe) { - throw new ServiceException(ioe); - } - } - }); - return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response); - } - - public void restoreTables(final RestoreRequest userRequest) throws IOException { - get(restoreTablesAsync(userRequest), - restoreWaitTimeout, TimeUnit.SECONDS); - } - - private static class TableRestoreFuture extends TableFuture { - public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName, - final RestoreTablesResponse response) { - super(admin, tableName, - (response != null) ? 
response.getProcId() : null); - } - - @Override - public String getOperationType() { - return "RESTORE"; - } - } +// Future backupTablesAsync(final BackupRequest userRequest) throws IOException { +// BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf); +// if (userRequest.getTableList() != null) { +// for (TableName table : userRequest.getTableList()) { +// if (!tableExists(table)) { +// throw new DoNotRetryIOException(table + "does not exist"); +// } +// } +// } +// BackupTablesResponse response = executeCallable( +// new MasterCallable(getConnection()) { +// @Override +// public BackupTablesResponse call(int callTimeout) throws ServiceException { +// BackupTablesRequest request = RequestConverter.buildBackupTablesRequest( +// userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(), +// userRequest.getWorkers(), userRequest.getBandwidth(), +// userRequest.getBackupSetName(), ng.getNonceGroup(),ng.newNonce()); +// return master.backupTables(null, request); +// } +// }, (int) backupWaitTimeout); +// return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response); +// } +// +// String backupTables(final BackupRequest userRequest) throws IOException { +// return get( +// backupTablesAsync(userRequest), +// backupWaitTimeout, +// TimeUnit.SECONDS); +// } +// +// public static class TableBackupFuture extends TableFuture { +// String backupId; +// public TableBackupFuture(final HBaseAdmin admin, final TableName tableName, +// final BackupTablesResponse response) { +// super(admin, tableName, +// (response != null && response.hasProcId()) ? response.getProcId() : null); +// backupId = response.getBackupId(); +// } +// +// String getBackupId() { +// return backupId; +// } +// +// @Override +// public String getOperationType() { +// return "BACKUP"; +// } +// +// @Override +// protected String convertResult(final GetProcedureResultResponse response) throws IOException { +// if (response.hasException()) { +// throw ForeignExceptionUtil.toIOException(response.getException()); +// } +// ByteString result = response.getResult(); +// if (result == null) return null; +// return Bytes.toStringBinary(result.toByteArray()); +// } +// +// @Override +// protected String postOperationResult(final String result, +// final long deadlineTs) throws IOException, TimeoutException { +// return result; +// } +// } +// +// /** +// * Restore operation. 
+// * @param request RestoreRequest instance +// * @throws IOException +// */ +// public Future restoreTablesAsync(final RestoreRequest userRequest) throws IOException { +// RestoreTablesResponse response = executeCallable( +// new MasterCallable(getConnection()) { +// @Override +// public RestoreTablesResponse call(int callTimeout) throws ServiceException { +// try { +// RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest( +// userRequest.getBackupRootDir(), userRequest.getBackupId(), +// userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(), +// userRequest.isOverwrite(), ng.getNonceGroup(), ng.newNonce()); +// return master.restoreTables(null, request); +// } catch (IOException ioe) { +// throw new ServiceException(ioe); +// } +// } +// }); +// return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response); +// } +// +// public void restoreTables(final RestoreRequest userRequest) throws IOException { +// get(restoreTablesAsync(userRequest), +// restoreWaitTimeout, TimeUnit.SECONDS); +// } +// +// private static class TableRestoreFuture extends TableFuture { +// public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName, +// final RestoreTablesResponse response) { +// super(admin, tableName, +// (response != null) ? response.getProcId() : null); +// } +// +// @Override +// public String getOperationType() { +// return "RESTORE"; +// } +// } @Override public Future modifyTable(final TableName tableName, final HTableDescriptor htd) @@ -3544,9 +3531,5 @@ public class HBaseAdmin implements Admin { HConstants.EMPTY_END_ROW, false, 0); } - @Override - public BackupAdmin getBackupAdmin() throws IOException { - return new HBaseBackupAdmin(this); - } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java deleted file mode 100644 index dfa2fb1..0000000 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java +++ /dev/null @@ -1,439 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Future; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.RestoreRequest; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.BackupSet; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * The administrative API implementation for HBase Backup . Obtain an instance from - * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. - *

BackupAdmin can be used to create backups, restore data from backups and for - * other backup-related operations. - * - * @see Admin - * @since 2.0 - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving - -public class HBaseBackupAdmin implements BackupAdmin { - private static final Log LOG = LogFactory.getLog(HBaseBackupAdmin.class); - - private final HBaseAdmin admin; - private final Connection conn; - - HBaseBackupAdmin(HBaseAdmin admin) { - this.admin = admin; - this.conn = admin.getConnection(); - } - - @Override - public void close() throws IOException { - } - - @Override - public BackupInfo getBackupInfo(String backupId) throws IOException { - BackupInfo backupInfo = null; - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - backupInfo = table.readBackupInfo(backupId); - return backupInfo; - } - } - - @Override - public int getProgress(String backupId) throws IOException { - BackupInfo backupInfo = null; - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - if (backupId == null) { - ArrayList recentSessions = table.getBackupContexts(BackupState.RUNNING); - if (recentSessions.isEmpty()) { - LOG.warn("No ongoing sessions found."); - return -1; - } - // else show status for ongoing session - // must be one maximum - return recentSessions.get(0).getProgress(); - } else { - - backupInfo = table.readBackupInfo(backupId); - if (backupInfo != null) { - return backupInfo.getProgress(); - } else { - LOG.warn("No information found for backupID=" + backupId); - return -1; - } - } - } - } - - @Override - public int deleteBackups(String[] backupIds) throws IOException { - // TODO: requires FT, failure will leave system - // in non-consistent state - // see HBASE-15227 - - int totalDeleted = 0; - Map> allTablesMap = new HashMap>(); - - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { - for (int i = 0; i < backupIds.length; i++) { - BackupInfo info = sysTable.readBackupInfo(backupIds[i]); - if (info != null) { - String rootDir = info.getTargetRootDir(); - HashSet allTables = allTablesMap.get(rootDir); - if (allTables == null) { - allTables = new HashSet(); - allTablesMap.put(rootDir, allTables); - } - allTables.addAll(info.getTableNames()); - totalDeleted += deleteBackup(backupIds[i], sysTable); - } - } - finalizeDelete(allTablesMap, sysTable); - } - return totalDeleted; - } - - /** - * Updates incremental backup set for every backupRoot - * @param tablesMap - Map [backupRoot: Set] - * @param table - backup system table - * @throws IOException - */ - - private void finalizeDelete(Map> tablesMap, BackupSystemTable table) - throws IOException { - for (String backupRoot : tablesMap.keySet()) { - Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); - Map> tableMap = - table.getBackupHistoryForTableSet(incrTableSet, backupRoot); - for(Map.Entry> entry: tableMap.entrySet()) { - if(entry.getValue() == null) { - // No more backups for a table - incrTableSet.remove(entry.getKey()); - } - } - if (!incrTableSet.isEmpty()) { - table.addIncrementalBackupTableSet(incrTableSet, backupRoot); - } else { // empty - table.deleteIncrementalBackupTableSet(backupRoot); - } - } - } - - /** - * Delete single backup and all related backups - * Algorithm: - * - * Backup type: FULL or INCREMENTAL - * Is this last backup session for table T: YES or NO - * For every table T from table list 'tables': - * if(FULL, YES) deletes only physical data (PD) - * if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo, until - * we 
either reach the most recent backup for T in the system or FULL backup which - * includes T - * if(INCREMENTAL, YES) deletes only physical data (PD) - * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images - * between last FULL backup, which is older than the backup being deleted and the next - * FULL backup (if exists) or last one for a particular table T and removes T from list - * of backup tables. - * @param backupId - backup id - * @param sysTable - backup system table - * @return total - number of deleted backup images - * @throws IOException - */ - private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { - - BackupInfo backupInfo = sysTable.readBackupInfo(backupId); - - int totalDeleted = 0; - if (backupInfo != null) { - LOG.info("Deleting backup " + backupInfo.getBackupId() + " ..."); - BackupClientUtil.cleanupBackupData(backupInfo, admin.getConfiguration()); - // List of tables in this backup; - List tables = backupInfo.getTableNames(); - long startTime = backupInfo.getStartTs(); - for (TableName tn : tables) { - boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime); - if (isLastBackupSession) { - continue; - } - // else - List affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable); - for (BackupInfo info : affectedBackups) { - if (info.equals(backupInfo)) { - continue; - } - removeTableFromBackupImage(info, tn, sysTable); - } - } - LOG.debug("Delete backup info "+ backupInfo.getBackupId()); - - sysTable.deleteBackupInfo(backupInfo.getBackupId()); - LOG.info("Delete backup " + backupInfo.getBackupId() + " completed."); - totalDeleted++; - } else { - LOG.warn("Delete backup failed: no information found for backupID=" + backupId); - } - return totalDeleted; - } - - private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) - throws IOException { - List tables = info.getTableNames(); - LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" + - info.getTableListAsString()); - if (tables.contains(tn)) { - tables.remove(tn); - - if (tables.isEmpty()) { - LOG.debug("Delete backup info "+ info.getBackupId()); - - sysTable.deleteBackupInfo(info.getBackupId()); - BackupClientUtil.cleanupBackupData(info, conn.getConfiguration()); - } else { - info.setTables(tables); - sysTable.updateBackupInfo(info); - // Now, clean up directory for table - cleanupBackupDir(info, tn, conn.getConfiguration()); - } - } - } - - private List getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, - BackupSystemTable table) throws IOException { - LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); - long ts = backupInfo.getStartTs(); - List list = new ArrayList(); - List history = table.getBackupHistory(backupInfo.getTargetRootDir()); - // Scan from most recent to backupInfo - // break when backupInfo reached - for (BackupInfo info : history) { - if (info.getStartTs() == ts) { - break; - } - List tables = info.getTableNames(); - if (tables.contains(tn)) { - BackupType bt = info.getType(); - if (bt == BackupType.FULL) { - // Clear list if we encounter FULL backup - list.clear(); - } else { - LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn - + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); - list.add(info); - } - } - } - return list; - } - - - - /** - * Clean up the data at target directory - * @throws IOException - */ - private void cleanupBackupDir(BackupInfo 
backupInfo, TableName table, Configuration conf) - throws IOException { - try { - // clean up the data at target directory - String targetDir = backupInfo.getTargetRootDir(); - if (targetDir == null) { - LOG.warn("No target directory specified for " + backupInfo.getBackupId()); - return; - } - - FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); - - Path targetDirPath = - new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(), - backupInfo.getBackupId(), table)); - if (outputFs.delete(targetDirPath, true)) { - LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); - } else { - LOG.info("No data has been found in " + targetDirPath.toString() + "."); - } - - } catch (IOException e1) { - LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table - + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); - throw e1; - } - } - - private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) - throws IOException { - List history = table.getBackupHistory(); - for (BackupInfo info : history) { - List tables = info.getTableNames(); - if (!tables.contains(tn)) { - continue; - } - if (info.getStartTs() <= startTime) { - return true; - } else { - return false; - } - } - return false; - } - - @Override - public List getHistory(int n) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - List history = table.getBackupHistory(); - if (history.size() <= n) return history; - List list = new ArrayList(); - for (int i = 0; i < n; i++) { - list.add(history.get(i)); - } - return list; - } - } - - @Override - public List getHistory(int n, BackupInfo.Filter ... filters) throws IOException { - if (filters.length == 0) return getHistory(n); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - List history = table.getBackupHistory(); - List result = new ArrayList(); - for(BackupInfo bi: history) { - if(result.size() == n) break; - boolean passed = true; - for(int i=0; i < filters.length; i++) { - if(!filters[i].apply(bi)) { - passed = false; - break; - } - } - if(passed) { - result.add(bi); - } - } - return result; - } - } - - @Override - public List listBackupSets() throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - List list = table.listBackupSets(); - List bslist = new ArrayList(); - for (String s : list) { - List tables = table.describeBackupSet(s); - if (tables != null) { - bslist.add(new BackupSet(s, tables)); - } - } - return bslist; - } - } - - @Override - public BackupSet getBackupSet(String name) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - List list = table.describeBackupSet(name); - if (list == null) return null; - return new BackupSet(name, list); - } - } - - @Override - public boolean deleteBackupSet(String name) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - if (table.describeBackupSet(name) == null) { - return false; - } - table.deleteBackupSet(name); - return true; - } - } - - @Override - public void addToBackupSet(String name, TableName[] tables) throws IOException { - String[] tableNames = new String[tables.length]; - for (int i = 0; i < tables.length; i++) { - tableNames[i] = tables[i].getNameAsString(); - if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { - throw new IOException("Cannot add " + tableNames[i] + " because it 
doesn't exist"); - } - } - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - table.addToBackupSet(name, tableNames); - LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name - + "' backup set"); - } - } - - @Override - public void removeFromBackupSet(String name, String[] tables) throws IOException { - LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { - table.removeFromBackupSet(name, tables); - LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name - + "' completed."); - } - } - - @Override - public void restore(RestoreRequest request) throws IOException { - admin.restoreTables(request); - } - - @Override - public Future restoreAsync(RestoreRequest request) throws IOException { - return admin.restoreTablesAsync(request); - } - - @Override - public String backupTables(final BackupRequest userRequest) throws IOException { - return admin.backupTables(userRequest); - } - - @Override - public Future backupTablesAsync(final BackupRequest userRequest) throws IOException { - return admin.backupTablesAsync(userRequest); - } - -} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 0d1700e..1f14191 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; @@ -105,7 +104,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -1271,48 +1269,6 @@ public final class RequestConverter { return builder.build(); } - public static BackupTablesRequest buildBackupTablesRequest( - final BackupType type, List tableList, String targetRootDir, final int workers, - final long bandwidth, String setName, final long nonceGroup, final long nonce) { - BackupTablesRequest.Builder builder = BackupTablesRequest.newBuilder(); - builder.setType(ProtobufUtil.toProtoBackupType(type)); - builder.setTargetRootDir(targetRootDir); - builder.setWorkers(workers); - 
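The backup-set operations implemented just above (addToBackupSet, getBackupSet, removeFromBackupSet, deleteBackupSet) reduce to simple row mutations on the backup system table. A usage sketch against the public interface; the set name and tables are placeholders:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupSet;
import org.apache.hadoop.hbase.client.BackupAdmin;

public class BackupSetAdminSketch {

  static void manageSet(BackupAdmin admin) throws IOException {
    // Register two existing tables under a named set; missing tables are rejected with IOException.
    admin.addToBackupSet("critical",
        new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2") });

    BackupSet set = admin.getBackupSet("critical");
    System.out.println(set); // critical={t1,t2}

    // Shrink the set, then drop it entirely; deleteBackupSet reports whether the set existed.
    admin.removeFromBackupSet("critical", new String[] { "t2" });
    if (admin.deleteBackupSet("critical")) {
      System.out.println("set removed");
    }
  }
}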
builder.setBandwidth(bandwidth); - if(setName != null) { - builder.setBackupSetName(setName); - } - if (tableList != null) { - for (TableName table : tableList) { - builder.addTables(ProtobufUtil.toProtoTableName(table)); - } - } - builder.setNonceGroup(nonceGroup).setNonce(nonce); - return builder.build(); - } - - public static RestoreTablesRequest buildRestoreTablesRequest(String backupRootDir, - String backupId, boolean check, TableName[] sTableList, - TableName[] tTableList, boolean isOverwrite, final long nonceGroup, final long nonce) - throws IOException { - RestoreTablesRequest.Builder builder = RestoreTablesRequest.newBuilder(); - builder.setBackupId(backupId).setBackupRootDir(backupRootDir); - builder.setDependencyCheckOnly(check).setOverwrite(isOverwrite); - if (sTableList != null) { - for (TableName table : sTableList) { - builder.addTables(ProtobufUtil.toProtoTableName(table)); - } - } else { - throw new IOException("Source table list shouldn't be empty"); - } - if (tTableList != null) { - for (TableName table : tTableList) { - builder.addTargetTables(ProtobufUtil.toProtoTableName(table)); - } - } - builder.setNonceGroup(nonceGroup).setNonce(nonce); - return builder.build(); - } /** * Creates a protocol buffer GetSchemaAlterStatusRequest diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index 4bcfa4e..bbb878f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.snapshot; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; @@ -45,7 +44,7 @@ public class ClientSnapshotDescriptionUtils { // make sure the table name is valid, this will implicitly check validity TableName tableName = TableName.valueOf(snapshot.getTable()); - if (tableName.isSystemTable() && !BackupSystemTable.getTableName().equals(tableName)) { + if (tableName.isSystemTable() && !tableName.toString().equals("hbase:backup")) { // allow hbase:backup table snapshot throw new IllegalArgumentException("System table snapshots are not allowed"); } diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java index 7e947aa..95363e5 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java @@ -32,12 +32,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupAdmin; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.BackupAdmin; import 
org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -160,9 +161,11 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { List tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2); HBaseAdmin admin = null; admin = (HBaseAdmin) conn.getAdmin(); + BackupAdmin client = new HBaseBackupAdmin(util.getConnection()); + BackupRequest request = new BackupRequest(); request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); - String backupIdFull = admin.getBackupAdmin().backupTables(request); + String backupIdFull = client.backupTables(request); assertTrue(checkSucceeded(backupIdFull)); // #2 - insert some data to table loadData(TABLE_NAME1, rowsInBatch); @@ -178,11 +181,10 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { request = new BackupRequest(); request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) .setTargetRootDir(BACKUP_ROOT_DIR); - String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request); + String backupIdIncMultiple = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple)); // #4 - restore full backup for all tables, without overwrite TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 }; - BackupAdmin client = util.getAdmin().getBackupAdmin(); client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, null, true)); // #5.1 - check tables for full restore diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java index 4699c81..c5220cc 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java @@ -9,198 +9,27 @@ public final class BackupProtos { com.google.protobuf.ExtensionRegistry registry) { } /** - * Protobuf enum {@code hbase.pb.FullTableBackupState} - */ - public enum FullTableBackupState - implements com.google.protobuf.ProtocolMessageEnum { - /** - * PRE_SNAPSHOT_TABLE = 1; - */ - PRE_SNAPSHOT_TABLE(0, 1), - /** - * SNAPSHOT_TABLES = 2; - */ - SNAPSHOT_TABLES(1, 2), - /** - * SNAPSHOT_COPY = 3; - */ - SNAPSHOT_COPY(2, 3), - /** - * BACKUP_COMPLETE = 4; - */ - BACKUP_COMPLETE(3, 4), - ; - - /** - * PRE_SNAPSHOT_TABLE = 1; - */ - public static final int PRE_SNAPSHOT_TABLE_VALUE = 1; - /** - * SNAPSHOT_TABLES = 2; - */ - public static final int SNAPSHOT_TABLES_VALUE = 2; - /** - * SNAPSHOT_COPY = 3; - */ - public static final int SNAPSHOT_COPY_VALUE = 3; - /** - * BACKUP_COMPLETE = 4; - */ - public static final int BACKUP_COMPLETE_VALUE = 4; - - - public final int getNumber() { return value; } - - public static FullTableBackupState valueOf(int value) { - switch (value) { - case 1: return PRE_SNAPSHOT_TABLE; - case 2: return SNAPSHOT_TABLES; - case 3: return SNAPSHOT_COPY; - case 4: return BACKUP_COMPLETE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public FullTableBackupState findValueByNumber(int number) { - return FullTableBackupState.valueOf(number); - } - }; - - public final 
com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); - } - - private static final FullTableBackupState[] VALUES = values(); - - public static FullTableBackupState valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private FullTableBackupState(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.FullTableBackupState) - } - - /** - * Protobuf enum {@code hbase.pb.IncrementalTableBackupState} - */ - public enum IncrementalTableBackupState - implements com.google.protobuf.ProtocolMessageEnum { - /** - * PREPARE_INCREMENTAL = 1; - */ - PREPARE_INCREMENTAL(0, 1), - /** - * INCREMENTAL_COPY = 2; - */ - INCREMENTAL_COPY(1, 2), - /** - * INCR_BACKUP_COMPLETE = 3; - */ - INCR_BACKUP_COMPLETE(2, 3), - ; - - /** - * PREPARE_INCREMENTAL = 1; - */ - public static final int PREPARE_INCREMENTAL_VALUE = 1; - /** - * INCREMENTAL_COPY = 2; - */ - public static final int INCREMENTAL_COPY_VALUE = 2; - /** - * INCR_BACKUP_COMPLETE = 3; - */ - public static final int INCR_BACKUP_COMPLETE_VALUE = 3; - - - public final int getNumber() { return value; } - - public static IncrementalTableBackupState valueOf(int value) { - switch (value) { - case 1: return PREPARE_INCREMENTAL; - case 2: return INCREMENTAL_COPY; - case 3: return INCR_BACKUP_COMPLETE; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public IncrementalTableBackupState findValueByNumber(int number) { - return IncrementalTableBackupState.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(1); - } - - private static final IncrementalTableBackupState[] VALUES = values(); - - public static IncrementalTableBackupState valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private IncrementalTableBackupState(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.IncrementalTableBackupState) - } - - /** * Protobuf enum {@code hbase.pb.BackupType} + * + *
+   * <pre>
+   *enum FullTableBackupState {
+   *PRE_SNAPSHOT_TABLE = 1;
+   *SNAPSHOT_TABLES = 2;
+   *SNAPSHOT_COPY = 3;
+   *BACKUP_COMPLETE = 4;
+   *}
+   *
+   *enum IncrementalTableBackupState {
+   *PREPARE_INCREMENTAL = 1;
+   *INCREMENTAL_COPY = 2;
+   *INCR_BACKUP_COMPLETE = 3;
+   *}
+   *
+   *message SnapshotTableStateData {
+   *required TableName table = 1;
+   *required string snapshotName = 2;
+   *}
+   * </pre>
*/ public enum BackupType implements com.google.protobuf.ProtocolMessageEnum { @@ -256,7 +85,7 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(2); + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); } private static final BackupType[] VALUES = values(); @@ -281,57 +110,138 @@ public final class BackupProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) } - public interface SnapshotTableStateDataOrBuilder + public interface BackupImageOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.TableName table = 1; + // required string backup_id = 1; /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - boolean hasTable(); + boolean hasBackupId(); /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + java.lang.String getBackupId(); /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType backup_type = 2; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + boolean hasBackupType(); + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); - // required string snapshotName = 2; + // required string root_dir = 3; /** - * required string snapshotName = 2; + * required string root_dir = 3; */ - boolean hasSnapshotName(); + boolean hasRootDir(); /** - * required string snapshotName = 2; + * required string root_dir = 3; */ - java.lang.String getSnapshotName(); + java.lang.String getRootDir(); /** - * required string snapshotName = 2; + * required string root_dir = 3; */ com.google.protobuf.ByteString - getSnapshotNameBytes(); + getRootDirBytes(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.BackupImage ancestors = 7; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); + /** + * repeated 
.hbase.pb.BackupImage ancestors = 7; + */ + int getAncestorsCount(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index); } /** - * Protobuf type {@code hbase.pb.SnapshotTableStateData} + * Protobuf type {@code hbase.pb.BackupImage} */ - public static final class SnapshotTableStateData extends + public static final class BackupImage extends com.google.protobuf.GeneratedMessage - implements SnapshotTableStateDataOrBuilder { - // Use SnapshotTableStateData.newBuilder() to construct. - private SnapshotTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + implements BackupImageOrBuilder { + // Use BackupImage.newBuilder() to construct. + private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private SnapshotTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final SnapshotTableStateData defaultInstance; - public static SnapshotTableStateData getDefaultInstance() { + private static final BackupImage defaultInstance; + public static BackupImage getDefaultInstance() { return defaultInstance; } - public SnapshotTableStateData getDefaultInstanceForType() { + public BackupImage getDefaultInstanceForType() { return defaultInstance; } @@ -341,7 +251,7 @@ public final class BackupProtos { getUnknownFields() { return this.unknownFields; } - private SnapshotTableStateData( + private BackupImage( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -365,21 +275,50 @@ public final class BackupProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = table_.toBuilder(); - } - table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(table_); - table_ = subBuilder.buildPartial(); - } bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); break; } - case 18: { - bitField0_ |= 0x00000002; - snapshotName_ = input.readBytes(); + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + backupType_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + rootDir_ = input.readBytes(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; 
+ } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); break; } } @@ -390,74 +329,117 @@ public final class BackupProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SnapshotTableStateData parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupImage parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new SnapshotTableStateData(input, extensionRegistry); + return new BackupImage(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.TableName table = 1; - public static final int TABLE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - public boolean hasTable() { + public boolean hasBackupId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - return table_; + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + 
java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } } /** - * required .hbase.pb.TableName table = 1; + * required string backup_id = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - return table_; + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - // required string snapshotName = 2; - public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; - private java.lang.Object snapshotName_; + // required .hbase.pb.BackupType backup_type = 2; + public static final int BACKUP_TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; /** - * required string snapshotName = 2; + * required .hbase.pb.BackupType backup_type = 2; */ - public boolean hasSnapshotName() { + public boolean hasBackupType() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required string snapshotName = 2; + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + + // required string root_dir = 3; + public static final int ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object rootDir_; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; */ - public java.lang.String getSnapshotName() { - java.lang.Object ref = snapshotName_; + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -465,61 +447,205 @@ public final class BackupProtos { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - snapshotName_ = s; + rootDir_ = s; } return s; } } /** - * required string snapshotName = 2; + * required string root_dir = 3; */ public com.google.protobuf.ByteString - getSnapshotNameBytes() { - java.lang.Object ref = snapshotName_; + getRootDirBytes() { + java.lang.Object ref = rootDir_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - snapshotName_ = b; + rootDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + public static final int ANCESTORS_FIELD_NUMBER = 7; + private java.util.List ancestors_; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsOrBuilderList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + return ancestors_.size(); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + return ancestors_.get(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + return ancestors_.get(index); + } + private void initFields() { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - snapshotName_ = ""; + backupId_ = ""; + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + rootDir_ = ""; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + ancestors_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTable()) { + if (!hasBackupId()) { memoizedIsInitialized = 0; return false; } - if (!hasSnapshotName()) { + if (!hasBackupType()) { memoizedIsInitialized = 0; return false; } - if (!getTable().isInitialized()) { + if (!hasRootDir()) { memoizedIsInitialized = 0; return false; } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, table_); + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getSnapshotNameBytes()); + output.writeEnum(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + output.writeMessage(7, ancestors_.get(i)); } getUnknownFields().writeTo(output); } @@ -532,11 +658,31 @@ public final class BackupProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, table_); + .computeBytesSize(1, getBackupIdBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getSnapshotNameBytes()); + .computeEnumSize(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, ancestors_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -555,22 +701,41 @@ public final class BackupProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) obj; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; boolean result = true; - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasBackupType() == other.hasBackupType()); + if (hasBackupType()) { + result = result && + (getBackupType() == other.getBackupType()); } - result = result && (hasSnapshotName() == other.hasSnapshotName()); - if (hasSnapshotName()) { - result = result && getSnapshotName() - .equals(other.getSnapshotName()); + result = result && (hasRootDir() == other.hasRootDir()); + if (hasRootDir()) { + result = result && getRootDir() + .equals(other.getRootDir()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && 
(hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); } + result = result && getAncestorsList() + .equals(other.getAncestorsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -584,66 +749,86 @@ public final class BackupProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasBackupType()) { + hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getBackupType()); + } + if (hasRootDir()) { + hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getRootDir().hashCode(); } - if (hasSnapshotName()) { - hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; - hash = (53 * hash) + getSnapshotName().hashCode(); + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); + } + if (getAncestorsCount() > 0) { + hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; + hash = (53 * hash) + getAncestorsList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) throws java.io.IOException { return 
PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -652,7 +837,7 @@ public final class BackupProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -664,24 +849,24 @@ public final class BackupProtos { return builder; } /** - * Protobuf type {@code hbase.pb.SnapshotTableStateData} + * Protobuf type {@code hbase.pb.BackupImage} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateDataOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable .ensureFieldAccessorsInitialized( - 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -693,7 +878,8 @@ public final class BackupProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableFieldBuilder(); + getTableListFieldBuilder(); + getAncestorsFieldBuilder(); } } private static Builder create() { @@ -702,14 +888,28 @@ public final class BackupProtos { public Builder clear() { super.clear(); - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - } else { - tableBuilder_.clear(); - } + backupId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - snapshotName_ = ""; + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; bitField0_ = (bitField0_ & ~0x00000002); + rootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ancestorsBuilder_.clear(); + } return this; } @@ -719,2731 +919,997 @@ public final class BackupProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData(this); + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (tableBuilder_ == null) { - result.table_ = table_; - } else { - result.table_ = tableBuilder_.build(); - } + result.backupId_ = backupId_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.snapshotName_ = snapshotName_; + result.backupType_ = backupType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.rootDir_ = rootDir_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (ancestorsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.ancestors_ = ancestors_; + } else { + result.ancestors_ = ancestorsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance()) return this; - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (other.hasSnapshotName()) { - bitField0_ |= 0x00000002; - snapshotName_ = other.snapshotName_; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; onChanged(); } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasTable()) { - - return false; - } - if (!hasSnapshotName()) { - - return false; + if (other.hasBackupType()) { + setBackupType(other.getBackupType()); } - if (!getTable().isInitialized()) { - - return false; + if (other.hasRootDir()) { + bitField0_ |= 0x00000004; + rootDir_ = other.rootDir_; + onChanged(); } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.TableName table = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; - /** - * required .hbase.pb.TableName table = 1; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - if (tableBuilder_ == null) { - return table_; } else { - return tableBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } } - table_ = value; - onChanged(); - } else { - tableBuilder_.setMessage(value); } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder setTable( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableBuilder_ == null) { - table_ = builderForValue.build(); - onChanged(); - } else { - tableBuilder_.setMessage(builderForValue.build()); + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); } - bitField0_ |= 0x00000001; - return this; - } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { - table_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); - } else { - table_ = value; + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (ancestorsBuilder_ == null) { + if (!other.ancestors_.isEmpty()) { + if (ancestors_.isEmpty()) { + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureAncestorsIsMutable(); + ancestors_.addAll(other.ancestors_); + } + onChanged(); } - onChanged(); } else { - tableBuilder_.mergeFrom(value); + if (!other.ancestors_.isEmpty()) { + if (ancestorsBuilder_.isEmpty()) { + ancestorsBuilder_.dispose(); + ancestorsBuilder_ = null; + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + ancestorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getAncestorsFieldBuilder() : null; + } else { + ancestorsBuilder_.addAllMessages(other.ancestors_); + } + } } - bitField0_ |= 0x00000001; + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder clearTable() { - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableBuilder_.clear(); + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTableFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - if (tableBuilder_ != null) { - return tableBuilder_.getMessageOrBuilder(); - } else { - return table_; + if (!hasBackupType()) { + + return false; + } + if (!hasRootDir()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + + return false; + } } + return true; } - /** - * required .hbase.pb.TableName table = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTableFieldBuilder() { - if (tableBuilder_ == null) { - tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - table_, - getParentForChildren(), - isClean()); - table_ = null; + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } } - return tableBuilder_; + return this; } + private int bitField0_; - // required string snapshotName = 2; - private java.lang.Object snapshotName_ = ""; + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; /** - * required string snapshotName = 2; + * required string backup_id = 1; */ - public boolean hasSnapshotName() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string snapshotName = 2; + * required string backup_id = 1; */ - public java.lang.String getSnapshotName() { - java.lang.Object ref = 
snapshotName_; + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - snapshotName_ = s; + backupId_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string snapshotName = 2; + * required string backup_id = 1; */ public com.google.protobuf.ByteString - getSnapshotNameBytes() { - java.lang.Object ref = snapshotName_; + getBackupIdBytes() { + java.lang.Object ref = backupId_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - snapshotName_ = b; + backupId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string snapshotName = 2; + * required string backup_id = 1; */ - public Builder setSnapshotName( + public Builder setBackupId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; - snapshotName_ = value; + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } /** - * required string snapshotName = 2; + * required string backup_id = 1; */ - public Builder clearSnapshotName() { - bitField0_ = (bitField0_ & ~0x00000002); - snapshotName_ = getDefaultInstance().getSnapshotName(); + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); onChanged(); return this; } /** - * required string snapshotName = 2; + * required string backup_id = 1; */ - public Builder setSnapshotNameBytes( + public Builder setBackupIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000002; - snapshotName_ = value; + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotTableStateData) - } - - static { - defaultInstance = new SnapshotTableStateData(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotTableStateData) - } - - public interface BackupImageOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string backup_id = 1; - /** - * required string backup_id = 1; - */ - boolean hasBackupId(); - /** - * required string backup_id = 1; - */ - java.lang.String getBackupId(); - /** - * required string backup_id = 1; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); - - // required .hbase.pb.BackupType backup_type = 2; - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - boolean hasBackupType(); - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); - - // required string root_dir = 3; - /** - * required string root_dir = 3; - */ - boolean hasRootDir(); - /** - * required string root_dir = 3; - */ - java.lang.String getRootDir(); - /** - * required string root_dir = 3; - */ - com.google.protobuf.ByteString - getRootDirBytes(); - - // repeated .hbase.pb.TableName table_list = 4; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - java.util.List - getTableListList(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - int getTableListCount(); - /** - * repeated .hbase.pb.TableName 
table_list = 4; - */ - java.util.List - getTableListOrBuilderList(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index); - - // required uint64 start_ts = 5; - /** - * required uint64 start_ts = 5; - */ - boolean hasStartTs(); - /** - * required uint64 start_ts = 5; - */ - long getStartTs(); - - // required uint64 complete_ts = 6; - /** - * required uint64 complete_ts = 6; - */ - boolean hasCompleteTs(); - /** - * required uint64 complete_ts = 6; - */ - long getCompleteTs(); - - // repeated .hbase.pb.BackupImage ancestors = 7; - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - java.util.List - getAncestorsList(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - int getAncestorsCount(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - java.util.List - getAncestorsOrBuilderList(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( - int index); - } - /** - * Protobuf type {@code hbase.pb.BackupImage} - */ - public static final class BackupImage extends - com.google.protobuf.GeneratedMessage - implements BackupImageOrBuilder { - // Use BackupImage.newBuilder() to construct. - private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BackupImage defaultInstance; - public static BackupImage getDefaultInstance() { - return defaultInstance; - } - - public BackupImage getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BackupImage( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - backupId_ = input.readBytes(); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - backupType_ = value; - } - break; - } - case 26: { - bitField0_ |= 0x00000004; - rootDir_ = input.readBytes(); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - 
tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 40: { - bitField0_ |= 0x00000008; - startTs_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000010; - completeTs_ = input.readUInt64(); - break; - } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; - } - ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - } - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = java.util.Collections.unmodifiableList(ancestors_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupImage parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupImage(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string backup_id = 1; - public static final int BACKUP_ID_FIELD_NUMBER = 1; - private java.lang.Object backupId_; - /** - * required string backup_id = 1; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string backup_id = 1; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * required string backup_id = 1; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .hbase.pb.BackupType backup_type = 2; - public static final int BACKUP_TYPE_FIELD_NUMBER = 2; - private 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public boolean hasBackupType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { - return backupType_; - } - - // required string root_dir = 3; - public static final int ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object rootDir_; - /** - * required string root_dir = 3; - */ - public boolean hasRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string root_dir = 3; - */ - public java.lang.String getRootDir() { - java.lang.Object ref = rootDir_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - rootDir_ = s; - } - return s; - } - } - /** - * required string root_dir = 3; - */ - public com.google.protobuf.ByteString - getRootDirBytes() { - java.lang.Object ref = rootDir_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - rootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .hbase.pb.TableName table_list = 4; - public static final int TABLE_LIST_FIELD_NUMBER = 4; - private java.util.List tableList_; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListOrBuilderList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - return tableList_.size(); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - return tableList_.get(index); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index) { - return tableList_.get(index); - } - - // required uint64 start_ts = 5; - public static final int START_TS_FIELD_NUMBER = 5; - private long startTs_; - /** - * required uint64 start_ts = 5; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required uint64 start_ts = 5; - */ - public long getStartTs() { - return startTs_; - } - - // required uint64 complete_ts = 6; - public static final int COMPLETE_TS_FIELD_NUMBER = 6; - private long completeTs_; - /** - * required uint64 complete_ts = 6; - */ - public boolean hasCompleteTs() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required uint64 complete_ts = 6; - */ - public long getCompleteTs() { - return completeTs_; - } - - // repeated .hbase.pb.BackupImage ancestors = 7; - public static final int ANCESTORS_FIELD_NUMBER = 7; - private java.util.List ancestors_; - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public java.util.List getAncestorsList() { - return ancestors_; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public java.util.List - getAncestorsOrBuilderList() { - return ancestors_; - } - /** - * 
repeated .hbase.pb.BackupImage ancestors = 7; - */ - public int getAncestorsCount() { - return ancestors_.size(); - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { - return ancestors_.get(index); - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( - int index) { - return ancestors_.get(index); - } - - private void initFields() { - backupId_ = ""; - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - rootDir_ = ""; - tableList_ = java.util.Collections.emptyList(); - startTs_ = 0L; - completeTs_ = 0L; - ancestors_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBackupType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRootDir()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStartTs()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCompleteTs()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getAncestorsCount(); i++) { - if (!getAncestors(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, backupType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getRootDirBytes()); - } - for (int i = 0; i < tableList_.size(); i++) { - output.writeMessage(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(6, completeTs_); - } - for (int i = 0; i < ancestors_.size(); i++) { - output.writeMessage(7, ancestors_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, backupType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getRootDirBytes()); - } - for (int i = 0; i < tableList_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += 
com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, completeTs_); - } - for (int i = 0; i < ancestors_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, ancestors_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; - - boolean result = true; - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && (hasBackupType() == other.hasBackupType()); - if (hasBackupType()) { - result = result && - (getBackupType() == other.getBackupType()); - } - result = result && (hasRootDir() == other.hasRootDir()); - if (hasRootDir()) { - result = result && getRootDir() - .equals(other.getRootDir()); - } - result = result && getTableListList() - .equals(other.getTableListList()); - result = result && (hasStartTs() == other.hasStartTs()); - if (hasStartTs()) { - result = result && (getStartTs() - == other.getStartTs()); - } - result = result && (hasCompleteTs() == other.hasCompleteTs()); - if (hasCompleteTs()) { - result = result && (getCompleteTs() - == other.getCompleteTs()); - } - result = result && getAncestorsList() - .equals(other.getAncestorsList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (hasBackupType()) { - hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getBackupType()); - } - if (hasRootDir()) { - hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getRootDir().hashCode(); - } - if (getTableListCount() > 0) { - hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; - hash = (53 * hash) + getTableListList().hashCode(); - } - if (hasStartTs()) { - hash = (37 * hash) + START_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTs()); - } - if (hasCompleteTs()) { - hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCompleteTs()); - } - if (getAncestorsCount() > 0) { - hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; - hash = (53 * hash) + getAncestorsList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - com.google.protobuf.ByteString 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.BackupImage} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableListFieldBuilder(); - getAncestorsFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000002); - rootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - tableListBuilder_.clear(); - } - startTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - completeTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - if (ancestorsBuilder_ == null) { - ancestors_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ancestorsBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.backupId_ = backupId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.backupType_ = backupType_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.rootDir_ = rootDir_; - if (tableListBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.tableList_ = tableList_; - } else { - result.tableList_ = tableListBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.startTs_ = startTs_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; - } - result.completeTs_ = completeTs_; - if (ancestorsBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = 
java.util.Collections.unmodifiableList(ancestors_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.ancestors_ = ancestors_; - } else { - result.ancestors_ = ancestorsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; - if (other.hasBackupId()) { - bitField0_ |= 0x00000001; - backupId_ = other.backupId_; - onChanged(); - } - if (other.hasBackupType()) { - setBackupType(other.getBackupType()); - } - if (other.hasRootDir()) { - bitField0_ |= 0x00000004; - rootDir_ = other.rootDir_; - onChanged(); - } - if (tableListBuilder_ == null) { - if (!other.tableList_.isEmpty()) { - if (tableList_.isEmpty()) { - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTableListIsMutable(); - tableList_.addAll(other.tableList_); - } - onChanged(); - } - } else { - if (!other.tableList_.isEmpty()) { - if (tableListBuilder_.isEmpty()) { - tableListBuilder_.dispose(); - tableListBuilder_ = null; - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - tableListBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTableListFieldBuilder() : null; - } else { - tableListBuilder_.addAllMessages(other.tableList_); - } - } - } - if (other.hasStartTs()) { - setStartTs(other.getStartTs()); - } - if (other.hasCompleteTs()) { - setCompleteTs(other.getCompleteTs()); - } - if (ancestorsBuilder_ == null) { - if (!other.ancestors_.isEmpty()) { - if (ancestors_.isEmpty()) { - ancestors_ = other.ancestors_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureAncestorsIsMutable(); - ancestors_.addAll(other.ancestors_); - } - onChanged(); - } - } else { - if (!other.ancestors_.isEmpty()) { - if (ancestorsBuilder_.isEmpty()) { - ancestorsBuilder_.dispose(); - ancestorsBuilder_ = null; - ancestors_ = other.ancestors_; - bitField0_ = (bitField0_ & ~0x00000040); - ancestorsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getAncestorsFieldBuilder() : null; - } else { - ancestorsBuilder_.addAllMessages(other.ancestors_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBackupId()) { - - return false; - } - if (!hasBackupType()) { - - return false; - } - if (!hasRootDir()) { - - return false; - } - if (!hasStartTs()) { - - return false; - } - if (!hasCompleteTs()) { - - return false; - } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getAncestorsCount(); i++) { - if (!getAncestors(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string backup_id = 1; - private java.lang.Object backupId_ = ""; - /** - * required string backup_id = 1; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string backup_id = 1; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string backup_id = 1; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string backup_id = 1; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; - } - /** - * required string backup_id = 1; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * required string backup_id = 1; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; - } - - // required .hbase.pb.BackupType backup_type = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public boolean hasBackupType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { 
- return backupType_; - } - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupType_ = value; - onChanged(); - return this; - } - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public Builder clearBackupType() { - bitField0_ = (bitField0_ & ~0x00000002); - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); - return this; - } - - // required string root_dir = 3; - private java.lang.Object rootDir_ = ""; - /** - * required string root_dir = 3; - */ - public boolean hasRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string root_dir = 3; - */ - public java.lang.String getRootDir() { - java.lang.Object ref = rootDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - rootDir_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string root_dir = 3; - */ - public com.google.protobuf.ByteString - getRootDirBytes() { - java.lang.Object ref = rootDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - rootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string root_dir = 3; - */ - public Builder setRootDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - rootDir_ = value; - onChanged(); - return this; - } - /** - * required string root_dir = 3; - */ - public Builder clearRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - rootDir_ = getDefaultInstance().getRootDir(); - onChanged(); - return this; - } - /** - * required string root_dir = 3; - */ - public Builder setRootDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - rootDir_ = value; - onChanged(); - return this; - } - - // repeated .hbase.pb.TableName table_list = 4; - private java.util.List tableList_ = - java.util.Collections.emptyList(); - private void ensureTableListIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(tableList_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; - - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - if (tableListBuilder_ == null) { - return java.util.Collections.unmodifiableList(tableList_); - } else { - return tableListBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - if (tableListBuilder_ == null) { - return tableList_.size(); - } else { - return tableListBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - if (tableListBuilder_ == null) { - return tableList_.get(index); - 
} else { - return tableListBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.set(index, value); - onChanged(); - } else { - tableListBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.set(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(value); - onChanged(); - } else { - tableListBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(index, value); - onChanged(); - } else { - tableListBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addAllTableList( - java.lang.Iterable values) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - super.addAll(values, tableList_); - onChanged(); - } else { - tableListBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder clearTableList() { - if (tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - tableListBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder removeTableList(int index) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.remove(index); - onChanged(); - } else { - tableListBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( - int index) { - return getTableListFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index) { - if (tableListBuilder_ == null) { - return tableList_.get(index); } else { - return tableListBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListOrBuilderList() { - if (tableListBuilder_ != null) { - return tableListBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tableList_); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { - return getTableListFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( - int index) { - return getTableListFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListBuilderList() { - return getTableListFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTableListFieldBuilder() { - if (tableListBuilder_ == null) { - tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - tableList_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - tableList_ = null; - } - return tableListBuilder_; - } - - // required uint64 start_ts = 5; - private long startTs_ ; - /** - * required uint64 start_ts = 5; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required uint64 start_ts = 5; - */ - public long getStartTs() { - return startTs_; - } - /** - * required uint64 start_ts = 5; - */ - public Builder setStartTs(long value) { - bitField0_ |= 0x00000010; - startTs_ = value; - onChanged(); - return this; - } - /** - * required uint64 start_ts = 5; - */ - public Builder clearStartTs() { - bitField0_ = (bitField0_ & ~0x00000010); - startTs_ = 0L; - onChanged(); - return this; - } - - // required uint64 complete_ts = 6; - private long completeTs_ ; - /** - * required uint64 complete_ts = 6; - */ - public boolean hasCompleteTs() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * required uint64 complete_ts = 6; - */ - public long getCompleteTs() { - return completeTs_; - } - /** - * required uint64 complete_ts = 6; - */ - public Builder setCompleteTs(long value) { - bitField0_ |= 0x00000020; - completeTs_ = value; - onChanged(); - return this; - } - /** - * required uint64 complete_ts = 6; - */ - public Builder 
clearCompleteTs() { - bitField0_ = (bitField0_ & ~0x00000020); - completeTs_ = 0L; - onChanged(); - return this; - } - - // repeated .hbase.pb.BackupImage ancestors = 7; - private java.util.List ancestors_ = - java.util.Collections.emptyList(); - private void ensureAncestorsIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = new java.util.ArrayList(ancestors_); - bitField0_ |= 0x00000040; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> ancestorsBuilder_; - - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public java.util.List getAncestorsList() { - if (ancestorsBuilder_ == null) { - return java.util.Collections.unmodifiableList(ancestors_); - } else { - return ancestorsBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public int getAncestorsCount() { - if (ancestorsBuilder_ == null) { - return ancestors_.size(); - } else { - return ancestorsBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { - if (ancestorsBuilder_ == null) { - return ancestors_.get(index); - } else { - return ancestorsBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public Builder setAncestors( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { - if (ancestorsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAncestorsIsMutable(); - ancestors_.set(index, value); - onChanged(); - } else { - ancestorsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public Builder setAncestors( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { - if (ancestorsBuilder_ == null) { - ensureAncestorsIsMutable(); - ancestors_.set(index, builderForValue.build()); - onChanged(); - } else { - ancestorsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public Builder addAncestors(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { - if (ancestorsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAncestorsIsMutable(); - ancestors_.add(value); - onChanged(); - } else { - ancestorsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public Builder addAncestors( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { - if (ancestorsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAncestorsIsMutable(); - ancestors_.add(index, value); - onChanged(); - } else { - ancestorsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public Builder addAncestors( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { - if (ancestorsBuilder_ == null) { - ensureAncestorsIsMutable(); - ancestors_.add(builderForValue.build()); - onChanged(); - } else { - 
ancestorsBuilder_.addMessage(builderForValue.build()); - } - return this; - } + // required .hbase.pb.BackupType backup_type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required .hbase.pb.BackupType backup_type = 2; */ - public Builder addAncestors( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { - if (ancestorsBuilder_ == null) { - ensureAncestorsIsMutable(); - ancestors_.add(index, builderForValue.build()); - onChanged(); - } else { - ancestorsBuilder_.addMessage(index, builderForValue.build()); - } - return this; + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required .hbase.pb.BackupType backup_type = 2; */ - public Builder addAllAncestors( - java.lang.Iterable values) { - if (ancestorsBuilder_ == null) { - ensureAncestorsIsMutable(); - super.addAll(values, ancestors_); - onChanged(); - } else { - ancestorsBuilder_.addAllMessages(values); - } - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required .hbase.pb.BackupType backup_type = 2; */ - public Builder clearAncestors() { - if (ancestorsBuilder_ == null) { - ancestors_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - onChanged(); - } else { - ancestorsBuilder_.clear(); + public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000002; + backupType_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required .hbase.pb.BackupType backup_type = 2; */ - public Builder removeAncestors(int index) { - if (ancestorsBuilder_ == null) { - ensureAncestorsIsMutable(); - ancestors_.remove(index); - onChanged(); - } else { - ancestorsBuilder_.remove(index); - } + public Builder clearBackupType() { + bitField0_ = (bitField0_ & ~0x00000002); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); return this; } + + // required string root_dir = 3; + private java.lang.Object rootDir_ = ""; /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required string root_dir = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getAncestorsBuilder( - int index) { - return getAncestorsFieldBuilder().getBuilder(index); + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required string root_dir = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( - int index) { - if (ancestorsBuilder_ == null) { - return ancestors_.get(index); } else { - return ancestorsBuilder_.getMessageOrBuilder(index); + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rootDir_ = s; + return s; + } else { + return (java.lang.String) ref; } } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required string 
root_dir = 3; */ - public java.util.List - getAncestorsOrBuilderList() { - if (ancestorsBuilder_ != null) { - return ancestorsBuilder_.getMessageOrBuilderList(); + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; } else { - return java.util.Collections.unmodifiableList(ancestors_); + return (com.google.protobuf.ByteString) ref; } } /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder() { - return getAncestorsFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required string root_dir = 3; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder( - int index) { - return getAncestorsFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + public Builder setRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; } /** - * repeated .hbase.pb.BackupImage ancestors = 7; + * required string root_dir = 3; */ - public java.util.List - getAncestorsBuilderList() { - return getAncestorsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> - getAncestorsFieldBuilder() { - if (ancestorsBuilder_ == null) { - ancestorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( - ancestors_, - ((bitField0_ & 0x00000040) == 0x00000040), - getParentForChildren(), - isClean()); - ancestors_ = null; - } - return ancestorsBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupImage) - } - - static { - defaultInstance = new BackupImage(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.BackupImage) - } - - public interface ServerTimestampOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string server = 1; - /** - * required string server = 1; - */ - boolean hasServer(); - /** - * required string server = 1; - */ - java.lang.String getServer(); - /** - * required string server = 1; - */ - com.google.protobuf.ByteString - getServerBytes(); - - // required uint64 timestamp = 2; - /** - * required uint64 timestamp = 2; - */ - boolean hasTimestamp(); - /** - * required uint64 timestamp = 2; - */ - long getTimestamp(); - } - /** - * Protobuf type {@code hbase.pb.ServerTimestamp} - */ - public static final class ServerTimestamp extends - com.google.protobuf.GeneratedMessage - implements ServerTimestampOrBuilder { - // Use ServerTimestamp.newBuilder() to construct. 
- private ServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ServerTimestamp defaultInstance; - public static ServerTimestamp getDefaultInstance() { - return defaultInstance; - } - - public ServerTimestamp getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ServerTimestamp( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - server_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - timestamp_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + public Builder clearRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + rootDir_ = getDefaultInstance().getRootDir(); + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder setRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); - } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ServerTimestamp parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ServerTimestamp(input, extensionRegistry); + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + 
bitField0_ |= 0x00000008; + } } - }; - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; - private int bitField0_; - // required string server = 1; - public static final int SERVER_FIELD_NUMBER = 1; - private java.lang.Object server_; - /** - * required string server = 1; - */ - public boolean hasServer() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string server = 1; - */ - public java.lang.String getServer() { - java.lang.Object ref = server_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - server_ = s; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); } - return s; } - } - /** - * required string server = 1; - */ - public com.google.protobuf.ByteString - getServerBytes() { - java.lang.Object ref = server_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - server_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); + } } - } - - // required uint64 timestamp = 2; - public static final int TIMESTAMP_FIELD_NUMBER = 2; - private long timestamp_; - /** - * required uint64 timestamp = 2; - */ - public boolean hasTimestamp() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required uint64 timestamp = 2; - */ - public long getTimestamp() { - return timestamp_; - } - - private void initFields() { - server_ = ""; - timestamp_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasServer()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } } - if (!hasTimestamp()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { 
- getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getServerBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, timestamp_); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getServerBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, timestamp_); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + 
ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)) { - return super.equals(obj); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) obj; - - boolean result = true; - result = result && (hasServer() == other.hasServer()); - if (hasServer()) { - result = result && getServer() - .equals(other.getServer()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } } - result = result && (hasTimestamp() == other.hasTimestamp()); - if (hasTimestamp()) { - result = result && (getTimestamp() - == other.getTimestamp()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasServer()) { - hash = (37 * hash) + SERVER_FIELD_NUMBER; - hash = (53 * hash) + getServer().hashCode(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); } - if (hasTimestamp()) { - hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTimestamp()); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder 
toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.ServerTimestamp} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } + + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - private static Builder create() { - return new Builder(); + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; } - - public Builder clear() { - super.clear(); - server_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - timestamp_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; } - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + // repeated .hbase.pb.BackupImage ancestors = 7; + private java.util.List ancestors_ = + java.util.Collections.emptyList(); + private void ensureAncestorsIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { 
+ ancestors_ = new java.util.ArrayList(ancestors_); + bitField0_ |= 0x00000040; + } } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance(); - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> ancestorsBuilder_; - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + if (ancestorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(ancestors_); + } else { + return ancestorsBuilder_.getMessageList(); } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.server_ = server_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + if (ancestorsBuilder_ == null) { + return ancestors_.size(); + } else { + return ancestorsBuilder_.getCount(); } - result.timestamp_ = timestamp_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)other); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); } else { - super.mergeFrom(other); - return this; + return ancestorsBuilder_.getMessage(index); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()) return this; - if (other.hasServer()) { - bitField0_ |= 0x00000001; - server_ = other.server_; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.set(index, value); onChanged(); + } else { + ancestorsBuilder_.setMessage(index, value); } - if (other.hasTimestamp()) { - setTimestamp(other.getTimestamp()); - } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - if (!hasServer()) { - - return false; + /** + 
* repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.set(index, builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.setMessage(index, builderForValue.build()); } - if (!hasTimestamp()) { - - return false; + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.add(value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(value); } - return true; + return this; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureAncestorsIsMutable(); + ancestors_.add(index, value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(index, value); } return this; } - private int bitField0_; - - // required string server = 1; - private java.lang.Object server_ = ""; /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public boolean hasServer() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public Builder addAncestors( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.addMessage(builderForValue.build()); + } + return this; } /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public java.lang.String getServer() { - java.lang.Object ref = server_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - server_ = s; - return s; + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(index, builderForValue.build()); + onChanged(); } else { - return (java.lang.String) ref; + ancestorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAllAncestors( + java.lang.Iterable values) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + super.addAll(values, ancestors_); + onChanged(); + } else { + ancestorsBuilder_.addAllMessages(values); } + 
return this; } /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public com.google.protobuf.ByteString - getServerBytes() { - java.lang.Object ref = server_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - server_ = b; - return b; + public Builder clearAncestors() { + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + ancestorsBuilder_.clear(); } + return this; } /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public Builder setServer( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - server_ = value; - onChanged(); + public Builder removeAncestors(int index) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.remove(index); + onChanged(); + } else { + ancestorsBuilder_.remove(index); + } return this; } /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public Builder clearServer() { - bitField0_ = (bitField0_ & ~0x00000001); - server_ = getDefaultInstance().getServer(); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().getBuilder(index); } /** - * required string server = 1; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public Builder setServerBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - server_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); } else { + return ancestorsBuilder_.getMessageOrBuilder(index); + } } - - // required uint64 timestamp = 2; - private long timestamp_ ; /** - * required uint64 timestamp = 2; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public boolean hasTimestamp() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public java.util.List + getAncestorsOrBuilderList() { + if (ancestorsBuilder_ != null) { + return ancestorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(ancestors_); + } } /** - * required uint64 timestamp = 2; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public long getTimestamp() { - return timestamp_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder() { + return getAncestorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * required uint64 timestamp = 2; + * repeated .hbase.pb.BackupImage ancestors = 7; */ - public Builder setTimestamp(long value) { - bitField0_ |= 0x00000002; - timestamp_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * required uint64 timestamp = 2; + * repeated .hbase.pb.BackupImage 
ancestors = 7; */ - public Builder clearTimestamp() { - bitField0_ = (bitField0_ & ~0x00000002); - timestamp_ = 0L; - onChanged(); - return this; + public java.util.List + getAncestorsBuilderList() { + return getAncestorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getAncestorsFieldBuilder() { + if (ancestorsBuilder_ == null) { + ancestorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + ancestors_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + ancestors_ = null; + } + return ancestorsBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.ServerTimestamp) + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupImage) } static { - defaultInstance = new ServerTimestamp(true); + defaultInstance = new BackupImage(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.ServerTimestamp) + // @@protoc_insertion_point(class_scope:hbase.pb.BackupImage) } - public interface TableServerTimestampOrBuilder + public interface ServerTimestampOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.TableName table = 1; + // required string server = 1; /** - * required .hbase.pb.TableName table = 1; + * required string server = 1; */ - boolean hasTable(); + boolean hasServer(); /** - * required .hbase.pb.TableName table = 1; + * required string server = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + java.lang.String getServer(); /** - * required .hbase.pb.TableName table = 1; + * required string server = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + com.google.protobuf.ByteString + getServerBytes(); - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - java.util.List - getServerTimestampList(); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - int getServerTimestampCount(); + // required uint64 timestamp = 2; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - java.util.List - getServerTimestampOrBuilderList(); + boolean hasTimestamp(); /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index); + long getTimestamp(); } /** - * Protobuf type {@code hbase.pb.TableServerTimestamp} + * Protobuf type {@code hbase.pb.ServerTimestamp} */ - public static final class TableServerTimestamp extends + public static final class ServerTimestamp extends com.google.protobuf.GeneratedMessage - implements TableServerTimestampOrBuilder { - // Use TableServerTimestamp.newBuilder() to construct. 
- private TableServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { + implements ServerTimestampOrBuilder { + // Use ServerTimestamp.newBuilder() to construct. + private ServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private TableServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private ServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final TableServerTimestamp defaultInstance; - public static TableServerTimestamp getDefaultInstance() { + private static final ServerTimestamp defaultInstance; + public static ServerTimestamp getDefaultInstance() { return defaultInstance; } - public TableServerTimestamp getDefaultInstanceForType() { + public ServerTimestamp getDefaultInstanceForType() { return defaultInstance; } @@ -3453,7 +1919,7 @@ public final class BackupProtos { getUnknownFields() { return this.unknownFields; } - private TableServerTimestamp( + private ServerTimestamp( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3477,24 +1943,13 @@ public final class BackupProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = table_.toBuilder(); - } - table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(table_); - table_ = subBuilder.buildPartial(); - } bitField0_ |= 0x00000001; + server_ = input.readBytes(); break; } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry)); + case 16: { + bitField0_ |= 0x00000002; + timestamp_ = input.readUInt64(); break; } } @@ -3505,122 +1960,114 @@ public final class BackupProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public TableServerTimestamp parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ServerTimestamp parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new TableServerTimestamp(input, extensionRegistry); + return new ServerTimestamp(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required .hbase.pb.TableName table = 1; - public static final int TABLE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; - /** - * required .hbase.pb.TableName table = 1; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - return table_; - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - return table_; - } - - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; - private java.util.List serverTimestamp_; + // required string server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private java.lang.Object server_; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public java.util.List getServerTimestampList() { - return serverTimestamp_; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public java.util.List - getServerTimestampOrBuilderList() { - return serverTimestamp_; + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + server_ = s; + } + return s; + } } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public int getServerTimestampCount() { - return serverTimestamp_.size(); + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } + + // required uint64 timestamp = 2; + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { - return serverTimestamp_.get(index); + 
public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index) { - return serverTimestamp_.get(index); + public long getTimestamp() { + return timestamp_; } private void initFields() { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - serverTimestamp_ = java.util.Collections.emptyList(); + server_ = ""; + timestamp_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasTable()) { + if (!hasServer()) { memoizedIsInitialized = 0; return false; } - if (!getTable().isInitialized()) { + if (!hasTimestamp()) { memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getServerTimestampCount(); i++) { - if (!getServerTimestamp(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } memoizedIsInitialized = 1; return true; } @@ -3629,10 +2076,10 @@ public final class BackupProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, table_); + output.writeBytes(1, getServerBytes()); } - for (int i = 0; i < serverTimestamp_.size(); i++) { - output.writeMessage(2, serverTimestamp_.get(i)); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timestamp_); } getUnknownFields().writeTo(output); } @@ -3645,11 +2092,11 @@ public final class BackupProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, table_); + .computeBytesSize(1, getServerBytes()); } - for (int i = 0; i < serverTimestamp_.size(); i++) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, serverTimestamp_.get(i)); + .computeUInt64Size(2, timestamp_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3668,19 +2115,22 @@ public final class BackupProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) obj; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) obj; boolean result = true; - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); } - result = result && getServerTimestampList() - .equals(other.getServerTimestampList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3694,793 +2144,401 @@ public 
final class BackupProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); } - if (getServerTimestampCount() > 0) { - hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + getServerTimestampList().hashCode(); + if (hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return 
PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.TableServerTimestamp} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableFieldBuilder(); - getServerTimestampFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - } else { - tableBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (serverTimestampBuilder_ == null) { - serverTimestamp_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - serverTimestampBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getDefaultInstanceForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (tableBuilder_ == null) { - result.table_ = table_; - } else { - result.table_ = tableBuilder_.build(); - } - if (serverTimestampBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.serverTimestamp_ = serverTimestamp_; - } else { - result.serverTimestamp_ = serverTimestampBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)other); - } else { - super.mergeFrom(other); - return this; - } - } + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()) return this; - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (serverTimestampBuilder_ == null) { - if (!other.serverTimestamp_.isEmpty()) { - if (serverTimestamp_.isEmpty()) { - serverTimestamp_ = other.serverTimestamp_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureServerTimestampIsMutable(); - serverTimestamp_.addAll(other.serverTimestamp_); - } - onChanged(); - } - } else { - if (!other.serverTimestamp_.isEmpty()) { - if (serverTimestampBuilder_.isEmpty()) { - serverTimestampBuilder_.dispose(); - serverTimestampBuilder_ = null; - serverTimestamp_ = other.serverTimestamp_; - bitField0_ = (bitField0_ & ~0x00000002); - serverTimestampBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getServerTimestampFieldBuilder() : null; - } else { - serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ServerTimestamp} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; } - public final boolean isInitialized() { - if (!hasTable()) { - - return false; - } - if (!getTable().isInitialized()) { - - return false; - } - for (int i = 0; i < getServerTimestampCount(); i++) { - if (!getServerTimestamp(i).isInitialized()) { - - return false; - } - } - return true; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - private int bitField0_; - // required .hbase.pb.TableName table = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; - /** - * required .hbase.pb.TableName table = 1; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - if (tableBuilder_ == null) { - return table_; - } else { - return tableBuilder_.getMessage(); - } - } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - table_ = value; - 
onChanged(); - } else { - tableBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder setTable( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableBuilder_ == null) { - table_ = builderForValue.build(); - onChanged(); - } else { - tableBuilder_.setMessage(builderForValue.build()); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } - bitField0_ |= 0x00000001; - return this; } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { - table_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); - } else { - table_ = value; - } - onChanged(); - } else { - tableBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; + private static Builder create() { + return new Builder(); } - /** - * required .hbase.pb.TableName table = 1; - */ - public Builder clearTable() { - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - onChanged(); - } else { - tableBuilder_.clear(); - } + + public Builder clear() { + super.clear(); + server_ = ""; bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); return this; } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTableFieldBuilder().getBuilder(); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - if (tableBuilder_ != null) { - return tableBuilder_.getMessageOrBuilder(); - } else { - return table_; - } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; } - /** - * required .hbase.pb.TableName table = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTableFieldBuilder() { - if (tableBuilder_ == null) { - tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - table_, - getParentForChildren(), - isClean()); - table_ = null; - } - return tableBuilder_; + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance(); } - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - private java.util.List serverTimestamp_ = - java.util.Collections.emptyList(); - private void ensureServerTimestampIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); - bitField0_ |= 0x00000002; - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; - - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public java.util.List getServerTimestampList() { - if (serverTimestampBuilder_ == null) { - return java.util.Collections.unmodifiableList(serverTimestamp_); - } else { - return serverTimestampBuilder_.getMessageList(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; } - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public int getServerTimestampCount() { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.size(); - } else { - return serverTimestampBuilder_.getCount(); + result.server_ = server_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.get(index); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)other); } else { - return serverTimestampBuilder_.getMessage(index); + super.mergeFrom(other); + return this; } } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public Builder setServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerTimestampIsMutable(); - serverTimestamp_.set(index, value); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()) return this; + if (other.hasServer()) { + bitField0_ |= 0x00000001; + server_ = other.server_; 
onChanged(); - } else { - serverTimestampBuilder_.setMessage(index, value); } - return this; - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public Builder setServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.set(index, builderForValue.build()); - onChanged(); - } else { - serverTimestampBuilder_.setMessage(index, builderForValue.build()); + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerTimestampIsMutable(); - serverTimestamp_.add(value); - onChanged(); - } else { - serverTimestampBuilder_.addMessage(value); + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; } - return this; + if (!hasTimestamp()) { + + return false; + } + return true; } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public Builder addServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } - ensureServerTimestampIsMutable(); - serverTimestamp_.add(index, value); - onChanged(); - } else { - serverTimestampBuilder_.addMessage(index, value); } return this; } + private int bitField0_; + + // required string server = 1; + private java.lang.Object server_ = ""; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public Builder addServerTimestamp( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.add(builderForValue.build()); - onChanged(); - } else { - serverTimestampBuilder_.addMessage(builderForValue.build()); - } - return this; + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public Builder addServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.add(index, builderForValue.build()); - onChanged(); + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (!(ref instanceof java.lang.String)) { + 
java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + server_ = s; + return s; } else { - serverTimestampBuilder_.addMessage(index, builderForValue.build()); + return (java.lang.String) ref; } - return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public Builder addAllServerTimestamp( - java.lang.Iterable values) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - super.addAll(values, serverTimestamp_); - onChanged(); + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; } else { - serverTimestampBuilder_.addAllMessages(values); + return (com.google.protobuf.ByteString) ref; } - return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public Builder clearServerTimestamp() { - if (serverTimestampBuilder_ == null) { - serverTimestamp_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - serverTimestampBuilder_.clear(); - } + public Builder setServer( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public Builder removeServerTimestamp(int index) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.remove(index); - onChanged(); - } else { - serverTimestampBuilder_.remove(index); - } + public Builder clearServer() { + bitField0_ = (bitField0_ & ~0x00000001); + server_ = getDefaultInstance().getServer(); + onChanged(); return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( - int index) { - return getServerTimestampFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index) { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.get(index); } else { - return serverTimestampBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required string server = 1; */ - public java.util.List - getServerTimestampOrBuilderList() { - if (serverTimestampBuilder_ != null) { - return serverTimestampBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(serverTimestamp_); - } + public Builder setServerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; } + + // required uint64 timestamp = 2; + private long timestamp_ ; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { - return getServerTimestampFieldBuilder().addBuilder( - 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( - int index) { - return getServerTimestampFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + public long getTimestamp() { + return timestamp_; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 timestamp = 2; */ - public java.util.List - getServerTimestampBuilderList() { - return getServerTimestampFieldBuilder().getBuilderList(); + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000002; + timestamp_ = value; + onChanged(); + return this; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> - getServerTimestampFieldBuilder() { - if (serverTimestampBuilder_ == null) { - serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( - serverTimestamp_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - serverTimestamp_ = null; - } - return serverTimestampBuilder_; + /** + * required uint64 timestamp = 2; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.TableServerTimestamp) + // @@protoc_insertion_point(builder_scope:hbase.pb.ServerTimestamp) } static { - defaultInstance = new TableServerTimestamp(true); + defaultInstance = new ServerTimestamp(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.TableServerTimestamp) + // @@protoc_insertion_point(class_scope:hbase.pb.ServerTimestamp) } - public interface BackupManifestOrBuilder + public interface TableServerTimestampOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string version = 1; - /** - * required string version = 1; - */ - boolean hasVersion(); - /** - * required string version = 1; - */ - java.lang.String getVersion(); - /** - * required string version = 1; - */ - com.google.protobuf.ByteString - getVersionBytes(); - - // required string backup_id = 2; - /** - * required string backup_id = 2; - */ - boolean hasBackupId(); - /** - * required string backup_id = 2; - */ - java.lang.String getBackupId(); - /** - * required string backup_id = 2; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); - - // required .hbase.pb.BackupType type = 3; - /** - * required .hbase.pb.BackupType type = 3; - */ - boolean hasType(); - /** - * required .hbase.pb.BackupType type = 3; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); - - // repeated .hbase.pb.TableName table_list = 4; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - java.util.List - getTableListList(); - 
/** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - int getTableListCount(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - java.util.List - getTableListOrBuilderList(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index); - - // required uint64 start_ts = 5; - /** - * required uint64 start_ts = 5; - */ - boolean hasStartTs(); - /** - * required uint64 start_ts = 5; - */ - long getStartTs(); - - // required uint64 complete_ts = 6; - /** - * required uint64 complete_ts = 6; - */ - boolean hasCompleteTs(); - /** - * required uint64 complete_ts = 6; - */ - long getCompleteTs(); - - // repeated .hbase.pb.TableServerTimestamp tst_map = 7; - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - java.util.List - getTstMapList(); - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index); + // required .hbase.pb.TableName table = 1; /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - int getTstMapCount(); + boolean hasTable(); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - java.util.List - getTstMapOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( - int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); - // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - java.util.List - getDependentBackupImageList(); + java.util.List + getServerTimestampList(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - int getDependentBackupImageCount(); + int getServerTimestampCount(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - java.util.List - getDependentBackupImageOrBuilderList(); + java.util.List + getServerTimestampOrBuilderList(); /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( int index); } /** - * Protobuf type {@code hbase.pb.BackupManifest} + * Protobuf type 
{@code hbase.pb.TableServerTimestamp} */ - public static final class BackupManifest extends + public static final class TableServerTimestamp extends com.google.protobuf.GeneratedMessage - implements BackupManifestOrBuilder { - // Use BackupManifest.newBuilder() to construct. - private BackupManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + implements TableServerTimestampOrBuilder { + // Use TableServerTimestamp.newBuilder() to construct. + private TableServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private TableServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BackupManifest defaultInstance; - public static BackupManifest getDefaultInstance() { + private static final TableServerTimestamp defaultInstance; + public static TableServerTimestamp getDefaultInstance() { return defaultInstance; } - public BackupManifest getDefaultInstanceForType() { + public TableServerTimestamp getDefaultInstanceForType() { return defaultInstance; } @@ -4490,7 +2548,7 @@ public final class BackupProtos { getUnknownFields() { return this.unknownFields; } - private BackupManifest( + private TableServerTimestamp( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4502,70 +2560,36 @@ public final class BackupProtos { boolean done = false; while (!done) { int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - version_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - backupId_ = input.readBytes(); - break; - } - case 24: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(3, rawValue); - } else { - bitField0_ |= 0x00000004; - type_ = value; - } - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 40: { - bitField0_ |= 0x00000008; - startTs_ = input.readUInt64(); + switch (tag) { + case 0: + done = true; break; - } - case 48: { - bitField0_ |= 0x00000010; - completeTs_ = input.readUInt64(); + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } break; } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - tstMap_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); } - tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry)); + table_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; break; } - case 66: { - if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - dependentBackupImage_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000080; + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; } - dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry)); break; } } @@ -4576,14 +2600,8 @@ public final class BackupProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - } - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - tstMap_ = java.util.Collections.unmodifiableList(tstMap_); - } - if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { - dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -4591,323 +2609,109 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupManifest parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableServerTimestamp parsePartialFrom( com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupManifest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string version = 1; - public static final int VERSION_FIELD_NUMBER = 1; - private java.lang.Object version_; - /** - 
* required string version = 1; - */ - public boolean hasVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string version = 1; - */ - public java.lang.String getVersion() { - java.lang.Object ref = version_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - version_ = s; - } - return s; - } - } - /** - * required string version = 1; - */ - public com.google.protobuf.ByteString - getVersionBytes() { - java.lang.Object ref = version_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - version_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required string backup_id = 2; - public static final int BACKUP_ID_FIELD_NUMBER = 2; - private java.lang.Object backupId_; - /** - * required string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * required string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // required .hbase.pb.BackupType type = 3; - public static final int TYPE_FIELD_NUMBER = 3; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; - /** - * required .hbase.pb.BackupType type = 3; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required .hbase.pb.BackupType type = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; - } - - // repeated .hbase.pb.TableName table_list = 4; - public static final int TABLE_LIST_FIELD_NUMBER = 4; - private java.util.List tableList_; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListOrBuilderList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - return tableList_.size(); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - return tableList_.get(index); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index) { - return tableList_.get(index); - } - - // required uint64 start_ts = 5; - public static final int START_TS_FIELD_NUMBER = 5; - private long startTs_; - /** - * required uint64 start_ts = 5; - */ - 
public boolean hasStartTs() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required uint64 start_ts = 5; - */ - public long getStartTs() { - return startTs_; - } - - // required uint64 complete_ts = 6; - public static final int COMPLETE_TS_FIELD_NUMBER = 6; - private long completeTs_; - /** - * required uint64 complete_ts = 6; - */ - public boolean hasCompleteTs() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required uint64 complete_ts = 6; - */ - public long getCompleteTs() { - return completeTs_; - } - - // repeated .hbase.pb.TableServerTimestamp tst_map = 7; - public static final int TST_MAP_FIELD_NUMBER = 7; - private java.util.List tstMap_; - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public java.util.List getTstMapList() { - return tstMap_; - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public java.util.List - getTstMapOrBuilderList() { - return tstMap_; + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableServerTimestamp(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - public int getTstMapCount() { - return tstMap_.size(); + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { - return tstMap_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; } /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + * required .hbase.pb.TableName table = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( - int index) { - return tstMap_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; } - // repeated .hbase.pb.BackupImage dependent_backup_image = 8; - public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 8; - private java.util.List dependentBackupImage_; + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; + private java.util.List serverTimestamp_; /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public java.util.List getDependentBackupImageList() { - return dependentBackupImage_; + public java.util.List getServerTimestampList() { + return serverTimestamp_; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public java.util.List - getDependentBackupImageOrBuilderList() { - return dependentBackupImage_; + public java.util.List + getServerTimestampOrBuilderList() { + return serverTimestamp_; } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; 
*/ - public int getDependentBackupImageCount() { - return dependentBackupImage_.size(); + public int getServerTimestampCount() { + return serverTimestamp_.size(); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { - return dependentBackupImage_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + return serverTimestamp_.get(index); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( int index) { - return dependentBackupImage_.get(index); + return serverTimestamp_.get(index); } private void initFields() { - version_ = ""; - backupId_ = ""; - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - tableList_ = java.util.Collections.emptyList(); - startTs_ = 0L; - completeTs_ = 0L; - tstMap_ = java.util.Collections.emptyList(); - dependentBackupImage_ = java.util.Collections.emptyList(); + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + serverTimestamp_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasVersion()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStartTs()) { + if (!hasTable()) { memoizedIsInitialized = 0; return false; } - if (!hasCompleteTs()) { + if (!getTable().isInitialized()) { memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getTstMapCount(); i++) { - if (!getTstMap(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getDependentBackupImageCount(); i++) { - if (!getDependentBackupImage(i).isInitialized()) { + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -4920,28 +2724,10 @@ public final class BackupProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getVersionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeEnum(3, type_.getNumber()); - } - for (int i = 0; i < tableList_.size(); i++) { - output.writeMessage(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(6, completeTs_); - } - for (int i = 0; i < tstMap_.size(); i++) { - output.writeMessage(7, tstMap_.get(i)); + output.writeMessage(1, table_); } - for (int i = 0; i < dependentBackupImage_.size(); 
i++) { - output.writeMessage(8, dependentBackupImage_.get(i)); + for (int i = 0; i < serverTimestamp_.size(); i++) { + output.writeMessage(2, serverTimestamp_.get(i)); } getUnknownFields().writeTo(output); } @@ -4954,35 +2740,11 @@ public final class BackupProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getVersionBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, type_.getNumber()); - } - for (int i = 0; i < tableList_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, completeTs_); - } - for (int i = 0; i < tstMap_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, tstMap_.get(i)); + .computeMessageSize(1, table_); } - for (int i = 0; i < dependentBackupImage_.size(); i++) { + for (int i = 0; i < serverTimestamp_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(8, dependentBackupImage_.get(i)); + .computeMessageSize(2, serverTimestamp_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5001,43 +2763,19 @@ public final class BackupProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) obj; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) obj; boolean result = true; - result = result && (hasVersion() == other.hasVersion()); - if (hasVersion()) { - result = result && getVersion() - .equals(other.getVersion()); - } - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && getTableListList() - .equals(other.getTableListList()); - result = result && (hasStartTs() == other.hasStartTs()); - if (hasStartTs()) { - result = result && (getStartTs() - == other.getStartTs()); - } - result = result && (hasCompleteTs() == other.hasCompleteTs()); - if (hasCompleteTs()) { - result = result && (getCompleteTs() - == other.getCompleteTs()); + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); } - result = result && getTstMapList() - .equals(other.getTstMapList()); - result = result && getDependentBackupImageList() - .equals(other.getDependentBackupImageList()); + result = result && getServerTimestampList() + .equals(other.getServerTimestampList()); result = 
result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5051,90 +2789,66 @@ public final class BackupProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasVersion()) { - hash = (37 * hash) + VERSION_FIELD_NUMBER; - hash = (53 * hash) + getVersion().hashCode(); - } - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (getTableListCount() > 0) { - hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; - hash = (53 * hash) + getTableListList().hashCode(); - } - if (hasStartTs()) { - hash = (37 * hash) + START_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTs()); - } - if (hasCompleteTs()) { - hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCompleteTs()); - } - if (getTstMapCount() > 0) { - hash = (37 * hash) + TST_MAP_FIELD_NUMBER; - hash = (53 * hash) + getTstMapList().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); } - if (getDependentBackupImageCount() > 0) { - hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER; - hash = (53 * hash) + getDependentBackupImageList().hashCode(); + if (getServerTimestampCount() > 0) { + hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getServerTimestampList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5143,7 +2857,7 @@ public final class BackupProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -5155,5552 +2869,5987 @@ public final class BackupProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupManifest} + * Protobuf type {@code hbase.pb.TableServerTimestamp} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + getServerTimestampFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (serverTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.serverTimestamp_ = serverTimestamp_; + } else { + result.serverTimestamp_ = serverTimestampBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (serverTimestampBuilder_ == null) { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestamp_.isEmpty()) { + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServerTimestampIsMutable(); + serverTimestamp_.addAll(other.serverTimestamp_); + } + onChanged(); + } + } else { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestampBuilder_.isEmpty()) { + serverTimestampBuilder_.dispose(); + serverTimestampBuilder_ = null; + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + serverTimestampBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerTimestampFieldBuilder() : null; + } else { + serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } 
else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableListFieldBuilder(); - getTstMapFieldBuilder(); - getDependentBackupImageFieldBuilder(); + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; } + return tableBuilder_; } - private static Builder create() { - return new Builder(); + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + private java.util.List serverTimestamp_ = + java.util.Collections.emptyList(); + private void ensureServerTimestampIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); + bitField0_ |= 0x00000002; + } } - public Builder clear() { - super.clear(); - version_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000004); - if 
(tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; + + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + if (serverTimestampBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverTimestamp_); } else { - tableListBuilder_.clear(); + return serverTimestampBuilder_.getMessageList(); } - startTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - completeTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - if (tstMapBuilder_ == null) { - tstMap_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.size(); } else { - tstMapBuilder_.clear(); + return serverTimestampBuilder_.getCount(); } - if (dependentBackupImageBuilder_ == null) { - dependentBackupImage_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { - dependentBackupImageBuilder_.clear(); + return serverTimestampBuilder_.getMessage(index); } - return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, value); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, value); + } + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
ensureServerTimestampIsMutable(); + serverTimestamp_.add(value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(value); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, value); } - return result; + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(builderForValue.build()); } - result.version_ = version_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, builderForValue.build()); } - result.backupId_ = backupId_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addAllServerTimestamp( + java.lang.Iterable values) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + super.addAll(values, serverTimestamp_); + onChanged(); + } else { + serverTimestampBuilder_.addAllMessages(values); } - result.type_ = type_; - if (tableListBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.tableList_ = tableList_; + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder clearServerTimestamp() { + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); } else { - result.tableList_ = tableListBuilder_.build(); + serverTimestampBuilder_.clear(); } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 
0x00000008; + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder removeServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.remove(index); + onChanged(); + } else { + serverTimestampBuilder_.remove(index); } - result.startTs_ = startTs_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { + return serverTimestampBuilder_.getMessageOrBuilder(index); } - result.completeTs_ = completeTs_; - if (tstMapBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - tstMap_ = java.util.Collections.unmodifiableList(tstMap_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.tstMap_ = tstMap_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + if (serverTimestampBuilder_ != null) { + return serverTimestampBuilder_.getMessageOrBuilderList(); } else { - result.tstMap_ = tstMapBuilder_.build(); + return java.util.Collections.unmodifiableList(serverTimestamp_); } - if (dependentBackupImageBuilder_ == null) { - if (((bitField0_ & 0x00000080) == 0x00000080)) { - dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); - bitField0_ = (bitField0_ & ~0x00000080); - } - result.dependentBackupImage_ = dependentBackupImage_; - } else { - result.dependentBackupImage_ = dependentBackupImageBuilder_.build(); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { + return getServerTimestampFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampBuilderList() { + return getServerTimestampFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampFieldBuilder() { + if (serverTimestampBuilder_ == null) { + serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( + serverTimestamp_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + serverTimestamp_ = null; } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + return serverTimestampBuilder_; } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other); - } else { - super.mergeFrom(other); - return this; - } - } + // @@protoc_insertion_point(builder_scope:hbase.pb.TableServerTimestamp) + } + + static { + defaultInstance = new TableServerTimestamp(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableServerTimestamp) + } + + public interface BackupManifestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string version = 1; + /** + * required string version = 1; + */ + boolean hasVersion(); + /** + * required string version = 1; + */ + java.lang.String getVersion(); + /** + * required string version = 1; + */ + com.google.protobuf.ByteString + getVersionBytes(); + + // required string backup_id = 2; + /** + * required string backup_id = 2; + */ + boolean hasBackupId(); + /** + * required string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType type = 3; + /** + * required .hbase.pb.BackupType type = 3; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + java.util.List + getTstMapList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + int getTstMapCount(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + java.util.List + getTstMapOrBuilderList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index); + + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + java.util.List + getDependentBackupImageList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + int getDependentBackupImageCount(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + java.util.List + getDependentBackupImageOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class BackupManifest extends + com.google.protobuf.GeneratedMessage + implements BackupManifestOrBuilder { + // Use BackupManifest.newBuilder() to construct. + private BackupManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupManifest defaultInstance; + public static BackupManifest getDefaultInstance() { + return defaultInstance; + } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this; - if (other.hasVersion()) { - bitField0_ |= 0x00000001; - version_ = other.version_; - onChanged(); - } - if (other.hasBackupId()) { - bitField0_ |= 0x00000002; - backupId_ = other.backupId_; - onChanged(); - } - if (other.hasType()) { - setType(other.getType()); - } - if (tableListBuilder_ == null) { - if (!other.tableList_.isEmpty()) { - if (tableList_.isEmpty()) { - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTableListIsMutable(); - tableList_.addAll(other.tableList_); + public BackupManifest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupManifest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; } - onChanged(); - } - } else { - if (!other.tableList_.isEmpty()) { - if (tableListBuilder_.isEmpty()) { - tableListBuilder_.dispose(); - tableListBuilder_ = null; - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - tableListBuilder_ = - 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTableListFieldBuilder() : null; - } else { - tableListBuilder_.addAllMessages(other.tableList_); + case 10: { + bitField0_ |= 0x00000001; + version_ = input.readBytes(); + break; } - } - } - if (other.hasStartTs()) { - setStartTs(other.getStartTs()); - } - if (other.hasCompleteTs()) { - setCompleteTs(other.getCompleteTs()); - } - if (tstMapBuilder_ == null) { - if (!other.tstMap_.isEmpty()) { - if (tstMap_.isEmpty()) { - tstMap_ = other.tstMap_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureTstMapIsMutable(); - tstMap_.addAll(other.tstMap_); + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; } - onChanged(); - } - } else { - if (!other.tstMap_.isEmpty()) { - if (tstMapBuilder_.isEmpty()) { - tstMapBuilder_.dispose(); - tstMapBuilder_ = null; - tstMap_ = other.tstMap_; - bitField0_ = (bitField0_ & ~0x00000040); - tstMapBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTstMapFieldBuilder() : null; - } else { - tstMapBuilder_.addAllMessages(other.tstMap_); + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + type_ = value; + } + break; } - } - } - if (dependentBackupImageBuilder_ == null) { - if (!other.dependentBackupImage_.isEmpty()) { - if (dependentBackupImage_.isEmpty()) { - dependentBackupImage_ = other.dependentBackupImage_; - bitField0_ = (bitField0_ & ~0x00000080); - } else { - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.addAll(other.dependentBackupImage_); + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; } - onChanged(); - } - } else { - if (!other.dependentBackupImage_.isEmpty()) { - if (dependentBackupImageBuilder_.isEmpty()) { - dependentBackupImageBuilder_.dispose(); - dependentBackupImageBuilder_ = null; - dependentBackupImage_ = other.dependentBackupImage_; - bitField0_ = (bitField0_ & ~0x00000080); - dependentBackupImageBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getDependentBackupImageFieldBuilder() : null; - } else { - dependentBackupImageBuilder_.addAllMessages(other.dependentBackupImage_); + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry)); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; } } } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasVersion()) { - - return false; - } - if (!hasBackupId()) { - - return false; - } - if (!hasType()) { - - return false; - } - if (!hasStartTs()) { - - return false; - } - if (!hasCompleteTs()) { - - return false; - } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - - return false; - } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); } - for (int i = 0; i < getTstMapCount(); i++) { - if (!getTstMap(i).isInitialized()) { - - return false; - } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); } - for (int i = 0; i < getDependentBackupImageCount(); i++) { - if (!getDependentBackupImage(i).isInitialized()) { - - return false; - } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); } - return true; + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + } - public Builder mergeFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupManifest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupManifest(input, extensionRegistry); } - private int bitField0_; + }; - // required string version = 1; - private java.lang.Object version_ = ""; - /** - * required string version = 1; - */ - public boolean hasVersion() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string version = 1; - */ - public java.lang.String getVersion() { - java.lang.Object ref = version_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private java.lang.Object version_; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { version_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string version = 1; - */ - public com.google.protobuf.ByteString - getVersionBytes() { - java.lang.Object ref = version_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - version_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; } + return s; } - /** - * required string version = 1; - */ - public Builder setVersion( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - version_ = value; - onChanged(); - return this; - } - /** - * required string version = 1; - */ - public Builder clearVersion() { - bitField0_ = (bitField0_ & ~0x00000001); - version_ = getDefaultInstance().getVersion(); - onChanged(); - return this; - } - /** - * required string version = 1; - */ - public Builder setVersionBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - version_ = value; - onChanged(); - return this; + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - // required string backup_id = 2; - private java.lang.Object backupId_ = ""; - /** - * required string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof 
java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); + // required string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string backup_id = 2; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - /** - * required string backup_id = 2; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000002); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * required string backup_id = 2; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - - // required .hbase.pb.BackupType type = 3; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType type = 3; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required .hbase.pb.BackupType type = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; - } - /** - * required .hbase.pb.BackupType type = 3; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); } - bitField0_ |= 0x00000004; - type_ = value; - onChanged(); - return this; + return s; } - /** - * required .hbase.pb.BackupType type = 3; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000004); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); - return this; + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - // repeated .hbase.pb.TableName table_list = 4; - private java.util.List tableList_ = - java.util.Collections.emptyList(); - private void ensureTableListIsMutable() { - if (!((bitField0_ & 
0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(tableList_); - bitField0_ |= 0x00000008; - } - } + // required .hbase.pb.BackupType type = 3; + public static final int TYPE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - if (tableListBuilder_ == null) { - return java.util.Collections.unmodifiableList(tableList_); - } else { - return tableListBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - if (tableListBuilder_ == null) { - return tableList_.size(); - } else { - return tableListBuilder_.getCount(); - } + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + public static final int TST_MAP_FIELD_NUMBER = 7; + private java.util.List tstMap_; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List getTstMapList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapOrBuilderList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public int getTstMapCount() { + 
return tstMap_.size(); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + return tstMap_.get(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + return tstMap_.get(index); + } + + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 8; + private java.util.List dependentBackupImage_; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List getDependentBackupImageList() { + return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List + getDependentBackupImageOrBuilderList() { + return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public int getDependentBackupImageCount() { + return dependentBackupImage_.size(); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + return dependentBackupImage_.get(index); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + return dependentBackupImage_.get(index); + } + + private void initFields() { + version_ = ""; + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + tstMap_ = java.util.Collections.emptyList(); + dependentBackupImage_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - if (tableListBuilder_ == null) { - return tableList_.get(index); - } else { - return tableListBuilder_.getMessage(index); - } + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.set(index, value); - onChanged(); - } else { - tableListBuilder_.setMessage(index, value); - } - return this; + if (!hasType()) { + memoizedIsInitialized = 0; + return false; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.set(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.setMessage(index, builderForValue.build()); - } - return this; + if (!hasStartTs()) { + 
memoizedIsInitialized = 0; + return false; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(value); - onChanged(); - } else { - tableListBuilder_.addMessage(value); - } - return this; + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(index, value); - onChanged(); - } else { - tableListBuilder_.addMessage(index, value); + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; } - return this; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(builderForValue.build()); + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; } - return this; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(index, builderForValue.build()); + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; } - return this; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addAllTableList( - java.lang.Iterable values) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - super.addAll(values, tableList_); - onChanged(); - } else { - tableListBuilder_.addAllMessages(values); - } - return this; + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getVersionBytes()); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder clearTableList() { - if (tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - tableListBuilder_.clear(); - } - return this; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder removeTableList(int index) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.remove(index); - onChanged(); - } else { - tableListBuilder_.remove(index); - } - return this; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, type_.getNumber()); } - /** - * 
repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( - int index) { - return getTableListFieldBuilder().getBuilder(index); + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index) { - if (tableListBuilder_ == null) { - return tableList_.get(index); } else { - return tableListBuilder_.getMessageOrBuilder(index); - } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListOrBuilderList() { - if (tableListBuilder_ != null) { - return tableListBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tableList_); - } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { - return getTableListFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + for (int i = 0; i < tstMap_.size(); i++) { + output.writeMessage(7, tstMap_.get(i)); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( - int index) { - return getTableListFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + for (int i = 0; i < dependentBackupImage_.size(); i++) { + output.writeMessage(8, dependentBackupImage_.get(i)); } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListBuilderList() { - return getTableListFieldBuilder().getBuilderList(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getVersionBytes()); } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTableListFieldBuilder() { - if (tableListBuilder_ == null) { - tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - tableList_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - tableList_ = null; - } - return tableListBuilder_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, type_.getNumber()); + } + for (int i = 0; i < tableList_.size(); i++) 
{ + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < tstMap_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tstMap_.get(i)); + } + for (int i = 0; i < dependentBackupImage_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, dependentBackupImage_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) obj; - // required uint64 start_ts = 5; - private long startTs_ ; - /** - * required uint64 start_ts = 5; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000010) == 0x00000010); + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && getVersion() + .equals(other.getVersion()); } - /** - * required uint64 start_ts = 5; - */ - public long getStartTs() { - return startTs_; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); } - /** - * required uint64 start_ts = 5; - */ - public Builder setStartTs(long value) { - bitField0_ |= 0x00000010; - startTs_ = value; - onChanged(); - return this; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); } - /** - * required uint64 start_ts = 5; - */ - public Builder clearStartTs() { - bitField0_ = (bitField0_ & ~0x00000010); - startTs_ = 0L; - onChanged(); - return this; + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); } + result = result && getTstMapList() + .equals(other.getTstMapList()); + result = result && getDependentBackupImageList() + .equals(other.getDependentBackupImageList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } - // required uint64 complete_ts = 6; - private long completeTs_ ; - /** - * required uint64 complete_ts = 6; - */ - public boolean hasCompleteTs() { - return ((bitField0_ & 0x00000020) == 0x00000020); + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - /** - * required uint64 complete_ts = 6; - */ - public long getCompleteTs() { - return completeTs_; 
+ int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); } - /** - * required uint64 complete_ts = 6; - */ - public Builder setCompleteTs(long value) { - bitField0_ |= 0x00000020; - completeTs_ = value; - onChanged(); - return this; + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); } - /** - * required uint64 complete_ts = 6; - */ - public Builder clearCompleteTs() { - bitField0_ = (bitField0_ & ~0x00000020); - completeTs_ = 0L; - onChanged(); - return this; + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); } - - // repeated .hbase.pb.TableServerTimestamp tst_map = 7; - private java.util.List tstMap_ = - java.util.Collections.emptyList(); - private void ensureTstMapIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - tstMap_ = new java.util.ArrayList(tstMap_); - bitField0_ |= 0x00000040; - } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_; - - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public java.util.List getTstMapList() { - if (tstMapBuilder_ == null) { - return java.util.Collections.unmodifiableList(tstMap_); - } else { - return tstMapBuilder_.getMessageList(); - } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public int getTstMapCount() { - if (tstMapBuilder_ == null) { - return tstMap_.size(); - } else { - return tstMapBuilder_.getCount(); - } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { - if (tstMapBuilder_ == null) { - return tstMap_.get(index); - } else { - return tstMapBuilder_.getMessage(index); - } + if (getTstMapCount() > 0) { + hash = (37 * hash) + TST_MAP_FIELD_NUMBER; + hash = (53 * hash) + getTstMapList().hashCode(); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder setTstMap( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { - if (tstMapBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTstMapIsMutable(); - tstMap_.set(index, value); - onChanged(); - } else { - tstMapBuilder_.setMessage(index, value); - } - return this; + if (getDependentBackupImageCount() > 0) { + hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getDependentBackupImageList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder setTstMap( - int index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { - if (tstMapBuilder_ == null) { - ensureTstMapIsMutable(); - tstMap_.set(index, builderForValue.build()); - onChanged(); - } else { - tstMapBuilder_.setMessage(index, builderForValue.build()); - } - return this; + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { - if (tstMapBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTstMapIsMutable(); - tstMap_.add(value); - onChanged(); - } else { - tstMapBuilder_.addMessage(value); - } - return this; + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder addTstMap( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { - if (tstMapBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTstMapIsMutable(); - tstMap_.add(index, value); - onChanged(); - } else { - tstMapBuilder_.addMessage(index, value); - } - return this; + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder addTstMap( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { - if (tstMapBuilder_ == null) { - ensureTstMapIsMutable(); - tstMap_.add(builderForValue.build()); - onChanged(); - } else { - tstMapBuilder_.addMessage(builderForValue.build()); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getTstMapFieldBuilder(); + getDependentBackupImageFieldBuilder(); } - return this; } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder addTstMap( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { - if (tstMapBuilder_ == null) { - ensureTstMapIsMutable(); - tstMap_.add(index, builderForValue.build()); - onChanged(); - } else { - tstMapBuilder_.addMessage(index, builderForValue.build()); - } - return this; + private static Builder create() { + return new Builder(); } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder addAllTstMap( - java.lang.Iterable values) { - if (tstMapBuilder_ == null) { - ensureTstMapIsMutable(); - super.addAll(values, tstMap_); - onChanged(); + + public Builder clear() { + super.clear(); + version_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000004); + if 
(tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); } else { - tstMapBuilder_.addAllMessages(values); + tableListBuilder_.clear(); } - return this; - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder clearTstMap() { + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); if (tstMapBuilder_ == null) { tstMap_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000040); - onChanged(); } else { tstMapBuilder_.clear(); } - return this; - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public Builder removeTstMap(int index) { - if (tstMapBuilder_ == null) { - ensureTstMapIsMutable(); - tstMap_.remove(index); - onChanged(); + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); } else { - tstMapBuilder_.remove(index); + dependentBackupImageBuilder_.clear(); } return this; } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder( - int index) { - return getTstMapFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( - int index) { - if (tstMapBuilder_ == null) { - return tstMap_.get(index); } else { - return tstMapBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public java.util.List - getTstMapOrBuilderList() { - if (tstMapBuilder_ != null) { - return tstMapBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tstMap_); - } - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() { - return getTstMapFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder( - int index) { - return getTstMapFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableServerTimestamp tst_map = 7; - */ - public java.util.List - getTstMapBuilderList() { - return getTstMapFieldBuilder().getBuilderList(); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> - getTstMapFieldBuilder() { - if (tstMapBuilder_ == null) { - tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>( - tstMap_, - ((bitField0_ & 0x00000040) == 0x00000040), - getParentForChildren(), - isClean()); - tstMap_ = null; - } - return tstMapBuilder_; + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; } - // repeated .hbase.pb.BackupImage dependent_backup_image = 8; - private java.util.List dependentBackupImage_ = - java.util.Collections.emptyList(); - private void ensureDependentBackupImageIsMutable() { - if (!((bitField0_ & 0x00000080) == 0x00000080)) { - dependentBackupImage_ = new java.util.ArrayList(dependentBackupImage_); - bitField0_ |= 0x00000080; - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance(); } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public java.util.List getDependentBackupImageList() { - if (dependentBackupImageBuilder_ == null) { - return java.util.Collections.unmodifiableList(dependentBackupImage_); - } else { - return dependentBackupImageBuilder_.getMessageList(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; } - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public int getDependentBackupImageCount() { - if (dependentBackupImageBuilder_ == null) { - return dependentBackupImage_.size(); + result.type_ = type_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; } else { - return dependentBackupImageBuilder_.getCount(); + result.tableList_ = tableListBuilder_.build(); } - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { - if (dependentBackupImageBuilder_ == null) { - return dependentBackupImage_.get(index); + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = 
startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (tstMapBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tstMap_ = tstMap_; } else { - return dependentBackupImageBuilder_.getMessage(index); + result.tstMap_ = tstMapBuilder_.build(); } - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder setDependentBackupImage( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { if (dependentBackupImageBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + bitField0_ = (bitField0_ & ~0x00000080); } - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.set(index, value); - onChanged(); + result.dependentBackupImage_ = dependentBackupImage_; } else { - dependentBackupImageBuilder_.setMessage(index, value); + result.dependentBackupImage_ = dependentBackupImageBuilder_.build(); } - return this; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder setDependentBackupImage( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { - if (dependentBackupImageBuilder_ == null) { - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.set(index, builderForValue.build()); - onChanged(); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other); } else { - dependentBackupImageBuilder_.setMessage(index, builderForValue.build()); + super.mergeFrom(other); + return this; } - return this; } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { - if (dependentBackupImageBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.add(value); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this; + if (other.hasVersion()) { + bitField0_ |= 0x00000001; + version_ = other.version_; onChanged(); - } else { - dependentBackupImageBuilder_.addMessage(value); } - return this; - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder addDependentBackupImage( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { - if (dependentBackupImageBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.add(index, value); + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) 
{ + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } } else { - dependentBackupImageBuilder_.addMessage(index, value); + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } } - return this; - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder addDependentBackupImage( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { - if (dependentBackupImageBuilder_ == null) { - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.add(builderForValue.build()); - onChanged(); + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (tstMapBuilder_ == null) { + if (!other.tstMap_.isEmpty()) { + if (tstMap_.isEmpty()) { + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTstMapIsMutable(); + tstMap_.addAll(other.tstMap_); + } + onChanged(); + } } else { - dependentBackupImageBuilder_.addMessage(builderForValue.build()); + if (!other.tstMap_.isEmpty()) { + if (tstMapBuilder_.isEmpty()) { + tstMapBuilder_.dispose(); + tstMapBuilder_ = null; + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000040); + tstMapBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTstMapFieldBuilder() : null; + } else { + tstMapBuilder_.addAllMessages(other.tstMap_); + } + } } - return this; - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder addDependentBackupImage( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { if (dependentBackupImageBuilder_ == null) { - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.add(index, builderForValue.build()); - onChanged(); + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImage_.isEmpty()) { + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.addAll(other.dependentBackupImage_); + } + onChanged(); + } } else { - dependentBackupImageBuilder_.addMessage(index, builderForValue.build()); + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImageBuilder_.isEmpty()) { + dependentBackupImageBuilder_.dispose(); + dependentBackupImageBuilder_ = null; + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000080); + dependentBackupImageBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getDependentBackupImageFieldBuilder() : null; + } else { + dependentBackupImageBuilder_.addAllMessages(other.dependentBackupImage_); + } + } } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder addAllDependentBackupImage( - java.lang.Iterable values) { - if (dependentBackupImageBuilder_ == null) { - ensureDependentBackupImageIsMutable(); - super.addAll(values, dependentBackupImage_); - onChanged(); - } else { - dependentBackupImageBuilder_.addAllMessages(values); + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } } - return this; - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder clearDependentBackupImage() { - if (dependentBackupImageBuilder_ == null) { - dependentBackupImage_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000080); - onChanged(); - } else { - dependentBackupImageBuilder_.clear(); + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + + return false; + } } - return this; + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + + return false; + } + } + return true; } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public Builder removeDependentBackupImage(int index) { - if (dependentBackupImageBuilder_ == null) { - ensureDependentBackupImageIsMutable(); - dependentBackupImage_.remove(index); - onChanged(); - } else { - dependentBackupImageBuilder_.remove(index); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } } return this; } + private int bitField0_; + + // required string version = 1; + private java.lang.Object version_ = ""; /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder( - int index) { - return getDependentBackupImageFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * required string version = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( - int index) { - if (dependentBackupImageBuilder_ == null) { - return dependentBackupImage_.get(index); } else { - return dependentBackupImageBuilder_.getMessageOrBuilder(index); - } + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + * required string version = 1; */ - public 
java.util.List - getDependentBackupImageOrBuilderList() { - if (dependentBackupImageBuilder_ != null) { - return dependentBackupImageBuilder_.getMessageOrBuilderList(); + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + version_ = s; + return s; } else { - return java.util.Collections.unmodifiableList(dependentBackupImage_); + return (java.lang.String) ref; } } /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() { - return getDependentBackupImageFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder( - int index) { - return getDependentBackupImageFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); - } - /** - * repeated .hbase.pb.BackupImage dependent_backup_image = 8; - */ - public java.util.List - getDependentBackupImageBuilderList() { - return getDependentBackupImageFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> - getDependentBackupImageFieldBuilder() { - if (dependentBackupImageBuilder_ == null) { - dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( - dependentBackupImage_, - ((bitField0_ & 0x00000080) == 0x00000080), - getParentForChildren(), - isClean()); - dependentBackupImage_ = null; - } - return dependentBackupImageBuilder_; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest) - } - - static { - defaultInstance = new BackupManifest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest) - } - - public interface TableBackupStatusOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.TableName table = 1; - /** - * required .hbase.pb.TableName table = 1; - */ - boolean hasTable(); - /** - * required .hbase.pb.TableName table = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); - /** - * required .hbase.pb.TableName table = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); - - // required string target_dir = 2; - /** - * required string target_dir = 2; - */ - boolean hasTargetDir(); - /** - * required string target_dir = 2; - */ - java.lang.String getTargetDir(); - /** - * required string target_dir = 2; - */ - com.google.protobuf.ByteString - getTargetDirBytes(); - - // optional string snapshot = 3; - /** - * optional string snapshot = 3; - */ - boolean hasSnapshot(); - /** - * optional string snapshot = 3; - */ - java.lang.String getSnapshot(); - /** - * optional 
string snapshot = 3; - */ - com.google.protobuf.ByteString - getSnapshotBytes(); - } - /** - * Protobuf type {@code hbase.pb.TableBackupStatus} - */ - public static final class TableBackupStatus extends - com.google.protobuf.GeneratedMessage - implements TableBackupStatusOrBuilder { - // Use TableBackupStatus.newBuilder() to construct. - private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final TableBackupStatus defaultInstance; - public static TableBackupStatus getDefaultInstance() { - return defaultInstance; - } - - public TableBackupStatus getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private TableBackupStatus( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = table_.toBuilder(); - } - table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(table_); - table_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 18: { - bitField0_ |= 0x00000002; - targetDir_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - snapshot_ = input.readBytes(); - break; - } - } + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; - } + /** + * required string version = 1; + */ + public Builder setVersion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder clearVersion() { + 
bitField0_ = (bitField0_ & ~0x00000001); + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder setVersionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); - } + // required string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public TableBackupStatus parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableBackupStatus(input, extensionRegistry); + // required .hbase.pb.BackupType type = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000004; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000004); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; } - }; - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } - private int bitField0_; - // required .hbase.pb.TableName table = 1; - public static final int TABLE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; - /** - * required .hbase.pb.TableName table = 1; - */ - public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - return table_; - } - /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - return table_; - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; - // required string target_dir = 2; - public static final int TARGET_DIR_FIELD_NUMBER = 2; - private java.lang.Object targetDir_; - /** - * required string target_dir = 2; - */ - public boolean hasTargetDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string target_dir = 2; - */ - public java.lang.String getTargetDir() { - java.lang.Object ref = targetDir_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - targetDir_ = s; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); } - return s; } - } - /** - * required string target_dir = 2; - */ - public com.google.protobuf.ByteString - getTargetDirBytes() { - java.lang.Object ref = targetDir_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } 
} - } - - // optional string snapshot = 3; - public static final int SNAPSHOT_FIELD_NUMBER = 3; - private java.lang.Object snapshot_; - /** - * optional string snapshot = 3; - */ - public boolean hasSnapshot() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional string snapshot = 3; - */ - public java.lang.String getSnapshot() { - java.lang.Object ref = snapshot_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - snapshot_ = s; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); } - return s; + return this; } - } - /** - * optional string snapshot = 3; - */ - public com.google.protobuf.ByteString - getSnapshotBytes() { - java.lang.Object ref = snapshot_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - snapshot_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - } - - private void initFields() { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - targetDir_ = ""; - snapshot_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasTable()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; } - if (!hasTargetDir()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; } - if (!getTable().isInitialized()) { - memoizedIsInitialized = 0; - return false; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + 
tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, table_); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getTargetDirBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getSnapshotBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, table_); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getTargetDirBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); } - if 
(((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getSnapshotBytes()); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)) { - return super.equals(obj); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) obj; - boolean result = true; - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - result = result && (hasTargetDir() == other.hasTargetDir()); - if (hasTargetDir()) { - result = result && getTargetDir() - .equals(other.getTargetDir()); + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; } - result = result && (hasSnapshot() == other.hasSnapshot()); - if (hasSnapshot()) { - result = result && getSnapshot() - .equals(other.getSnapshot()); + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 
6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; } - if (hasTargetDir()) { - hash = (37 * hash) + TARGET_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetDir().hashCode(); + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); + return this; } - if (hasSnapshot()) { - hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; - hash = (53 * hash) + getSnapshot().hashCode(); + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.TableBackupStatus} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + private java.util.List tstMap_ = + java.util.Collections.emptyList(); + private void ensureTstMapIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = new java.util.ArrayList(tstMap_); + bitField0_ |= 0x00000040; + } } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_; - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List getTstMapList() { + if (tstMapBuilder_ == null) { + return java.util.Collections.unmodifiableList(tstMap_); + } else { + return tstMapBuilder_.getMessageList(); + } } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public int getTstMapCount() { + if (tstMapBuilder_ == null) { + return tstMap_.size(); + } else { + return tstMapBuilder_.getCount(); + } } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableFieldBuilder(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); + } else { + return tstMapBuilder_.getMessage(index); } } - private static Builder create() { - return new Builder(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder setTstMap( + 
int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.set(index, value); + onChanged(); + } else { + tstMapBuilder_.setMessage(index, value); + } + return this; } - - public Builder clear() { - super.clear(); - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder setTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.set(index, builderForValue.build()); + onChanged(); } else { - tableBuilder_.clear(); + tstMapBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ = (bitField0_ & ~0x00000001); - targetDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - snapshot_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(value); + onChanged(); + } else { + tstMapBuilder_.addMessage(value); + } + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(index, value); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, value); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(builderForValue.build()); + } + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(index, builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, builderForValue.build()); } - 
return result; + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addAllTstMap( + java.lang.Iterable values) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + super.addAll(values, tstMap_); + onChanged(); + } else { + tstMapBuilder_.addAllMessages(values); } - if (tableBuilder_ == null) { - result.table_ = table_; + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder clearTstMap() { + if (tstMapBuilder_ == null) { + tstMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); } else { - result.table_ = tableBuilder_.build(); + tstMapBuilder_.clear(); } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder removeTstMap(int index) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.remove(index); + onChanged(); + } else { + tstMapBuilder_.remove(index); } - result.targetDir_ = targetDir_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder( + int index) { + return getTstMapFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); } else { + return tstMapBuilder_.getMessageOrBuilder(index); } - result.snapshot_ = snapshot_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapOrBuilderList() { + if (tstMapBuilder_ != null) { + return tstMapBuilder_.getMessageOrBuilderList(); } else { - super.mergeFrom(other); - return this; + return java.util.Collections.unmodifiableList(tstMap_); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() { + return getTstMapFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder( + int index) { + return getTstMapFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapBuilderList() { + return getTstMapFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> + getTstMapFieldBuilder() { + if (tstMapBuilder_ == null) { + tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>( + tstMap_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tstMap_ = null; } + return tstMapBuilder_; } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this; - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (other.hasTargetDir()) { - bitField0_ |= 0x00000002; - targetDir_ = other.targetDir_; - onChanged(); - } - if (other.hasSnapshot()) { - bitField0_ |= 0x00000004; - snapshot_ = other.snapshot_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + private java.util.List dependentBackupImage_ = + java.util.Collections.emptyList(); + private void ensureDependentBackupImageIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = new java.util.ArrayList(dependentBackupImage_); + bitField0_ |= 0x00000080; + } } - public final boolean isInitialized() { - if (!hasTable()) { - - return false; - } - if (!hasTargetDir()) { - - return false; - } - if (!getTable().isInitialized()) { - - return false; - } - return true; - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_; - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List getDependentBackupImageList() { + if (dependentBackupImageBuilder_ == null) { + return java.util.Collections.unmodifiableList(dependentBackupImage_); + } else { + return dependentBackupImageBuilder_.getMessageList(); } - return this; } - private int bitField0_; - - // 
required .hbase.pb.TableName table = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public int getDependentBackupImageCount() { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.size(); + } else { + return dependentBackupImageBuilder_.getCount(); + } } /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { - if (tableBuilder_ == null) { - return table_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.get(index); } else { - return tableBuilder_.getMessage(); + return dependentBackupImageBuilder_.getMessage(index); } } /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - table_ = value; + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, value); onChanged(); } else { - tableBuilder_.setMessage(value); + dependentBackupImageBuilder_.setMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder setTable( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableBuilder_ == null) { - table_ = builderForValue.build(); + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, builderForValue.build()); onChanged(); } else { - tableBuilder_.setMessage(builderForValue.build()); + dependentBackupImageBuilder_.setMessage(index, builderForValue.build()); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { - table_ = - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); - } else { - table_ = value; + public 
Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(value); onChanged(); } else { - tableBuilder_.mergeFrom(value); + dependentBackupImageBuilder_.addMessage(value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder clearTable() { - if (tableBuilder_ == null) { - table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + public Builder addDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, value); onChanged(); } else { - tableBuilder_.clear(); + dependentBackupImageBuilder_.addMessage(index, value); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required .hbase.pb.TableName table = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getTableFieldBuilder().getBuilder(); - } - /** - * required .hbase.pb.TableName table = 1; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { - if (tableBuilder_ != null) { - return tableBuilder_.getMessageOrBuilder(); + public Builder addDependentBackupImage( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(builderForValue.build()); + onChanged(); } else { - return table_; - } - } - /** - * required .hbase.pb.TableName table = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTableFieldBuilder() { - if (tableBuilder_ == null) { - tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - table_, - getParentForChildren(), - isClean()); - table_ = null; + dependentBackupImageBuilder_.addMessage(builderForValue.build()); } - return tableBuilder_; - } - - // required string target_dir = 2; - private java.lang.Object targetDir_ = ""; - /** - * required string target_dir = 2; - */ - public boolean hasTargetDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return this; } /** - * required string target_dir = 2; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public java.lang.String getTargetDir() { - java.lang.Object ref = targetDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - targetDir_ = s; - return s; + public Builder addDependentBackupImage( + int index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, builderForValue.build()); + onChanged(); } else { - return (java.lang.String) ref; + dependentBackupImageBuilder_.addMessage(index, builderForValue.build()); } + return this; } /** - * required string target_dir = 2; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public com.google.protobuf.ByteString - getTargetDirBytes() { - java.lang.Object ref = targetDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetDir_ = b; - return b; + public Builder addAllDependentBackupImage( + java.lang.Iterable values) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + super.addAll(values, dependentBackupImage_); + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + dependentBackupImageBuilder_.addAllMessages(values); } - } - /** - * required string target_dir = 2; - */ - public Builder setTargetDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - targetDir_ = value; - onChanged(); return this; } /** - * required string target_dir = 2; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder clearTargetDir() { - bitField0_ = (bitField0_ & ~0x00000002); - targetDir_ = getDefaultInstance().getTargetDir(); - onChanged(); + public Builder clearDependentBackupImage() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + dependentBackupImageBuilder_.clear(); + } return this; } /** - * required string target_dir = 2; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder setTargetDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - targetDir_ = value; - onChanged(); + public Builder removeDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.remove(index); + onChanged(); + } else { + dependentBackupImageBuilder_.remove(index); + } return this; } - - // optional string snapshot = 3; - private java.lang.Object snapshot_ = ""; /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public boolean hasSnapshot() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().getBuilder(index); } /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public java.lang.String getSnapshot() { - java.lang.Object ref = snapshot_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - snapshot_ = s; - return s; - } else { - return (java.lang.String) ref; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + if (dependentBackupImageBuilder_ == null) { + return 
dependentBackupImage_.get(index); } else { + return dependentBackupImageBuilder_.getMessageOrBuilder(index); } } /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public com.google.protobuf.ByteString - getSnapshotBytes() { - java.lang.Object ref = snapshot_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - snapshot_ = b; - return b; + public java.util.List + getDependentBackupImageOrBuilderList() { + if (dependentBackupImageBuilder_ != null) { + return dependentBackupImageBuilder_.getMessageOrBuilderList(); } else { - return (com.google.protobuf.ByteString) ref; + return java.util.Collections.unmodifiableList(dependentBackupImage_); } } /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder setSnapshot( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - snapshot_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() { + return getDependentBackupImageFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder clearSnapshot() { - bitField0_ = (bitField0_ & ~0x00000004); - snapshot_ = getDefaultInstance().getSnapshot(); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); } /** - * optional string snapshot = 3; + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; */ - public Builder setSnapshotBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); + public java.util.List + getDependentBackupImageBuilderList() { + return getDependentBackupImageFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getDependentBackupImageFieldBuilder() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + dependentBackupImage_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + dependentBackupImage_ = null; + } + return dependentBackupImageBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest) + } + + static { + defaultInstance = new BackupManifest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest) } - bitField0_ |= 0x00000004; - snapshot_ = value; - onChanged(); - return this; + + public interface TableBackupStatusOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // required string target_dir = 2; + /** + * required string target_dir = 2; + */ + boolean hasTargetDir(); + /** + * required string target_dir = 2; + */ + java.lang.String getTargetDir(); + /** + * required string target_dir = 2; + */ + com.google.protobuf.ByteString + getTargetDirBytes(); + + // optional string snapshot = 3; + /** + * optional string snapshot = 3; + */ + boolean hasSnapshot(); + /** + * optional string snapshot = 3; + */ + java.lang.String getSnapshot(); + /** + * optional string snapshot = 3; + */ + com.google.protobuf.ByteString + getSnapshotBytes(); + } + /** + * Protobuf type {@code hbase.pb.TableBackupStatus} + */ + public static final class TableBackupStatus extends + com.google.protobuf.GeneratedMessage + implements TableBackupStatusOrBuilder { + // Use TableBackupStatus.newBuilder() to construct. + private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableBackupStatus defaultInstance; + public static TableBackupStatus getDefaultInstance() { + return defaultInstance; + } + + public TableBackupStatus getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableBackupStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + targetDir_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + snapshot_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = 
unknownFields.build(); + makeExtensionsImmutable(); } - - // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus) + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; } - static { - defaultInstance = new TableBackupStatus(true); - defaultInstance.initFields(); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); } - // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus) - } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableBackupStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableBackupStatus(input, extensionRegistry); + } + }; - public interface BackupInfoOrBuilder - extends com.google.protobuf.MessageOrBuilder { + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } - // required string backup_id = 1; + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - boolean hasBackupId(); + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - java.lang.String getBackupId(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - com.google.protobuf.ByteString - getBackupIdBytes(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } - // required .hbase.pb.BackupType type = 2; + // required string target_dir = 2; + public static final int TARGET_DIR_FIELD_NUMBER = 2; + private java.lang.Object targetDir_; /** - * required .hbase.pb.BackupType type = 2; + * required string target_dir = 2; */ - boolean hasType(); + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } /** - * required .hbase.pb.BackupType type = 2; + * required string target_dir = 2; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetDir_ = s; + } + return s; + } + } + /** + * required string target_dir = 2; + */ + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string snapshot = 3; + public static final int SNAPSHOT_FIELD_NUMBER = 3; + private java.lang.Object snapshot_; + /** + * optional string snapshot = 3; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string snapshot = 3; + */ + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + snapshot_ = s; + } + return s; + } + } + /** + * optional string snapshot = 3; + */ + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + targetDir_ = ""; + snapshot_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getSnapshotBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getSnapshotBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)) { + return super.equals(obj); + } + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTargetDir() == other.hasTargetDir()); + if (hasTargetDir()) { + result = result && getTargetDir() + .equals(other.getTargetDir()); + } + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } - // required string target_root_dir = 3; - /** - * required string target_root_dir = 3; - */ - boolean hasTargetRootDir(); - /** - * required string target_root_dir = 3; - */ - java.lang.String getTargetRootDir(); - /** - * required string target_root_dir = 3; - */ - com.google.protobuf.ByteString - getTargetRootDirBytes(); + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasTargetDir()) { + hash = (37 * hash) + TARGET_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetDir().hashCode(); + } + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } - // optional .hbase.pb.BackupInfo.BackupState state = 4; - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - boolean hasState(); - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState(); + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } - // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - boolean hasPhase(); - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase(); + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } - // optional string failed_message = 6; - /** - * optional string failed_message = 6; - */ - boolean hasFailedMessage(); - /** - * optional string failed_message = 6; - */ - java.lang.String getFailedMessage(); + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } /** - * optional string failed_message = 6; + * Protobuf type {@code hbase.pb.TableBackupStatus} */ - com.google.protobuf.ByteString - getFailedMessageBytes(); + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } - // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - java.util.List - getTableBackupStatusList(); - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index); - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - int getTableBackupStatusCount(); - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - java.util.List - getTableBackupStatusOrBuilderList(); - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( - int index); + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); + } - // optional uint64 start_ts = 8; - /** - * optional uint64 start_ts = 8; - */ - boolean hasStartTs(); - /** - * optional uint64 start_ts = 8; - */ - long getStartTs(); + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } - // optional uint64 end_ts = 9; - /** - * optional uint64 end_ts = 9; - */ - boolean hasEndTs(); - /** - * optional uint64 end_ts = 9; - */ - long getEndTs(); + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } - // optional uint32 progress = 10; - /** - * optional uint32 progress = 10; - */ - boolean hasProgress(); - /** - * optional uint32 progress = 10; - */ - int getProgress(); + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + targetDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + snapshot_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } - // optional string job_id = 11; - /** - * optional string job_id = 11; - */ - boolean hasJobId(); - /** - * optional string job_id = 11; - */ - java.lang.String getJobId(); - /** - * optional string job_id = 11; - */ - com.google.protobuf.ByteString - getJobIdBytes(); + public Builder clone() { + return create().mergeFrom(buildPartial()); + } - // required uint32 workers_number = 12; - /** - * required uint32 workers_number = 12; - */ - boolean hasWorkersNumber(); - /** - * required uint32 workers_number = 12; - */ - int getWorkersNumber(); + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } - // required uint64 bandwidth = 13; - /** - * required uint64 bandwidth = 13; - */ - boolean hasBandwidth(); - /** - * required uint64 bandwidth = 13; - */ - long getBandwidth(); - } - /** - * Protobuf type {@code hbase.pb.BackupInfo} - */ - public static final class BackupInfo extends - com.google.protobuf.GeneratedMessage - implements BackupInfoOrBuilder { - // Use BackupInfo.newBuilder() to construct. 
- private BackupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance(); + } - private static final BackupInfo defaultInstance; - public static BackupInfo getDefaultInstance() { - return defaultInstance; - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } - public BackupInfo getDefaultInstanceForType() { - return defaultInstance; - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.targetDir_ = targetDir_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.snapshot_ = snapshot_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BackupInfo( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - backupId_ = input.readBytes(); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - type_ = value; - } - break; - } - case 26: { - bitField0_ |= 0x00000004; - targetRootDir_ = input.readBytes(); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - state_ = value; 
- } - break; - } - case 40: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(5, rawValue); - } else { - bitField0_ |= 0x00000010; - phase_ = value; - } - break; - } - case 50: { - bitField0_ |= 0x00000020; - failedMessage_ = input.readBytes(); - break; - } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; - } - tableBackupStatus_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry)); - break; - } - case 64: { - bitField0_ |= 0x00000040; - startTs_ = input.readUInt64(); - break; - } - case 72: { - bitField0_ |= 0x00000080; - endTs_ = input.readUInt64(); - break; - } - case 80: { - bitField0_ |= 0x00000100; - progress_ = input.readUInt32(); - break; - } - case 90: { - bitField0_ |= 0x00000200; - jobId_ = input.readBytes(); - break; - } - case 96: { - bitField0_ |= 0x00000400; - workersNumber_ = input.readUInt32(); - break; - } - case 104: { - bitField0_ |= 0x00000800; - bandwidth_ = input.readUInt64(); - break; - } - } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other); + } else { + super.mergeFrom(other); + return this; } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); + if (other.hasTargetDir()) { + bitField0_ |= 0x00000002; + targetDir_ = other.targetDir_; + onChanged(); + } + if (other.hasSnapshot()) { + bitField0_ |= 0x00000004; + snapshot_ = other.snapshot_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; - } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); - } + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!hasTargetDir()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + 
return true; + } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupInfo parsePartialFrom( + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupInfo(input, extensionRegistry); + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } + private int bitField0_; - /** - * Protobuf enum {@code hbase.pb.BackupInfo.BackupState} - */ - public enum BackupState - implements com.google.protobuf.ProtocolMessageEnum { - /** - * WAITING = 0; - */ - WAITING(0, 0), - /** - * RUNNING = 1; - */ - RUNNING(1, 1), + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; /** - * COMPLETE = 2; + * required .hbase.pb.TableName table = 1; */ - COMPLETE(2, 2), + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } /** - * FAILED = 3; + * required .hbase.pb.TableName table = 1; */ - FAILED(3, 3), + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } /** - * CANCELLED = 4; + * required .hbase.pb.TableName table = 1; */ - CANCELLED(4, 4), - ; - + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } /** - * WAITING = 0; + * required .hbase.pb.TableName table = 1; */ - public static final int WAITING_VALUE = 0; + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } /** - * RUNNING = 1; + * required .hbase.pb.TableName table = 1; */ - public static final int RUNNING_VALUE = 1; + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } /** - * COMPLETE = 2; + * required .hbase.pb.TableName table = 1; */ - public static final int COMPLETE_VALUE = 2; + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } /** - * FAILED = 3; + * required .hbase.pb.TableName table = 1; */ - public static final int FAILED_VALUE = 3; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } /** - * CANCELLED = 4; + * required .hbase.pb.TableName table = 1; */ - public static final int CANCELLED_VALUE = 4; - - - public final int getNumber() { return value; } - - public static BackupState valueOf(int value) { - switch (value) { - case 0: return WAITING; - case 1: return RUNNING; - case 2: return COMPLETE; - case 3: return FAILED; - case 4: return CANCELLED; - default: return null; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; } } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public BackupState findValueByNumber(int number) { - return BackupState.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(0); - } - - private static final BackupState[] VALUES = values(); - - public static BackupState valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private BackupState(int index, int 
value) { - this.index = index; - this.value = value; + return tableBuilder_; } - // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupState) - } - - /** - * Protobuf enum {@code hbase.pb.BackupInfo.BackupPhase} - */ - public enum BackupPhase - implements com.google.protobuf.ProtocolMessageEnum { + // required string target_dir = 2; + private java.lang.Object targetDir_ = ""; /** - * REQUEST = 0; + * required string target_dir = 2; */ - REQUEST(0, 0), + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } /** - * SNAPSHOT = 1; + * required string target_dir = 2; */ - SNAPSHOT(1, 1), + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } /** - * PREPARE_INCREMENTAL = 2; + * required string target_dir = 2; */ - PREPARE_INCREMENTAL(2, 2), + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } /** - * SNAPSHOTCOPY = 3; + * required string target_dir = 2; */ - SNAPSHOTCOPY(3, 3), + public Builder setTargetDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } /** - * INCREMENTAL_COPY = 4; + * required string target_dir = 2; */ - INCREMENTAL_COPY(4, 4), + public Builder clearTargetDir() { + bitField0_ = (bitField0_ & ~0x00000002); + targetDir_ = getDefaultInstance().getTargetDir(); + onChanged(); + return this; + } /** - * STORE_MANIFEST = 5; + * required string target_dir = 2; */ - STORE_MANIFEST(5, 5), - ; + public Builder setTargetDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } + // optional string snapshot = 3; + private java.lang.Object snapshot_ = ""; /** - * REQUEST = 0; + * optional string snapshot = 3; */ - public static final int REQUEST_VALUE = 0; + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } /** - * SNAPSHOT = 1; + * optional string snapshot = 3; */ - public static final int SNAPSHOT_VALUE = 1; + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + snapshot_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } /** - * PREPARE_INCREMENTAL = 2; + * optional string snapshot = 3; */ - public static final int PREPARE_INCREMENTAL_VALUE = 2; + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } /** - * SNAPSHOTCOPY = 3; + * optional string snapshot = 3; */ - public static final int SNAPSHOTCOPY_VALUE = 3; + public Builder setSnapshot( + java.lang.String value) { + if (value == null) { + 
throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; + } /** - * INCREMENTAL_COPY = 4; + * optional string snapshot = 3; */ - public static final int INCREMENTAL_COPY_VALUE = 4; + public Builder clearSnapshot() { + bitField0_ = (bitField0_ & ~0x00000004); + snapshot_ = getDefaultInstance().getSnapshot(); + onChanged(); + return this; + } /** - * STORE_MANIFEST = 5; + * optional string snapshot = 3; */ - public static final int STORE_MANIFEST_VALUE = 5; - - - public final int getNumber() { return value; } - - public static BackupPhase valueOf(int value) { - switch (value) { - case 0: return REQUEST; - case 1: return SNAPSHOT; - case 2: return PREPARE_INCREMENTAL; - case 3: return SNAPSHOTCOPY; - case 4: return INCREMENTAL_COPY; - case 5: return STORE_MANIFEST; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public BackupPhase findValueByNumber(int number) { - return BackupPhase.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(1); + public Builder setSnapshotBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; } - private static final BackupPhase[] VALUES = values(); - - public static BackupPhase valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } + // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus) + } - private final int index; - private final int value; + static { + defaultInstance = new TableBackupStatus(true); + defaultInstance.initFields(); + } - private BackupPhase(int index, int value) { - this.index = index; - this.value = value; - } + // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus) + } - // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupPhase) - } + public interface BackupInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { - private int bitField0_; // required string backup_id = 1; - public static final int BACKUP_ID_FIELD_NUMBER = 1; - private java.lang.Object backupId_; /** * required string backup_id = 1; */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } + boolean hasBackupId(); /** * required string backup_id = 1; */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } + java.lang.String getBackupId(); /** * required string 
backup_id = 1; */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + com.google.protobuf.ByteString + getBackupIdBytes(); // required .hbase.pb.BackupType type = 2; - public static final int TYPE_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; /** * required .hbase.pb.BackupType type = 2; */ - public boolean hasType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } + boolean hasType(); /** * required .hbase.pb.BackupType type = 2; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; - } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); // required string target_root_dir = 3; - public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object targetRootDir_; /** * required string target_root_dir = 3; */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } + boolean hasTargetRootDir(); /** * required string target_root_dir = 3; */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - targetRootDir_ = s; - } - return s; - } - } + java.lang.String getTargetRootDir(); /** * required string target_root_dir = 3; */ - public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + com.google.protobuf.ByteString + getTargetRootDirBytes(); // optional .hbase.pb.BackupInfo.BackupState state = 4; - public static final int STATE_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_; /** * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - public boolean hasState() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } + boolean hasState(); /** * optional .hbase.pb.BackupInfo.BackupState state = 4; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { - return state_; - } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState(); // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - public static final int PHASE_FIELD_NUMBER = 5; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_; /** * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - public boolean hasPhase() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } + */ + boolean hasPhase(); /** * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { - return phase_; - } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase(); // optional string 
failed_message = 6; - public static final int FAILED_MESSAGE_FIELD_NUMBER = 6; - private java.lang.Object failedMessage_; /** * optional string failed_message = 6; */ - public boolean hasFailedMessage() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } + boolean hasFailedMessage(); /** * optional string failed_message = 6; */ - public java.lang.String getFailedMessage() { - java.lang.Object ref = failedMessage_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - failedMessage_ = s; - } - return s; - } - } + java.lang.String getFailedMessage(); /** * optional string failed_message = 6; */ - public com.google.protobuf.ByteString - getFailedMessageBytes() { - java.lang.Object ref = failedMessage_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - failedMessage_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + com.google.protobuf.ByteString + getFailedMessageBytes(); // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - public static final int TABLE_BACKUP_STATUS_FIELD_NUMBER = 7; - private java.util.List tableBackupStatus_; /** * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public java.util.List getTableBackupStatusList() { - return tableBackupStatus_; - } + java.util.List + getTableBackupStatusList(); /** * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public java.util.List - getTableBackupStatusOrBuilderList() { - return tableBackupStatus_; - } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index); /** * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public int getTableBackupStatusCount() { - return tableBackupStatus_.size(); - } + int getTableBackupStatusCount(); /** * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { - return tableBackupStatus_.get(index); + java.util.List + getTableBackupStatusOrBuilderList(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index); + + // optional uint64 start_ts = 8; + /** + * optional uint64 start_ts = 8; + */ + boolean hasStartTs(); + /** + * optional uint64 start_ts = 8; + */ + long getStartTs(); + + // optional uint64 end_ts = 9; + /** + * optional uint64 end_ts = 9; + */ + boolean hasEndTs(); + /** + * optional uint64 end_ts = 9; + */ + long getEndTs(); + + // optional uint32 progress = 10; + /** + * optional uint32 progress = 10; + */ + boolean hasProgress(); + /** + * optional uint32 progress = 10; + */ + int getProgress(); + + // optional string job_id = 11; + /** + * optional string job_id = 11; + */ + boolean hasJobId(); + /** + * optional string job_id = 11; + */ + java.lang.String getJobId(); + /** + * optional string job_id = 11; + */ + com.google.protobuf.ByteString + getJobIdBytes(); + + // required uint32 workers_number = 12; + /** + * required uint32 workers_number = 12; + */ + boolean hasWorkersNumber(); + /** + * required uint32 workers_number = 12; + */ + int getWorkersNumber(); + + // required uint64 
bandwidth = 13; + /** + * required uint64 bandwidth = 13; + */ + boolean hasBandwidth(); + /** + * required uint64 bandwidth = 13; + */ + long getBandwidth(); + } + /** + * Protobuf type {@code hbase.pb.BackupInfo} + */ + public static final class BackupInfo extends + com.google.protobuf.GeneratedMessage + implements BackupInfoOrBuilder { + // Use BackupInfo.newBuilder() to construct. + private BackupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupInfo defaultInstance; + public static BackupInfo getDefaultInstance() { + return defaultInstance; + } + + public BackupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + type_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + targetRootDir_ = input.readBytes(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + state_ = value; + } + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + phase_ = value; + } + break; + } + case 50: { + bitField0_ |= 0x00000020; + failedMessage_ = input.readBytes(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tableBackupStatus_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry)); + break; + } + case 64: { + bitField0_ |= 0x00000040; + startTs_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000080; + endTs_ = input.readUInt64(); + break; + } + 
case 80: { + bitField0_ |= 0x00000100; + progress_ = input.readUInt32(); + break; + } + case 90: { + bitField0_ |= 0x00000200; + jobId_ = input.readBytes(); + break; + } + case 96: { + bitField0_ |= 0x00000400; + workersNumber_ = input.readUInt32(); + break; + } + case 104: { + bitField0_ |= 0x00000800; + bandwidth_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( - int index) { - return tableBackupStatus_.get(index); + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } - // optional uint64 start_ts = 8; - public static final int START_TS_FIELD_NUMBER = 8; - private long startTs_; - /** - * optional uint64 start_ts = 8; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional uint64 start_ts = 8; - */ - public long getStartTs() { - return startTs_; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); } - // optional uint64 end_ts = 9; - public static final int END_TS_FIELD_NUMBER = 9; - private long endTs_; - /** - * optional uint64 end_ts = 9; - */ - public boolean hasEndTs() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 end_ts = 9; - */ - public long getEndTs() { - return endTs_; - } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupInfo(input, extensionRegistry); + } + }; - // optional uint32 progress = 10; - public static final int PROGRESS_FIELD_NUMBER = 10; - private int progress_; - /** - * optional uint32 progress = 10; - */ - public boolean hasProgress() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional uint32 progress = 10; - */ - public int getProgress() { - return progress_; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - // optional string job_id = 11; - public static final int JOB_ID_FIELD_NUMBER = 11; - private java.lang.Object jobId_; - /** - * optional string job_id = 11; - */ - public boolean hasJobId() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - /** - * optional string job_id = 11; - */ - public java.lang.String getJobId() { - 
java.lang.Object ref = jobId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - jobId_ = s; - } - return s; - } - } /** - * optional string job_id = 11; + * Protobuf enum {@code hbase.pb.BackupInfo.BackupState} */ - public com.google.protobuf.ByteString - getJobIdBytes() { - java.lang.Object ref = jobId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - jobId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } + public enum BackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * WAITING = 0; + */ + WAITING(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * COMPLETE = 2; + */ + COMPLETE(2, 2), + /** + * FAILED = 3; + */ + FAILED(3, 3), + /** + * CANCELLED = 4; + */ + CANCELLED(4, 4), + ; - // required uint32 workers_number = 12; - public static final int WORKERS_NUMBER_FIELD_NUMBER = 12; - private int workersNumber_; - /** - * required uint32 workers_number = 12; - */ - public boolean hasWorkersNumber() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * required uint32 workers_number = 12; - */ - public int getWorkersNumber() { - return workersNumber_; - } + /** + * WAITING = 0; + */ + public static final int WAITING_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * COMPLETE = 2; + */ + public static final int COMPLETE_VALUE = 2; + /** + * FAILED = 3; + */ + public static final int FAILED_VALUE = 3; + /** + * CANCELLED = 4; + */ + public static final int CANCELLED_VALUE = 4; - // required uint64 bandwidth = 13; - public static final int BANDWIDTH_FIELD_NUMBER = 13; - private long bandwidth_; - /** - * required uint64 bandwidth = 13; - */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * required uint64 bandwidth = 13; - */ - public long getBandwidth() { - return bandwidth_; - } - private void initFields() { - backupId_ = ""; - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - targetRootDir_ = ""; - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; - failedMessage_ = ""; - tableBackupStatus_ = java.util.Collections.emptyList(); - startTs_ = 0L; - endTs_ = 0L; - progress_ = 0; - jobId_ = ""; - workersNumber_ = 0; - bandwidth_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; + public final int getNumber() { return value; } - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTargetRootDir()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasWorkersNumber()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBandwidth()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTableBackupStatusCount(); i++) { - if (!getTableBackupStatus(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; + public static BackupState valueOf(int value) { + switch (value) { + case 0: return 
WAITING; + case 1: return RUNNING; + case 2: return COMPLETE; + case 3: return FAILED; + case 4: return CANCELLED; + default: return null; } } - memoizedIsInitialized = 1; - return true; - } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, type_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getTargetRootDirBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, state_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeEnum(5, phase_.getNumber()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBytes(6, getFailedMessageBytes()); - } - for (int i = 0; i < tableBackupStatus_.size(); i++) { - output.writeMessage(7, tableBackupStatus_.get(i)); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(8, startTs_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeUInt64(9, endTs_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeUInt32(10, progress_); + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBytes(11, getJobIdBytes()); + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupState findValueByNumber(int number) { + return BackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeUInt32(12, workersNumber_); + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - output.writeUInt64(13, bandwidth_); + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(0); } - getUnknownFields().writeTo(output); - } - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; + private static final BackupState[] VALUES = values(); - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBackupIdBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, type_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getTargetRootDirBytes()); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, state_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(5, phase_.getNumber()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getFailedMessageBytes()); - } - for (int i = 0; i < 
tableBackupStatus_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, tableBackupStatus_.get(i)); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, startTs_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(9, endTs_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(10, progress_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getJobIdBytes()); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(12, workersNumber_); + public static BackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(13, bandwidth_); + + private final int index; + private final int value; + + private BackupState(int index, int value) { + this.index = index; + this.value = value; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupState) } - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) obj; + /** + * Protobuf enum {@code hbase.pb.BackupInfo.BackupPhase} + */ + public enum BackupPhase + implements com.google.protobuf.ProtocolMessageEnum { + /** + * REQUEST = 0; + */ + REQUEST(0, 0), + /** + * SNAPSHOT = 1; + */ + SNAPSHOT(1, 1), + /** + * PREPARE_INCREMENTAL = 2; + */ + PREPARE_INCREMENTAL(2, 2), + /** + * SNAPSHOTCOPY = 3; + */ + SNAPSHOTCOPY(3, 3), + /** + * INCREMENTAL_COPY = 4; + */ + INCREMENTAL_COPY(4, 4), + /** + * STORE_MANIFEST = 5; + */ + STORE_MANIFEST(5, 5), + ; - boolean result = true; - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && (hasTargetRootDir() == other.hasTargetRootDir()); - if (hasTargetRootDir()) { - result = result && getTargetRootDir() - .equals(other.getTargetRootDir()); - } - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && (hasPhase() == other.hasPhase()); - if (hasPhase()) { - result = result && - (getPhase() == other.getPhase()); - } - result = result && (hasFailedMessage() == other.hasFailedMessage()); - if (hasFailedMessage()) { - result = result && 
getFailedMessage() - .equals(other.getFailedMessage()); + /** + * REQUEST = 0; + */ + public static final int REQUEST_VALUE = 0; + /** + * SNAPSHOT = 1; + */ + public static final int SNAPSHOT_VALUE = 1; + /** + * PREPARE_INCREMENTAL = 2; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 2; + /** + * SNAPSHOTCOPY = 3; + */ + public static final int SNAPSHOTCOPY_VALUE = 3; + /** + * INCREMENTAL_COPY = 4; + */ + public static final int INCREMENTAL_COPY_VALUE = 4; + /** + * STORE_MANIFEST = 5; + */ + public static final int STORE_MANIFEST_VALUE = 5; + + + public final int getNumber() { return value; } + + public static BackupPhase valueOf(int value) { + switch (value) { + case 0: return REQUEST; + case 1: return SNAPSHOT; + case 2: return PREPARE_INCREMENTAL; + case 3: return SNAPSHOTCOPY; + case 4: return INCREMENTAL_COPY; + case 5: return STORE_MANIFEST; + default: return null; + } } - result = result && getTableBackupStatusList() - .equals(other.getTableBackupStatusList()); - result = result && (hasStartTs() == other.hasStartTs()); - if (hasStartTs()) { - result = result && (getStartTs() - == other.getStartTs()); + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; } - result = result && (hasEndTs() == other.hasEndTs()); - if (hasEndTs()) { - result = result && (getEndTs() - == other.getEndTs()); + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupPhase findValueByNumber(int number) { + return BackupPhase.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); } - result = result && (hasProgress() == other.hasProgress()); - if (hasProgress()) { - result = result && (getProgress() - == other.getProgress()); + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); } - result = result && (hasJobId() == other.hasJobId()); - if (hasJobId()) { - result = result && getJobId() - .equals(other.getJobId()); + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(1); } - result = result && (hasWorkersNumber() == other.hasWorkersNumber()); - if (hasWorkersNumber()) { - result = result && (getWorkersNumber() - == other.getWorkersNumber()); + + private static final BackupPhase[] VALUES = values(); + + public static BackupPhase valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; } - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); + + private final int index; + private final int value; + + private BackupPhase(int index, int value) { + this.index = index; + this.value = value; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupPhase) } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + 
getDescriptorForType().hashCode(); - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (hasTargetRootDir()) { - hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetRootDir().hashCode(); - } - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - if (hasPhase()) { - hash = (37 * hash) + PHASE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getPhase()); - } - if (hasFailedMessage()) { - hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; - hash = (53 * hash) + getFailedMessage().hashCode(); - } - if (getTableBackupStatusCount() > 0) { - hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; - hash = (53 * hash) + getTableBackupStatusList().hashCode(); - } - if (hasStartTs()) { - hash = (37 * hash) + START_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTs()); - } - if (hasEndTs()) { - hash = (37 * hash) + END_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getEndTs()); - } - if (hasProgress()) { - hash = (37 * hash) + PROGRESS_FIELD_NUMBER; - hash = (53 * hash) + getProgress(); + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; } - if (hasJobId()) { - hash = (37 * hash) + JOB_ID_FIELD_NUMBER; - hash = (53 * hash) + getJobId().hashCode(); + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - if (hasWorkersNumber()) { - hash = (37 * hash) + WORKERS_NUMBER_FIELD_NUMBER; - hash = (53 * hash) + getWorkersNumber(); + } + + // required .hbase.pb.BackupType type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return 
(java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; } - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); + // optional .hbase.pb.BackupInfo.BackupState state = 4; + public static final int STATE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { + return state_; + } + + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + public static final int PHASE_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_; + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { + return phase_; + } + + // optional string failed_message = 6; + public static final int FAILED_MESSAGE_FIELD_NUMBER = 6; + private java.lang.Object failedMessage_; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + failedMessage_ = s; + } + return s; + } } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + 
if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + public static final int TABLE_BACKUP_STATUS_FIELD_NUMBER = 7; + private java.util.List tableBackupStatus_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + return tableBackupStatus_; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + return tableBackupStatus_; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + return tableBackupStatus_.size(); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + return tableBackupStatus_.get(index); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + return tableBackupStatus_.get(index); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); + + // optional uint64 start_ts = 8; + public static final int START_TS_FIELD_NUMBER = 8; + private long startTs_; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000040) == 0x00000040); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; } - public static Builder newBuilder() { return Builder.create(); } - public 
Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo prototype) { - return newBuilder().mergeFrom(prototype); + // optional uint64 end_ts = 9; + public static final int END_TS_FIELD_NUMBER = 9; + private long endTs_; + /** + * optional uint64 end_ts = 9; + */ + public boolean hasEndTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 end_ts = 9; + */ + public long getEndTs() { + return endTs_; } - public Builder toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + // optional uint32 progress = 10; + public static final int PROGRESS_FIELD_NUMBER = 10; + private int progress_; + /** + * optional uint32 progress = 10; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * Protobuf type {@code hbase.pb.BackupInfo} + * optional uint32 progress = 10; */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableBackupStatusFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } + public int getProgress() { + return progress_; + } - public Builder clear() { - super.clear(); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000002); - targetRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; - bitField0_ = (bitField0_ & ~0x00000008); - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; - bitField0_ = (bitField0_ & ~0x00000010); - failedMessage_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); - if (tableBackupStatusBuilder_ == null) { - tableBackupStatus_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - } else { - tableBackupStatusBuilder_.clear(); + // optional string job_id = 11; + public static final int JOB_ID_FIELD_NUMBER = 11; + private java.lang.Object jobId_; + /** + * optional string job_id = 11; + */ + public boolean hasJobId() { + 
return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string job_id = 11; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + jobId_ = s; } - startTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - endTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000100); - progress_ = 0; - bitField0_ = (bitField0_ & ~0x00000200); - jobId_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); - workersNumber_ = 0; - bitField0_ = (bitField0_ & ~0x00000800); - bandwidth_ = 0L; - bitField0_ = (bitField0_ & ~0x00001000); - return this; + return s; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + } + /** + * optional string job_id = 11; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } + } - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; - } + // required uint32 workers_number = 12; + public static final int WORKERS_NUMBER_FIELD_NUMBER = 12; + private int workersNumber_; + /** + * required uint32 workers_number = 12; + */ + public boolean hasWorkersNumber() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * required uint32 workers_number = 12; + */ + public int getWorkersNumber() { + return workersNumber_; + } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); - } + // required uint64 bandwidth = 13; + public static final int BANDWIDTH_FIELD_NUMBER = 13; + private long bandwidth_; + /** + * required uint64 bandwidth = 13; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * required uint64 bandwidth = 13; + */ + public long getBandwidth() { + return bandwidth_; + } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + private void initFields() { + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + targetRootDir_ = ""; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + failedMessage_ = ""; + tableBackupStatus_ = java.util.Collections.emptyList(); + startTs_ = 0L; + endTs_ = 0L; + progress_ = 0; + jobId_ = ""; + workersNumber_ = 0; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo buildPartial() { - 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.backupId_ = backupId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.targetRootDir_ = targetRootDir_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.state_ = state_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.phase_ = phase_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.failedMessage_ = failedMessage_; - if (tableBackupStatusBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.tableBackupStatus_ = tableBackupStatus_; - } else { - result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.startTs_ = startTs_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000080; - } - result.endTs_ = endTs_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; - } - result.progress_ = progress_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000200; - } - result.jobId_ = jobId_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000400; - } - result.workersNumber_ = workersNumber_; - if (((from_bitField0_ & 0x00001000) == 0x00001000)) { - to_bitField0_ |= 0x00000800; - } - result.bandwidth_ = bandwidth_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)other); - } else { - super.mergeFrom(other); - return this; - } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) return this; - if (other.hasBackupId()) { - bitField0_ |= 0x00000001; - backupId_ = other.backupId_; - onChanged(); - } - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasTargetRootDir()) { - bitField0_ |= 0x00000004; - targetRootDir_ = other.targetRootDir_; - onChanged(); - } - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasPhase()) { - setPhase(other.getPhase()); - } - if (other.hasFailedMessage()) { - bitField0_ |= 0x00000020; - failedMessage_ = other.failedMessage_; - onChanged(); - } - if (tableBackupStatusBuilder_ == null) { - if (!other.tableBackupStatus_.isEmpty()) { - if (tableBackupStatus_.isEmpty()) { - tableBackupStatus_ = other.tableBackupStatus_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - 
ensureTableBackupStatusIsMutable(); - tableBackupStatus_.addAll(other.tableBackupStatus_); - } - onChanged(); - } - } else { - if (!other.tableBackupStatus_.isEmpty()) { - if (tableBackupStatusBuilder_.isEmpty()) { - tableBackupStatusBuilder_.dispose(); - tableBackupStatusBuilder_ = null; - tableBackupStatus_ = other.tableBackupStatus_; - bitField0_ = (bitField0_ & ~0x00000040); - tableBackupStatusBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTableBackupStatusFieldBuilder() : null; - } else { - tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); - } - } - } - if (other.hasStartTs()) { - setStartTs(other.getStartTs()); - } - if (other.hasEndTs()) { - setEndTs(other.getEndTs()); - } - if (other.hasProgress()) { - setProgress(other.getProgress()); - } - if (other.hasJobId()) { - bitField0_ |= 0x00000400; - jobId_ = other.jobId_; - onChanged(); - } - if (other.hasWorkersNumber()) { - setWorkersNumber(other.getWorkersNumber()); - } - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; } - - public final boolean isInitialized() { - if (!hasBackupId()) { - - return false; - } - if (!hasType()) { - - return false; - } - if (!hasTargetRootDir()) { - - return false; - } - if (!hasWorkersNumber()) { - - return false; - } - if (!hasBandwidth()) { - + if (!hasWorkersNumber()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBandwidth()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getTableBackupStatusCount(); i++) { - if (!getTableBackupStatus(i).isInitialized()) { - - return false; - } - } - return true; } + memoizedIsInitialized = 1; + return true; + } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); } - private int bitField0_; - - // required string backup_id = 1; - private java.lang.Object backupId_ = ""; - /** - * required string backup_id = 1; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, type_.getNumber()); } - /** - * required string backup_id = 1; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, 
getTargetRootDirBytes()); } - /** - * required string backup_id = 1; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, state_.getNumber()); } - /** - * required string backup_id = 1; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, phase_.getNumber()); } - /** - * required string backup_id = 1; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getFailedMessageBytes()); } - /** - * required string backup_id = 1; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; + for (int i = 0; i < tableBackupStatus_.size(); i++) { + output.writeMessage(7, tableBackupStatus_.get(i)); } - - // required .hbase.pb.BackupType type = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType type = 2; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000002) == 0x00000002); + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, startTs_); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt64(9, endTs_); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - type_ = value; - onChanged(); - return this; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeUInt32(10, progress_); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000002); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); - return this; + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getJobIdBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeUInt32(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeUInt64(13, bandwidth_); } + getUnknownFields().writeTo(output); + } - // required string target_root_dir = 3; - private java.lang.Object targetRootDir_ = ""; - /** - * required string target_root_dir = 3; - */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if 
(((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); } - /** - * required string target_root_dir = 3; - */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - targetRootDir_ = s; - return s; - } else { - return (java.lang.String) ref; - } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, type_.getNumber()); } - /** - * required string target_root_dir = 3; - */ - public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(10, progress_); } - /** - * required string target_root_dir = 3; - */ - public Builder clearTargetRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - targetRootDir_ = getDefaultInstance().getTargetRootDir(); - onChanged(); - return this; + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getJobIdBytes()); } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(13, bandwidth_); } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } - // optional .hbase.pb.BackupInfo.BackupState state = 4; - 
private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - public boolean hasState() { - return ((bitField0_ & 0x00000008) == 0x00000008); + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; } - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { - return state_; + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)) { + return super.equals(obj); } - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - state_ = value; - onChanged(); - return this; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); } - /** - * optional .hbase.pb.BackupInfo.BackupState state = 4; - */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000008); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; - onChanged(); - return this; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); } - - // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - public boolean hasPhase() { - return ((bitField0_ & 0x00000010) == 0x00000010); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); } - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { - return phase_; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); } - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000010; - phase_ = value; - onChanged(); - return this; + result = result && (hasPhase() == other.hasPhase()); + if (hasPhase()) { + result = result && + (getPhase() == other.getPhase()); } - /** - * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; - */ - public Builder clearPhase() { - bitField0_ = (bitField0_ & ~0x00000010); - phase_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; - onChanged(); - return this; + result = result && (hasFailedMessage() == other.hasFailedMessage()); + if (hasFailedMessage()) { + result = result && getFailedMessage() + .equals(other.getFailedMessage()); } - - // optional string failed_message = 6; - private java.lang.Object failedMessage_ = ""; - /** - * optional string failed_message = 6; - */ - public boolean hasFailedMessage() { - return ((bitField0_ & 0x00000020) == 0x00000020); + result = result && getTableBackupStatusList() + .equals(other.getTableBackupStatusList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); } - /** - * optional string failed_message = 6; - */ - public java.lang.String getFailedMessage() { - java.lang.Object ref = failedMessage_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - failedMessage_ = s; - return s; - } else { - return (java.lang.String) ref; - } + result = result && (hasEndTs() == other.hasEndTs()); + if (hasEndTs()) { + result = result && (getEndTs() + == other.getEndTs()); } - /** - * optional string failed_message = 6; - */ - public com.google.protobuf.ByteString - getFailedMessageBytes() { - java.lang.Object ref = failedMessage_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - failedMessage_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + result = result && (hasProgress() == other.hasProgress()); + if (hasProgress()) { + result = result && (getProgress() + == other.getProgress()); } - /** - * optional string failed_message = 6; - */ - public Builder setFailedMessage( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - failedMessage_ = value; - onChanged(); - return this; + result = result && (hasJobId() == other.hasJobId()); + if (hasJobId()) { + result = result && getJobId() + .equals(other.getJobId()); } - /** - * optional string failed_message = 6; - */ - public Builder clearFailedMessage() { - bitField0_ = (bitField0_ & ~0x00000020); - failedMessage_ = getDefaultInstance().getFailedMessage(); - onChanged(); - return this; + result = result && (hasWorkersNumber() == other.hasWorkersNumber()); + if (hasWorkersNumber()) { + result = result && (getWorkersNumber() + == other.getWorkersNumber()); } - /** - * optional string failed_message = 6; - */ - public Builder setFailedMessageBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - failedMessage_ = value; - onChanged(); - return this; + result = result && (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } - // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - private java.util.List tableBackupStatus_ = - java.util.Collections.emptyList(); - private void ensureTableBackupStatusIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); - bitField0_ |= 0x00000040; - } + private int memoizedHashCode = 0; + @java.lang.Override + public int 
hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; - - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public java.util.List getTableBackupStatusList() { - if (tableBackupStatusBuilder_ == null) { - return java.util.Collections.unmodifiableList(tableBackupStatus_); - } else { - return tableBackupStatusBuilder_.getMessageList(); - } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public int getTableBackupStatusCount() { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.size(); - } else { - return tableBackupStatusBuilder_.getCount(); - } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.get(index); - } else { - return tableBackupStatusBuilder_.getMessage(index); - } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder setTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.set(index, value); - onChanged(); - } else { - tableBackupStatusBuilder_.setMessage(index, value); - } - return this; + if (hasPhase()) { + hash = (37 * hash) + PHASE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getPhase()); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder setTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.set(index, builderForValue.build()); - onChanged(); - } else { - tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); - } - return this; + if (hasFailedMessage()) { + hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getFailedMessage().hashCode(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(value); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(value); - } - 
return this; + if (getTableBackupStatusCount() > 0) { + hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getTableBackupStatusList().hashCode(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(index, value); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(index, value); - } - return this; + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addTableBackupStatus( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(builderForValue.build()); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(builderForValue.build()); - } - return this; + if (hasEndTs()) { + hash = (37 * hash) + END_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getEndTs()); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(index, builderForValue.build()); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); - } - return this; + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addAllTableBackupStatus( - java.lang.Iterable values) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - super.addAll(values, tableBackupStatus_); - onChanged(); - } else { - tableBackupStatusBuilder_.addAllMessages(values); - } - return this; + if (hasJobId()) { + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder clearTableBackupStatus() { - if (tableBackupStatusBuilder_ == null) { - tableBackupStatus_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - onChanged(); - } else { - tableBackupStatusBuilder_.clear(); - } - return this; + if (hasWorkersNumber()) { + hash = (37 * hash) + WORKERS_NUMBER_FIELD_NUMBER; + hash = (53 * hash) + getWorkersNumber(); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder removeTableBackupStatus(int index) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.remove(index); - onChanged(); - } else { - tableBackupStatusBuilder_.remove(index); - } - return this; + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( - int index) { - return getTableBackupStatusFieldBuilder().getBuilder(index); + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( - int index) { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.get(index); } else { - return tableBackupStatusBuilder_.getMessageOrBuilder(index); - } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public java.util.List - getTableBackupStatusOrBuilderList() { - if (tableBackupStatusBuilder_ != null) { - return tableBackupStatusBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tableBackupStatus_); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableBackupStatusFieldBuilder(); } } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { - return getTableBackupStatusFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( - int index) { - return getTableBackupStatusFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public java.util.List - getTableBackupStatusBuilderList() { - return getTableBackupStatusFieldBuilder().getBuilderList(); + private static Builder create() { + return new Builder(); } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> - getTableBackupStatusFieldBuilder() { + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + bitField0_ = (bitField0_ & ~0x00000008); + phase_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + bitField0_ = (bitField0_ & ~0x00000010); + failedMessage_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); if (tableBackupStatusBuilder_ == null) { - tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( - tableBackupStatus_, - ((bitField0_ & 0x00000040) == 0x00000040), - getParentForChildren(), - isClean()); - tableBackupStatus_ = null; + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tableBackupStatusBuilder_.clear(); } - return tableBackupStatusBuilder_; - } - - // optional uint64 start_ts = 8; - private long startTs_ ; - /** - * optional uint64 start_ts = 8; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 start_ts = 8; - */ - public long getStartTs() { - return startTs_; - } - /** - * optional uint64 start_ts = 8; - */ - public Builder setStartTs(long value) { - bitField0_ |= 0x00000080; - startTs_ = value; - onChanged(); - return this; - } - /** - * optional uint64 start_ts = 8; - */ - public Builder clearStartTs() { - bitField0_ = (bitField0_ & ~0x00000080); startTs_ = 0L; - onChanged(); + bitField0_ = (bitField0_ & ~0x00000080); + endTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000100); + progress_ = 0; + bitField0_ = (bitField0_ & ~0x00000200); + jobId_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + workersNumber_ = 0; + bitField0_ = (bitField0_ & ~0x00000800); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00001000); return this; } - // optional uint64 end_ts = 9; - private long endTs_ ; - /** - * optional uint64 end_ts = 9; - */ - public boolean hasEndTs() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - /** - * optional uint64 end_ts = 9; - */ - public long getEndTs() { - return endTs_; - } - /** - * optional uint64 end_ts = 9; - */ - public Builder setEndTs(long value) { - bitField0_ |= 0x00000100; - endTs_ = value; - onChanged(); - return this; - } - /** - * optional uint64 end_ts = 9; - */ - public Builder clearEndTs() { - bitField0_ = (bitField0_ & ~0x00000100); - endTs_ = 0L; - onChanged(); - return this; + public Builder clone() { + return create().mergeFrom(buildPartial()); } - // optional uint32 progress = 10; - private int progress_ ; - /** - * optional uint32 progress = 10; - */ - public boolean hasProgress() { - return ((bitField0_ & 0x00000200) == 0x00000200); + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; } - /** - * optional uint32 progress = 10; - */ - public int getProgress() { - return progress_; + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); } - /** - * optional uint32 progress = 10; - */ - public Builder setProgress(int value) { - bitField0_ |= 0x00000200; - progress_ = value; - onChanged(); - return this; + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - /** - * optional uint32 progress = 10; - */ - public Builder clearProgress() { - bitField0_ = (bitField0_ & ~0x00000200); - progress_ = 0; - onChanged(); - return this; + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.phase_ = phase_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.failedMessage_ = failedMessage_; + if (tableBackupStatusBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tableBackupStatus_ = tableBackupStatus_; + } else { + result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.endTs_ = endTs_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.progress_ = progress_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.jobId_ = jobId_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.workersNumber_ = workersNumber_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - // optional string job_id = 11; - private java.lang.Object jobId_ = ""; - /** - * optional string job_id = 11; - */ - public boolean hasJobId() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - /** - * optional string job_id = 11; - */ - public java.lang.String getJobId() { - java.lang.Object ref = jobId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - jobId_ = s; - return s; + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)other); } else { - return (java.lang.String) ref; + super.mergeFrom(other); + return this; } } - /** - * optional string job_id = 11; - */ - public com.google.protobuf.ByteString - getJobIdBytes() { - java.lang.Object ref = jobId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - 
com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - jobId_ = b; - return b; + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasPhase()) { + setPhase(other.getPhase()); + } + if (other.hasFailedMessage()) { + bitField0_ |= 0x00000020; + failedMessage_ = other.failedMessage_; + onChanged(); + } + if (tableBackupStatusBuilder_ == null) { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatus_.isEmpty()) { + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.addAll(other.tableBackupStatus_); + } + onChanged(); + } } else { - return (com.google.protobuf.ByteString) ref; + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatusBuilder_.isEmpty()) { + tableBackupStatusBuilder_.dispose(); + tableBackupStatusBuilder_ = null; + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + tableBackupStatusBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableBackupStatusFieldBuilder() : null; + } else { + tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); + } + } } - } - /** - * optional string job_id = 11; - */ - public Builder setJobId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - jobId_ = value; - onChanged(); - return this; - } - /** - * optional string job_id = 11; - */ - public Builder clearJobId() { - bitField0_ = (bitField0_ & ~0x00000400); - jobId_ = getDefaultInstance().getJobId(); - onChanged(); - return this; - } - /** - * optional string job_id = 11; - */ - public Builder setJobIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - jobId_ = value; - onChanged(); + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasEndTs()) { + setEndTs(other.getEndTs()); + } + if (other.hasProgress()) { + setProgress(other.getProgress()); + } + if (other.hasJobId()) { + bitField0_ |= 0x00000400; + jobId_ = other.jobId_; + onChanged(); + } + if (other.hasWorkersNumber()) { + setWorkersNumber(other.getWorkersNumber()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - // required uint32 workers_number = 12; - private int workersNumber_ ; - /** - * required uint32 workers_number = 12; - */ - public boolean hasWorkersNumber() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * required uint32 workers_number = 12; - */ - public int getWorkersNumber() { - return workersNumber_; + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + if (!hasWorkersNumber()) { + + return false; + } + if (!hasBandwidth()) { + + return false; + } + for 
(int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + + return false; + } + } + return true; } - /** - * required uint32 workers_number = 12; - */ - public Builder setWorkersNumber(int value) { - bitField0_ |= 0x00000800; - workersNumber_ = value; - onChanged(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } return this; } - /** - * required uint32 workers_number = 12; - */ - public Builder clearWorkersNumber() { - bitField0_ = (bitField0_ & ~0x00000800); - workersNumber_ = 0; - onChanged(); - return this; + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - - // required uint64 bandwidth = 13; - private long bandwidth_ ; /** - * required uint64 bandwidth = 13; + * required string backup_id = 1; */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00001000) == 0x00001000); + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } } /** - * required uint64 bandwidth = 13; + * required string backup_id = 1; */ - public long getBandwidth() { - return bandwidth_; + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } /** - * required uint64 bandwidth = 13; + * required string backup_id = 1; */ - public Builder setBandwidth(long value) { - bitField0_ |= 0x00001000; - bandwidth_ = value; + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } /** - * required uint64 bandwidth = 13; + * required string backup_id = 1; */ - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00001000); - bandwidth_ = 0L; + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); onChanged(); return this; } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupInfo) - } - - static { - defaultInstance = new BackupInfo(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.BackupInfo) - } - - public interface BackupProcContextOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.BackupInfo ctx = 1; - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - boolean hasCtx(); - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx(); - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder(); - - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - java.util.List - getServerTimestampList(); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - int getServerTimestampCount(); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - java.util.List - getServerTimestampOrBuilderList(); - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index); + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); } - /** - * Protobuf type {@code hbase.pb.BackupProcContext} - */ - public static final class BackupProcContext extends - com.google.protobuf.GeneratedMessage - implements BackupProcContextOrBuilder { - // Use BackupProcContext.newBuilder() to construct. - private BackupProcContext(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupProcContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BackupProcContext defaultInstance; - public static BackupProcContext getDefaultInstance() { - return defaultInstance; - } - - public BackupProcContext getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BackupProcContext( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = ctx_.toBuilder(); - } - ctx_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(ctx_); - ctx_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000001; - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry)); - break; - } 
- } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupProcContext parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupProcContext(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required .hbase.pb.BackupInfo ctx = 1; - public static final int CTX_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_; - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - public boolean hasCtx() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { - return ctx_; - } - /** - * required .hbase.pb.BackupInfo ctx = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { - return ctx_; - } - - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; - private java.util.List serverTimestamp_; - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public java.util.List getServerTimestampList() { - return serverTimestamp_; - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public java.util.List - getServerTimestampOrBuilderList() { - return serverTimestamp_; - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public int getServerTimestampCount() { - return serverTimestamp_.size(); - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { - return serverTimestamp_.get(index); - } - /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index) { - return serverTimestamp_.get(index); - } - - private void initFields() { - ctx_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); - serverTimestamp_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } - if (!hasCtx()) { - memoizedIsInitialized = 0; - return false; + // required .hbase.pb.BackupType type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - if (!getCtx().isInitialized()) { - memoizedIsInitialized = 0; - return false; + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; } - for (int i = 0; i < getServerTimestampCount(); i++) { - if (!getServerTimestamp(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, ctx_); - } - for (int i = 0; i < serverTimestamp_.size(); i++) { - output.writeMessage(2, serverTimestamp_.get(i)); + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; } - getUnknownFields().writeTo(output); - } - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, ctx_); + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - for (int i = 0; i < serverTimestamp_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, serverTimestamp_.get(i)); + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) 
{ - if (obj == this) { - return true; + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)) { - return super.equals(obj); + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) obj; - - boolean result = true; - result = result && (hasCtx() == other.hasCtx()); - if (hasCtx()) { - result = result && getCtx() - .equals(other.getCtx()); + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; } - result = result && getServerTimestampList() - .equals(other.getServerTimestampList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasCtx()) { - hash = (37 * hash) + CTX_FIELD_NUMBER; - hash = (53 * hash) + getCtx().hashCode(); + + // optional .hbase.pb.BackupInfo.BackupState state = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); } - if (getServerTimestampCount() > 0) { - hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + getServerTimestampList().hashCode(); + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { + return state_; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, 
extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.BackupProcContext} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContextOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + state_ = value; + onChanged(); + return this; } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000008); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + onChanged(); + return this; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { + return phase_; } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getCtxFieldBuilder(); - getServerTimestampFieldBuilder(); + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000010; + phase_ = value; + onChanged(); + return this; } - private static Builder create() { - return new Builder(); + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public Builder clearPhase() { + bitField0_ = (bitField0_ & ~0x00000010); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + onChanged(); + return this; } - public Builder clear() { - super.clear(); - if (ctxBuilder_ == null) { - ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + // optional string failed_message = 6; + private java.lang.Object failedMessage_ = ""; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + failedMessage_ = s; + return s; } else { - ctxBuilder_.clear(); + return (java.lang.String) ref; } - bitField0_ = (bitField0_ & ~0x00000001); - if (serverTimestampBuilder_ == null) { - serverTimestamp_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + } + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + 
com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; } else { - serverTimestampBuilder_.clear(); + return (com.google.protobuf.ByteString) ref; } + } + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * optional string failed_message = 6; + */ + public Builder clearFailedMessage() { + bitField0_ = (bitField0_ & ~0x00000020); + failedMessage_ = getDefaultInstance().getFailedMessage(); + onChanged(); + return this; } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance(); + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + private java.util.List tableBackupStatus_ = + java.util.Collections.emptyList(); + private void ensureTableBackupStatusIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); + bitField0_ |= 0x00000040; + } } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (ctxBuilder_ == null) { - result.ctx_ = ctx_; - } else { - result.ctx_ = ctxBuilder_.build(); - } - if (serverTimestampBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.serverTimestamp_ = serverTimestamp_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + if (tableBackupStatusBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableBackupStatus_); } else { - result.serverTimestamp_ = serverTimestampBuilder_.build(); + 
return tableBackupStatusBuilder_.getMessageList(); } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)other); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.size(); } else { - super.mergeFrom(other); - return this; + return tableBackupStatusBuilder_.getCount(); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance()) return this; - if (other.hasCtx()) { - mergeCtx(other.getCtx()); - } - if (serverTimestampBuilder_ == null) { - if (!other.serverTimestamp_.isEmpty()) { - if (serverTimestamp_.isEmpty()) { - serverTimestamp_ = other.serverTimestamp_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureServerTimestampIsMutable(); - serverTimestamp_.addAll(other.serverTimestamp_); - } - onChanged(); - } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { - if (!other.serverTimestamp_.isEmpty()) { - if (serverTimestampBuilder_.isEmpty()) { - serverTimestampBuilder_.dispose(); - serverTimestampBuilder_ = null; - serverTimestamp_ = other.serverTimestamp_; - bitField0_ = (bitField0_ & ~0x00000002); - serverTimestampBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getServerTimestampFieldBuilder() : null; - } else { - serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasCtx()) { - - return false; - } - if (!getCtx().isInitialized()) { - - return false; - } - for (int i = 0; i < getServerTimestampCount(); i++) { - if (!getServerTimestamp(i).isInitialized()) { - - return false; - } + return tableBackupStatusBuilder_.getMessage(index); } - return true; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, value); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, value); } return this; } - private int bitField0_; - - // required .hbase.pb.BackupInfo ctx = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> ctxBuilder_; /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public boolean hasCtx() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); + } + return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { - if (ctxBuilder_ == null) { - return ctx_; + public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(value); + onChanged(); } else { - return ctxBuilder_.getMessage(); + tableBackupStatusBuilder_.addMessage(value); } + return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus 
table_backup_status = 7; */ - public Builder setCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { - if (ctxBuilder_ == null) { + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ctx_ = value; + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, value); onChanged(); } else { - ctxBuilder_.setMessage(value); + tableBackupStatusBuilder_.addMessage(index, value); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public Builder setCtx( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder builderForValue) { - if (ctxBuilder_ == null) { - ctx_ = builderForValue.build(); + public Builder addTableBackupStatus( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(builderForValue.build()); onChanged(); } else { - ctxBuilder_.setMessage(builderForValue.build()); + tableBackupStatusBuilder_.addMessage(builderForValue.build()); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public Builder mergeCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { - if (ctxBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - ctx_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) { - ctx_ = - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder(ctx_).mergeFrom(value).buildPartial(); - } else { - ctx_ = value; - } + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, builderForValue.build()); onChanged(); } else { - ctxBuilder_.mergeFrom(value); + tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addAllTableBackupStatus( + java.lang.Iterable values) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + super.addAll(values, tableBackupStatus_); + onChanged(); + } else { + tableBackupStatusBuilder_.addAllMessages(values); } - bitField0_ |= 0x00000001; return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public Builder clearCtx() { - if (ctxBuilder_ == null) { - ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + public Builder clearTableBackupStatus() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); onChanged(); } else { - ctxBuilder_.clear(); + tableBackupStatusBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder getCtxBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getCtxFieldBuilder().getBuilder(); + public Builder removeTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.remove(index); + onChanged(); + } else { + tableBackupStatusBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { + return tableBackupStatusBuilder_.getMessageOrBuilder(index); + } } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { - if (ctxBuilder_ != null) { - return ctxBuilder_.getMessageOrBuilder(); + public java.util.List + getTableBackupStatusOrBuilderList() { + if (tableBackupStatusBuilder_ != null) { + return tableBackupStatusBuilder_.getMessageOrBuilderList(); } else { - return ctx_; + return java.util.Collections.unmodifiableList(tableBackupStatus_); } } /** - * required .hbase.pb.BackupInfo ctx = 1; + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> - getCtxFieldBuilder() { - if (ctxBuilder_ == null) { - ctxBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder>( - ctx_, + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { + return getTableBackupStatusFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusBuilderList() { + return getTableBackupStatusFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> 
+ getTableBackupStatusFieldBuilder() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( + tableBackupStatus_, + ((bitField0_ & 0x00000040) == 0x00000040), getParentForChildren(), isClean()); - ctx_ = null; + tableBackupStatus_ = null; } - return ctxBuilder_; + return tableBackupStatusBuilder_; } - // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; - private java.util.List serverTimestamp_ = - java.util.Collections.emptyList(); - private void ensureServerTimestampIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); - bitField0_ |= 0x00000002; - } + // optional uint64 start_ts = 8; + private long startTs_ ; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000080; + startTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000080); + startTs_ = 0L; + onChanged(); + return this; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; - + // optional uint64 end_ts = 9; + private long endTs_ ; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint64 end_ts = 9; */ - public java.util.List getServerTimestampList() { - if (serverTimestampBuilder_ == null) { - return java.util.Collections.unmodifiableList(serverTimestamp_); - } else { - return serverTimestampBuilder_.getMessageList(); - } + public boolean hasEndTs() { + return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint64 end_ts = 9; */ - public int getServerTimestampCount() { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.size(); - } else { - return serverTimestampBuilder_.getCount(); - } + public long getEndTs() { + return endTs_; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint64 end_ts = 9; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.get(index); - } else { - return serverTimestampBuilder_.getMessage(index); - } + public Builder setEndTs(long value) { + bitField0_ |= 0x00000100; + endTs_ = value; + onChanged(); + return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint64 end_ts = 9; */ - public Builder setServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerTimestampIsMutable(); - 
serverTimestamp_.set(index, value); - onChanged(); - } else { - serverTimestampBuilder_.setMessage(index, value); - } + public Builder clearEndTs() { + bitField0_ = (bitField0_ & ~0x00000100); + endTs_ = 0L; + onChanged(); return this; } + + // optional uint32 progress = 10; + private int progress_ ; + /** + * optional uint32 progress = 10; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional uint32 progress = 10; + */ + public int getProgress() { + return progress_; + } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint32 progress = 10; */ - public Builder setServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.set(index, builderForValue.build()); - onChanged(); - } else { - serverTimestampBuilder_.setMessage(index, builderForValue.build()); - } + public Builder setProgress(int value) { + bitField0_ |= 0x00000200; + progress_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional uint32 progress = 10; */ - public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerTimestampIsMutable(); - serverTimestamp_.add(value); - onChanged(); - } else { - serverTimestampBuilder_.addMessage(value); - } + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000200); + progress_ = 0; + onChanged(); return this; } + + // optional string job_id = 11; + private java.lang.Object jobId_ = ""; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder addServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { - if (serverTimestampBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureServerTimestampIsMutable(); - serverTimestamp_.add(index, value); - onChanged(); - } else { - serverTimestampBuilder_.addMessage(index, value); - } - return this; + public boolean hasJobId() { + return ((bitField0_ & 0x00000400) == 0x00000400); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder addServerTimestamp( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.add(builderForValue.build()); - onChanged(); + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + jobId_ = s; + return s; } else { - serverTimestampBuilder_.addMessage(builderForValue.build()); + return (java.lang.String) ref; } - return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder addServerTimestamp( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.add(index, builderForValue.build()); - onChanged(); + public 
com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; } else { - serverTimestampBuilder_.addMessage(index, builderForValue.build()); + return (com.google.protobuf.ByteString) ref; } - return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder addAllServerTimestamp( - java.lang.Iterable values) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - super.addAll(values, serverTimestamp_); - onChanged(); - } else { - serverTimestampBuilder_.addAllMessages(values); - } + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + jobId_ = value; + onChanged(); return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder clearServerTimestamp() { - if (serverTimestampBuilder_ == null) { - serverTimestamp_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - serverTimestampBuilder_.clear(); - } + public Builder clearJobId() { + bitField0_ = (bitField0_ & ~0x00000400); + jobId_ = getDefaultInstance().getJobId(); + onChanged(); return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * optional string job_id = 11; */ - public Builder removeServerTimestamp(int index) { - if (serverTimestampBuilder_ == null) { - ensureServerTimestampIsMutable(); - serverTimestamp_.remove(index); - onChanged(); - } else { - serverTimestampBuilder_.remove(index); - } + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + jobId_ = value; + onChanged(); return this; } + + // required uint32 workers_number = 12; + private int workersNumber_ ; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint32 workers_number = 12; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( - int index) { - return getServerTimestampFieldBuilder().getBuilder(index); + public boolean hasWorkersNumber() { + return ((bitField0_ & 0x00000800) == 0x00000800); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint32 workers_number = 12; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( - int index) { - if (serverTimestampBuilder_ == null) { - return serverTimestamp_.get(index); } else { - return serverTimestampBuilder_.getMessageOrBuilder(index); - } + public int getWorkersNumber() { + return workersNumber_; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint32 workers_number = 12; */ - public java.util.List - getServerTimestampOrBuilderList() { - if (serverTimestampBuilder_ != null) { - return serverTimestampBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(serverTimestamp_); - } + public Builder setWorkersNumber(int value) { + bitField0_ |= 0x00000800; + workersNumber_ = value; + onChanged(); + return this; } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint32 workers_number = 12; */ - public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { - return getServerTimestampFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + public Builder clearWorkersNumber() { + bitField0_ = (bitField0_ & ~0x00000800); + workersNumber_ = 0; + onChanged(); + return this; } + + // required uint64 bandwidth = 13; + private long bandwidth_ ; /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 bandwidth = 13; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( - int index) { - return getServerTimestampFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + public boolean hasBandwidth() { + return ((bitField0_ & 0x00001000) == 0x00001000); } /** - * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + * required uint64 bandwidth = 13; */ - public java.util.List - getServerTimestampBuilderList() { - return getServerTimestampFieldBuilder().getBuilderList(); + public long getBandwidth() { + return bandwidth_; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> - getServerTimestampFieldBuilder() { - if (serverTimestampBuilder_ == null) { - serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( - serverTimestamp_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - serverTimestamp_ = null; - } - return serverTimestampBuilder_; + /** + * required uint64 bandwidth = 13; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00001000; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * required uint64 bandwidth = 13; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00001000); + bandwidth_ = 0L; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupProcContext) + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupInfo) } static { - defaultInstance = new BackupProcContext(true); + defaultInstance = new BackupInfo(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupProcContext) + // @@protoc_insertion_point(class_scope:hbase.pb.BackupInfo) } private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_SnapshotTableStateData_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_BackupImage_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -10730,11 +8879,6 @@ public final class BackupProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_BackupInfo_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - 
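For orientation, the BackupInfo builder methods added above imply the usual protobuf 2.x usage pattern. The following is a minimal illustrative sketch, not part of the patch: the setters for start_ts, end_ts, progress, job_id, workers_number and bandwidth appear in the added lines above, while setBackupId, setType and setTargetRootDir are assumed from standard protoc output for the required fields declared in the Backup.proto descriptor further down.

import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType;

public class BackupInfoBuilderSketch {
  public static void main(String[] args) {
    // Illustrative only. Setters for the required fields backup_id (1), type (2)
    // and target_root_dir (3) are assumed from standard protoc codegen; the rest
    // are visible in the generated builder above. The repeated table_backup_status
    // field (7) could be populated via addTableBackupStatusBuilder()/addTableBackupStatus.
    BackupInfo info = BackupInfo.newBuilder()
        .setBackupId("backup_1470000000000")   // hypothetical id
        .setType(BackupType.FULL)
        .setTargetRootDir("hdfs://nn/backup")  // hypothetical target root dir
        .setStartTs(1470000000000L)            // optional uint64 start_ts = 8
        .setEndTs(1470000360000L)              // optional uint64 end_ts = 9
        .setProgress(100)                      // optional uint32 progress = 10
        .setJobId("job_201608")                // optional string job_id = 11
        .setWorkersNumber(3)                   // required uint32 workers_number = 12
        .setBandwidth(100L)                    // required uint64 bandwidth = 13
        .build();
    System.out.println("progress=" + info.getProgress() + "%");
  }
}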
internal_static_hbase_pb_BackupProcContext_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_BackupProcContext_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -10744,108 +8888,86 @@ public final class BackupProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"R\n" + - "\026SnapshotTableStateData\022\"\n\005table\030\001 \002(\0132\023" + - ".hbase.pb.TableName\022\024\n\014snapshotName\030\002 \002(" + - "\t\"\327\001\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013" + - "backup_type\030\002 \002(\0162\024.hbase.pb.BackupType\022" + - "\020\n\010root_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023." + - "hbase.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013" + - "complete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.h" + - "base.pb.BackupImage\"4\n\017ServerTimestamp\022\016" + - "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab", - "leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" + - ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" + - "\031.hbase.pb.ServerTimestamp\"\220\002\n\016BackupMan" + - "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" + - "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" + - "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" + - "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022/\n" + - "\007tst_map\030\007 \003(\0132\036.hbase.pb.TableServerTim" + - "estamp\0225\n\026dependent_backup_image\030\010 \003(\0132\025" + - ".hbase.pb.BackupImage\"]\n\021TableBackupStat", - "us\022\"\n\005table\030\001 \002(\0132\023.hbase.pb.TableName\022\022" + - "\n\ntarget_dir\030\002 \002(\t\022\020\n\010snapshot\030\003 \001(\t\"\320\004\n" + - "\nBackupInfo\022\021\n\tbackup_id\030\001 \002(\t\022\"\n\004type\030\002" + - " \002(\0162\024.hbase.pb.BackupType\022\027\n\017target_roo" + - "t_dir\030\003 \002(\t\022/\n\005state\030\004 \001(\0162 .hbase.pb.Ba" + - "ckupInfo.BackupState\022/\n\005phase\030\005 \001(\0162 .hb" + - "ase.pb.BackupInfo.BackupPhase\022\026\n\016failed_" + - "message\030\006 \001(\t\0228\n\023table_backup_status\030\007 \003" + - "(\0132\033.hbase.pb.TableBackupStatus\022\020\n\010start" + - "_ts\030\010 \001(\004\022\016\n\006end_ts\030\t \001(\004\022\020\n\010progress\030\n ", - "\001(\r\022\016\n\006job_id\030\013 \001(\t\022\026\n\016workers_number\030\014 " + - "\002(\r\022\021\n\tbandwidth\030\r \002(\004\"P\n\013BackupState\022\013\n" + - "\007WAITING\020\000\022\013\n\007RUNNING\020\001\022\014\n\010COMPLETE\020\002\022\n\n" + - "\006FAILED\020\003\022\r\n\tCANCELLED\020\004\"}\n\013BackupPhase\022" + - "\013\n\007REQUEST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n\023PREPARE_IN" + - "CREMENTAL\020\002\022\020\n\014SNAPSHOTCOPY\020\003\022\024\n\020INCREME" + - "NTAL_COPY\020\004\022\022\n\016STORE_MANIFEST\020\005\"k\n\021Backu" + - "pProcContext\022!\n\003ctx\030\001 \002(\0132\024.hbase.pb.Bac" + - "kupInfo\0223\n\020server_timestamp\030\002 \003(\0132\031.hbas" + - "e.pb.ServerTimestamp*k\n\024FullTableBackupS", - "tate\022\026\n\022PRE_SNAPSHOT_TABLE\020\001\022\023\n\017SNAPSHOT" + - "_TABLES\020\002\022\021\n\rSNAPSHOT_COPY\020\003\022\023\n\017BACKUP_C" + - "OMPLETE\020\004*f\n\033IncrementalTableBackupState" + - 
"\022\027\n\023PREPARE_INCREMENTAL\020\001\022\024\n\020INCREMENTAL" + - "_COPY\020\002\022\030\n\024INCR_BACKUP_COMPLETE\020\003*\'\n\nBac" + - "kupType\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*or" + - "g.apache.hadoop.hbase.protobuf.generated" + - "B\014BackupProtosH\001\210\001\001\240\001\001" + "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" + + "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" + + "up_type\030\002 \002(\0162\024.hbase.pb.BackupType\022\020\n\010r" + + "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" + + "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" + + "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" + + ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" + + "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" + + "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." + + "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb", + "ase.pb.ServerTimestamp\"\220\002\n\016BackupManifes" + + "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" + + "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" + + "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" + + "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022/\n\007tst" + + "_map\030\007 \003(\0132\036.hbase.pb.TableServerTimesta" + + "mp\0225\n\026dependent_backup_image\030\010 \003(\0132\025.hba" + + "se.pb.BackupImage\"]\n\021TableBackupStatus\022\"" + + "\n\005table\030\001 \002(\0132\023.hbase.pb.TableName\022\022\n\nta" + + "rget_dir\030\002 \002(\t\022\020\n\010snapshot\030\003 \001(\t\"\320\004\n\nBac", + "kupInfo\022\021\n\tbackup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\016" + + "2\024.hbase.pb.BackupType\022\027\n\017target_root_di" + + "r\030\003 \002(\t\022/\n\005state\030\004 \001(\0162 .hbase.pb.Backup" + + "Info.BackupState\022/\n\005phase\030\005 \001(\0162 .hbase." 
+ + "pb.BackupInfo.BackupPhase\022\026\n\016failed_mess" + + "age\030\006 \001(\t\0228\n\023table_backup_status\030\007 \003(\0132\033" + + ".hbase.pb.TableBackupStatus\022\020\n\010start_ts\030" + + "\010 \001(\004\022\016\n\006end_ts\030\t \001(\004\022\020\n\010progress\030\n \001(\r\022" + + "\016\n\006job_id\030\013 \001(\t\022\026\n\016workers_number\030\014 \002(\r\022" + + "\021\n\tbandwidth\030\r \002(\004\"P\n\013BackupState\022\013\n\007WAI", + "TING\020\000\022\013\n\007RUNNING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAI" + + "LED\020\003\022\r\n\tCANCELLED\020\004\"}\n\013BackupPhase\022\013\n\007R" + + "EQUEST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n\023PREPARE_INCREM" + + "ENTAL\020\002\022\020\n\014SNAPSHOTCOPY\020\003\022\024\n\020INCREMENTAL" + + "_COPY\020\004\022\022\n\016STORE_MANIFEST\020\005*\'\n\nBackupTyp" + + "e\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apac" + + "he.hadoop.hbase.protobuf.generatedB\014Back" + + "upProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_hbase_pb_SnapshotTableStateData_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_SnapshotTableStateData_descriptor, - new java.lang.String[] { "Table", "SnapshotName", }); internal_static_hbase_pb_BackupImage_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(0); internal_static_hbase_pb_BackupImage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupImage_descriptor, new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", }); internal_static_hbase_pb_ServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(1); internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerTimestamp_descriptor, new java.lang.String[] { "Server", "Timestamp", }); internal_static_hbase_pb_TableServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(2); internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableServerTimestamp_descriptor, new java.lang.String[] { "Table", "ServerTimestamp", }); internal_static_hbase_pb_BackupManifest_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(3); internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupManifest_descriptor, new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TstMap", "DependentBackupImage", }); internal_static_hbase_pb_TableBackupStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableBackupStatus_descriptor, new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); internal_static_hbase_pb_BackupInfo_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_BackupInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupInfo_descriptor, new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "Progress", "JobId", "WorkersNumber", "Bandwidth", }); - internal_static_hbase_pb_BackupProcContext_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_hbase_pb_BackupProcContext_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_BackupProcContext_descriptor, - new java.lang.String[] { "Ctx", "ServerTimestamp", }); return null; } }; diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 4562a7c..d495e84 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -90,88 +90,6 @@ public final class MasterProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType) } - /** - * Protobuf enum {@code hbase.pb.RestoreTablesState} - */ - public enum RestoreTablesState - implements com.google.protobuf.ProtocolMessageEnum { - /** - * VALIDATION = 1; - */ - VALIDATION(0, 1), - /** - * RESTORE_IMAGES = 2; - */ - RESTORE_IMAGES(1, 2), - ; - - /** - * VALIDATION = 1; - */ - public static final int VALIDATION_VALUE = 1; - /** - * RESTORE_IMAGES = 2; - */ - public static final int RESTORE_IMAGES_VALUE = 2; - - - public final int getNumber() { return value; } - - public static RestoreTablesState valueOf(int value) { - switch (value) { - case 1: return VALIDATION; - case 2: return RESTORE_IMAGES; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public RestoreTablesState findValueByNumber(int number) { - return RestoreTablesState.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(1); - } - - private static final RestoreTablesState[] VALUES = values(); - - public static RestoreTablesState valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private RestoreTablesState(int index, int value) { - this.index = index; - this.value = value; 
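Note on the re-indexed descriptor lookups in the BackupProtos assigner above: generated code locates each message descriptor by its position in Backup.proto, so dropping SnapshotTableStateData and BackupProcContext shifts every subsequent getMessageTypes().get(n) index down by one (BackupImage 1 to 0, TableBackupStatus 5 to 4, BackupInfo 6 to 5). A minimal sketch of the same lookup from application code, assuming the regenerated BackupProtos class:

import com.google.protobuf.Descriptors;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

public class DescriptorIndexSketch {
  public static void main(String[] args) {
    // After this patch, index 5 resolves to hbase.pb.BackupInfo (it was index 6
    // while SnapshotTableStateData still occupied slot 0).
    Descriptors.Descriptor byIndex =
        BackupProtos.getDescriptor().getMessageTypes().get(5);
    // Name-based lookup does not depend on message ordering and is more robust
    // across regenerations.
    Descriptors.Descriptor byName =
        BackupProtos.getDescriptor().findMessageTypeByName("BackupInfo");
    System.out.println(byIndex.getFullName() + " == " + byName.getFullName());
  }
}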
- } - - // @@protoc_insertion_point(enum_scope:hbase.pb.RestoreTablesState) - } - public interface AddColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -58736,4572 +58654,36 @@ public final class MasterProtos { break; } case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } - break; - } - case 10: { - int length = input.readRawVarint32(); - int oldLimit = input.pushLimit(length); - while(input.getBytesUntilLimit() > 0) { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - capabilities_.add(value); - } - } - input.popLimit(oldLimit); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SecurityCapabilitiesResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SecurityCapabilitiesResponse(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} - */ - public enum Capability - implements com.google.protobuf.ProtocolMessageEnum { - /** - * SIMPLE_AUTHENTICATION = 0; - */ - SIMPLE_AUTHENTICATION(0, 0), - /** - * SECURE_AUTHENTICATION = 1; - */ - SECURE_AUTHENTICATION(1, 1), - /** - * AUTHORIZATION = 2; - */ - AUTHORIZATION(2, 2), - /** - * 
CELL_AUTHORIZATION = 3; - */ - CELL_AUTHORIZATION(3, 3), - /** - * CELL_VISIBILITY = 4; - */ - CELL_VISIBILITY(4, 4), - ; - - /** - * SIMPLE_AUTHENTICATION = 0; - */ - public static final int SIMPLE_AUTHENTICATION_VALUE = 0; - /** - * SECURE_AUTHENTICATION = 1; - */ - public static final int SECURE_AUTHENTICATION_VALUE = 1; - /** - * AUTHORIZATION = 2; - */ - public static final int AUTHORIZATION_VALUE = 2; - /** - * CELL_AUTHORIZATION = 3; - */ - public static final int CELL_AUTHORIZATION_VALUE = 3; - /** - * CELL_VISIBILITY = 4; - */ - public static final int CELL_VISIBILITY_VALUE = 4; - - - public final int getNumber() { return value; } - - public static Capability valueOf(int value) { - switch (value) { - case 0: return SIMPLE_AUTHENTICATION; - case 1: return SECURE_AUTHENTICATION; - case 2: return AUTHORIZATION; - case 3: return CELL_AUTHORIZATION; - case 4: return CELL_VISIBILITY; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Capability findValueByNumber(int number) { - return Capability.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); - } - - private static final Capability[] VALUES = values(); - - public static Capability valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private Capability(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) - } - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - public static final int CAPABILITIES_FIELD_NUMBER = 1; - private java.util.List capabilities_; - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public java.util.List getCapabilitiesList() { - return capabilities_; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public int getCapabilitiesCount() { - return capabilities_.size(); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); - } - - private void initFields() { - capabilities_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException 
{ - getSerializedSize(); - for (int i = 0; i < capabilities_.size(); i++) { - output.writeEnum(1, capabilities_.get(i).getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < capabilities_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); - } - size += dataSize; - size += 1 * capabilities_.size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) obj; - - boolean result = true; - result = result && getCapabilitiesList() - .equals(other.getCapabilitiesList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCapabilitiesCount() > 0) { - hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; - hash = (53 * hash) + hashEnumList(getCapabilitiesList()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = java.util.Collections.unmodifiableList(capabilities_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.capabilities_ = capabilities_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; - if (!other.capabilities_.isEmpty()) { - if (capabilities_.isEmpty()) { - capabilities_ = other.capabilities_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCapabilitiesIsMutable(); - capabilities_.addAll(other.capabilities_); - } - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - private java.util.List capabilities_ = - java.util.Collections.emptyList(); - private void ensureCapabilitiesIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - capabilities_ = new java.util.ArrayList(capabilities_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public java.util.List getCapabilitiesList() { - return java.util.Collections.unmodifiableList(capabilities_); - } - /** - 
* repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public int getCapabilitiesCount() { - return capabilities_.size(); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { - return capabilities_.get(index); - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder setCapabilities( - int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); - } - ensureCapabilitiesIsMutable(); - capabilities_.set(index, value); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { - if (value == null) { - throw new NullPointerException(); - } - ensureCapabilitiesIsMutable(); - capabilities_.add(value); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder addAllCapabilities( - java.lang.Iterable values) { - ensureCapabilitiesIsMutable(); - super.addAll(values, capabilities_); - onChanged(); - return this; - } - /** - * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; - */ - public Builder clearCapabilities() { - capabilities_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) - } - - static { - defaultInstance = new SecurityCapabilitiesResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) - } - - public interface BackupTablesRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .hbase.pb.BackupType type = 1; - /** - * required .hbase.pb.BackupType type = 1; - */ - boolean hasType(); - /** - * required .hbase.pb.BackupType type = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); - - // repeated .hbase.pb.TableName tables = 2; - /** - * repeated .hbase.pb.TableName tables = 2; - */ - java.util.List - getTablesList(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - int getTablesCount(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - java.util.List - getTablesOrBuilderList(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index); - - // required string target_root_dir = 3; - /** - * required string target_root_dir = 3; - */ - boolean hasTargetRootDir(); - /** - * required string target_root_dir = 3; - */ - java.lang.String getTargetRootDir(); - /** - * required string target_root_dir = 3; - */ - com.google.protobuf.ByteString - getTargetRootDirBytes(); - - // optional int64 workers = 4; - /** - * optional int64 workers = 4; - */ - boolean hasWorkers(); - /** - * optional int64 workers = 4; - */ - long getWorkers(); - - // optional int64 bandwidth = 5; - 
/** - * optional int64 bandwidth = 5; - */ - boolean hasBandwidth(); - /** - * optional int64 bandwidth = 5; - */ - long getBandwidth(); - - // optional string backup_set_name = 6; - /** - * optional string backup_set_name = 6; - */ - boolean hasBackupSetName(); - /** - * optional string backup_set_name = 6; - */ - java.lang.String getBackupSetName(); - /** - * optional string backup_set_name = 6; - */ - com.google.protobuf.ByteString - getBackupSetNameBytes(); - - // optional uint64 nonce_group = 7 [default = 0]; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - boolean hasNonceGroup(); - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - long getNonceGroup(); - - // optional uint64 nonce = 8 [default = 0]; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - boolean hasNonce(); - /** - * optional uint64 nonce = 8 [default = 0]; - */ - long getNonce(); - } - /** - * Protobuf type {@code hbase.pb.BackupTablesRequest} - */ - public static final class BackupTablesRequest extends - com.google.protobuf.GeneratedMessage - implements BackupTablesRequestOrBuilder { - // Use BackupTablesRequest.newBuilder() to construct. - private BackupTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BackupTablesRequest defaultInstance; - public static BackupTablesRequest getDefaultInstance() { - return defaultInstance; - } - - public BackupTablesRequest getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BackupTablesRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 26: { - bitField0_ |= 0x00000002; - targetRootDir_ = input.readBytes(); - break; - } - case 32: { - bitField0_ |= 0x00000004; - workers_ = input.readInt64(); - break; - } - case 40: { - bitField0_ |= 0x00000008; - bandwidth_ = input.readInt64(); - break; - } - case 50: { - bitField0_ |= 0x00000010; - backupSetName_ = input.readBytes(); - break; - } - case 56: { - bitField0_ |= 0x00000020; - 
nonceGroup_ = input.readUInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000040; - nonce_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = java.util.Collections.unmodifiableList(tables_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupTablesRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupTablesRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required .hbase.pb.BackupType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; - /** - * required .hbase.pb.BackupType type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.BackupType type = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; - } - - // repeated .hbase.pb.TableName tables = 2; - public static final int TABLES_FIELD_NUMBER = 2; - private java.util.List tables_; - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List getTablesList() { - return tables_; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesOrBuilderList() { - return tables_; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public int getTablesCount() { - return tables_.size(); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { - return tables_.get(index); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index) { - return tables_.get(index); - } - - // required string target_root_dir = 3; - public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object targetRootDir_; - /** - * required string target_root_dir = 3; - */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string target_root_dir = 3; - */ - public java.lang.String 
getTargetRootDir() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - targetRootDir_ = s; - } - return s; - } - } - /** - * required string target_root_dir = 3; - */ - public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional int64 workers = 4; - public static final int WORKERS_FIELD_NUMBER = 4; - private long workers_; - /** - * optional int64 workers = 4; - */ - public boolean hasWorkers() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional int64 workers = 4; - */ - public long getWorkers() { - return workers_; - } - - // optional int64 bandwidth = 5; - public static final int BANDWIDTH_FIELD_NUMBER = 5; - private long bandwidth_; - /** - * optional int64 bandwidth = 5; - */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 bandwidth = 5; - */ - public long getBandwidth() { - return bandwidth_; - } - - // optional string backup_set_name = 6; - public static final int BACKUP_SET_NAME_FIELD_NUMBER = 6; - private java.lang.Object backupSetName_; - /** - * optional string backup_set_name = 6; - */ - public boolean hasBackupSetName() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional string backup_set_name = 6; - */ - public java.lang.String getBackupSetName() { - java.lang.Object ref = backupSetName_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupSetName_ = s; - } - return s; - } - } - /** - * optional string backup_set_name = 6; - */ - public com.google.protobuf.ByteString - getBackupSetNameBytes() { - java.lang.Object ref = backupSetName_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupSetName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional uint64 nonce_group = 7 [default = 0]; - public static final int NONCE_GROUP_FIELD_NUMBER = 7; - private long nonceGroup_; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - - // optional uint64 nonce = 8 [default = 0]; - public static final int NONCE_FIELD_NUMBER = 8; - private long nonce_; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - - private void initFields() { - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - tables_ = java.util.Collections.emptyList(); - targetRootDir_ = ""; - workers_ = 0L; - 
bandwidth_ = 0L; - backupSetName_ = ""; - nonceGroup_ = 0L; - nonce_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTargetRootDir()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTablesCount(); i++) { - if (!getTables(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); - } - for (int i = 0; i < tables_.size(); i++) { - output.writeMessage(2, tables_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(3, getTargetRootDirBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(4, workers_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(5, bandwidth_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBytes(6, getBackupSetNameBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(7, nonceGroup_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(8, nonce_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); - } - for (int i = 0; i < tables_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, tables_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getTargetRootDirBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, workers_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(5, bandwidth_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getBackupSetNameBytes()); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, nonceGroup_); - } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, nonce_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; - - 
boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && getTablesList() - .equals(other.getTablesList()); - result = result && (hasTargetRootDir() == other.hasTargetRootDir()); - if (hasTargetRootDir()) { - result = result && getTargetRootDir() - .equals(other.getTargetRootDir()); - } - result = result && (hasWorkers() == other.hasWorkers()); - if (hasWorkers()) { - result = result && (getWorkers() - == other.getWorkers()); - } - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); - } - result = result && (hasBackupSetName() == other.hasBackupSetName()); - if (hasBackupSetName()) { - result = result && getBackupSetName() - .equals(other.getBackupSetName()); - } - result = result && (hasNonceGroup() == other.hasNonceGroup()); - if (hasNonceGroup()) { - result = result && (getNonceGroup() - == other.getNonceGroup()); - } - result = result && (hasNonce() == other.hasNonce()); - if (hasNonce()) { - result = result && (getNonce() - == other.getNonce()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (getTablesCount() > 0) { - hash = (37 * hash) + TABLES_FIELD_NUMBER; - hash = (53 * hash) + getTablesList().hashCode(); - } - if (hasTargetRootDir()) { - hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetRootDir().hashCode(); - } - if (hasWorkers()) { - hash = (37 * hash) + WORKERS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getWorkers()); - } - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); - } - if (hasBackupSetName()) { - hash = (37 * hash) + BACKUP_SET_NAME_FIELD_NUMBER; - hash = (53 * hash) + getBackupSetName().hashCode(); - } - if (hasNonceGroup()) { - hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNonceGroup()); - } - if (hasNonce()) { - hash = (37 * hash) + NONCE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNonce()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.BackupTablesRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTablesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000001); - if (tablesBuilder_ == null) { - tables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - tablesBuilder_.clear(); - } - targetRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - workers_ = 0L; - bitField0_ = (bitField0_ & ~0x00000008); - bandwidth_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - backupSetName_ = ""; - bitField0_ = (bitField0_ & ~0x00000020); - nonceGroup_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - nonce_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.type_ = type_; - if (tablesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = java.util.Collections.unmodifiableList(tables_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.tables_ = tables_; - } else { - result.tables_ = tablesBuilder_.build(); - } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; - } - result.targetRootDir_ = targetRootDir_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.workers_ = workers_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.bandwidth_ = bandwidth_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; - } - result.backupSetName_ = backupSetName_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000020; - } - result.nonceGroup_ = nonceGroup_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.nonce_ = nonce_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); - } else { 
- super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); - } - if (tablesBuilder_ == null) { - if (!other.tables_.isEmpty()) { - if (tables_.isEmpty()) { - tables_ = other.tables_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureTablesIsMutable(); - tables_.addAll(other.tables_); - } - onChanged(); - } - } else { - if (!other.tables_.isEmpty()) { - if (tablesBuilder_.isEmpty()) { - tablesBuilder_.dispose(); - tablesBuilder_ = null; - tables_ = other.tables_; - bitField0_ = (bitField0_ & ~0x00000002); - tablesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTablesFieldBuilder() : null; - } else { - tablesBuilder_.addAllMessages(other.tables_); - } - } - } - if (other.hasTargetRootDir()) { - bitField0_ |= 0x00000004; - targetRootDir_ = other.targetRootDir_; - onChanged(); - } - if (other.hasWorkers()) { - setWorkers(other.getWorkers()); - } - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); - } - if (other.hasBackupSetName()) { - bitField0_ |= 0x00000020; - backupSetName_ = other.backupSetName_; - onChanged(); - } - if (other.hasNonceGroup()) { - setNonceGroup(other.getNonceGroup()); - } - if (other.hasNonce()) { - setNonce(other.getNonce()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasTargetRootDir()) { - - return false; - } - for (int i = 0; i < getTablesCount(); i++) { - if (!getTables(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .hbase.pb.BackupType type = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType type = 1; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .hbase.pb.BackupType type = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; - } - /** - * required .hbase.pb.BackupType type = 1; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - type_ = value; - onChanged(); - return this; - } - /** - * required .hbase.pb.BackupType type = 1; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); 
- return this; - } - - // repeated .hbase.pb.TableName tables = 2; - private java.util.List tables_ = - java.util.Collections.emptyList(); - private void ensureTablesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = new java.util.ArrayList(tables_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; - - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List getTablesList() { - if (tablesBuilder_ == null) { - return java.util.Collections.unmodifiableList(tables_); - } else { - return tablesBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public int getTablesCount() { - if (tablesBuilder_ == null) { - return tables_.size(); - } else { - return tablesBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { - if (tablesBuilder_ == null) { - return tables_.get(index); - } else { - return tablesBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder setTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.set(index, value); - onChanged(); - } else { - tablesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder setTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.set(index, builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.add(value); - onChanged(); - } else { - tablesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.add(index, value); - onChanged(); - } else { - tablesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.add(builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - 
ensureTablesIsMutable(); - tables_.add(index, builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addAllTables( - java.lang.Iterable values) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - super.addAll(values, tables_); - onChanged(); - } else { - tablesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder clearTables() { - if (tablesBuilder_ == null) { - tables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - tablesBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder removeTables(int index) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.remove(index); - onChanged(); - } else { - tablesBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( - int index) { - return getTablesFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index) { - if (tablesBuilder_ == null) { - return tables_.get(index); } else { - return tablesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesOrBuilderList() { - if (tablesBuilder_ != null) { - return tablesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tables_); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { - return getTablesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( - int index) { - return getTablesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesBuilderList() { - return getTablesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTablesFieldBuilder() { - if (tablesBuilder_ == null) { - tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - tables_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - tables_ = null; - } - return tablesBuilder_; - } - - // required string target_root_dir = 3; - private java.lang.Object targetRootDir_ = ""; - /** - * required string target_root_dir = 3; - */ - public boolean hasTargetRootDir() { - 
return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string target_root_dir = 3; - */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - targetRootDir_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string target_root_dir = 3; - */ - public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; - } - /** - * required string target_root_dir = 3; - */ - public Builder clearTargetRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - targetRootDir_ = getDefaultInstance().getTargetRootDir(); - onChanged(); - return this; - } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; - } - - // optional int64 workers = 4; - private long workers_ ; - /** - * optional int64 workers = 4; - */ - public boolean hasWorkers() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional int64 workers = 4; - */ - public long getWorkers() { - return workers_; - } - /** - * optional int64 workers = 4; - */ - public Builder setWorkers(long value) { - bitField0_ |= 0x00000008; - workers_ = value; - onChanged(); - return this; - } - /** - * optional int64 workers = 4; - */ - public Builder clearWorkers() { - bitField0_ = (bitField0_ & ~0x00000008); - workers_ = 0L; - onChanged(); - return this; - } - - // optional int64 bandwidth = 5; - private long bandwidth_ ; - /** - * optional int64 bandwidth = 5; - */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional int64 bandwidth = 5; - */ - public long getBandwidth() { - return bandwidth_; - } - /** - * optional int64 bandwidth = 5; - */ - public Builder setBandwidth(long value) { - bitField0_ |= 0x00000010; - bandwidth_ = value; - onChanged(); - return this; - } - /** - * optional int64 bandwidth = 5; - */ - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00000010); - bandwidth_ = 0L; - onChanged(); - return this; - } - - // optional string backup_set_name = 6; - private java.lang.Object backupSetName_ = ""; - /** - * optional string backup_set_name = 6; - */ - public boolean hasBackupSetName() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional string backup_set_name = 6; - */ - public java.lang.String getBackupSetName() { - java.lang.Object ref = backupSetName_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupSetName_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string backup_set_name = 6; - */ - public com.google.protobuf.ByteString - 
getBackupSetNameBytes() { - java.lang.Object ref = backupSetName_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupSetName_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string backup_set_name = 6; - */ - public Builder setBackupSetName( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - backupSetName_ = value; - onChanged(); - return this; - } - /** - * optional string backup_set_name = 6; - */ - public Builder clearBackupSetName() { - bitField0_ = (bitField0_ & ~0x00000020); - backupSetName_ = getDefaultInstance().getBackupSetName(); - onChanged(); - return this; - } - /** - * optional string backup_set_name = 6; - */ - public Builder setBackupSetNameBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - backupSetName_ = value; - onChanged(); - return this; - } - - // optional uint64 nonce_group = 7 [default = 0]; - private long nonceGroup_ ; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public Builder setNonceGroup(long value) { - bitField0_ |= 0x00000040; - nonceGroup_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public Builder clearNonceGroup() { - bitField0_ = (bitField0_ & ~0x00000040); - nonceGroup_ = 0L; - onChanged(); - return this; - } - - // optional uint64 nonce = 8 [default = 0]; - private long nonce_ ; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public Builder setNonce(long value) { - bitField0_ |= 0x00000080; - nonce_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public Builder clearNonce() { - bitField0_ = (bitField0_ & ~0x00000080); - nonce_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) - } - - static { - defaultInstance = new BackupTablesRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) - } - - public interface BackupTablesResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional uint64 proc_id = 1; - /** - * optional uint64 proc_id = 1; - */ - boolean hasProcId(); - /** - * optional uint64 proc_id = 1; - */ - long getProcId(); - - // optional string backup_id = 2; - /** - * optional string backup_id = 2; - */ - boolean hasBackupId(); - /** - * optional string backup_id = 2; - */ - java.lang.String getBackupId(); - /** - * optional string backup_id = 2; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); - } - /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} - */ - public static final class BackupTablesResponse extends - com.google.protobuf.GeneratedMessage - implements BackupTablesResponseOrBuilder { - // Use 
BackupTablesResponse.newBuilder() to construct. - private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BackupTablesResponse defaultInstance; - public static BackupTablesResponse getDefaultInstance() { - return defaultInstance; - } - - public BackupTablesResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private BackupTablesResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - procId_ = input.readUInt64(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - backupId_ = input.readBytes(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupTablesResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupTablesResponse(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional uint64 proc_id = 1; - public static final int PROC_ID_FIELD_NUMBER = 1; - private long procId_; - /** - * optional uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - - // optional string backup_id = 2; - public static final int BACKUP_ID_FIELD_NUMBER = 2; - private 
java.lang.Object backupId_; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private void initFields() { - procId_ = 0L; - backupId_ = ""; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, procId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBackupIdBytes()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, procId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBackupIdBytes()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; - - boolean result = true; - result = result && (hasProcId() == other.hasProcId()); - if (hasProcId()) { - result = result && (getProcId() - == other.getProcId()); - } - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcId()) { - hash = (37 * hash) + PROC_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getProcId()); - } - if 
(hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - procId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.procId_ = procId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.backupId_ = backupId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; - if (other.hasProcId()) { - 
setProcId(other.getProcId()); - } - if (other.hasBackupId()) { - bitField0_ |= 0x00000002; - backupId_ = other.backupId_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional uint64 proc_id = 1; - private long procId_ ; - /** - * optional uint64 proc_id = 1; - */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional uint64 proc_id = 1; - */ - public long getProcId() { - return procId_; - } - /** - * optional uint64 proc_id = 1; - */ - public Builder setProcId(long value) { - bitField0_ |= 0x00000001; - procId_ = value; - onChanged(); - return this; - } - /** - * optional uint64 proc_id = 1; - */ - public Builder clearProcId() { - bitField0_ = (bitField0_ & ~0x00000001); - procId_ = 0L; - onChanged(); - return this; - } - - // optional string backup_id = 2; - private java.lang.Object backupId_ = ""; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000002); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) - } - - static { - defaultInstance = new BackupTablesResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) - } - - public interface RestoreTablesRequestOrBuilder - extends 
com.google.protobuf.MessageOrBuilder { - - // required string backup_id = 1; - /** - * required string backup_id = 1; - */ - boolean hasBackupId(); - /** - * required string backup_id = 1; - */ - java.lang.String getBackupId(); - /** - * required string backup_id = 1; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); - - // repeated .hbase.pb.TableName tables = 2; - /** - * repeated .hbase.pb.TableName tables = 2; - */ - java.util.List - getTablesList(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - int getTablesCount(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - java.util.List - getTablesOrBuilderList(); - /** - * repeated .hbase.pb.TableName tables = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index); - - // repeated .hbase.pb.TableName target_tables = 3; - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - java.util.List - getTargetTablesList(); - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index); - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - int getTargetTablesCount(); - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - java.util.List - getTargetTablesOrBuilderList(); - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( - int index); - - // required string backup_root_dir = 4; - /** - * required string backup_root_dir = 4; - */ - boolean hasBackupRootDir(); - /** - * required string backup_root_dir = 4; - */ - java.lang.String getBackupRootDir(); - /** - * required string backup_root_dir = 4; - */ - com.google.protobuf.ByteString - getBackupRootDirBytes(); - - // optional bool dependency_check_only = 5; - /** - * optional bool dependency_check_only = 5; - */ - boolean hasDependencyCheckOnly(); - /** - * optional bool dependency_check_only = 5; - */ - boolean getDependencyCheckOnly(); - - // optional bool overwrite = 6; - /** - * optional bool overwrite = 6; - */ - boolean hasOverwrite(); - /** - * optional bool overwrite = 6; - */ - boolean getOverwrite(); - - // optional uint64 nonce_group = 7 [default = 0]; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - boolean hasNonceGroup(); - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - long getNonceGroup(); - - // optional uint64 nonce = 8 [default = 0]; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - boolean hasNonce(); - /** - * optional uint64 nonce = 8 [default = 0]; - */ - long getNonce(); - } - /** - * Protobuf type {@code hbase.pb.RestoreTablesRequest} - */ - public static final class RestoreTablesRequest extends - com.google.protobuf.GeneratedMessage - implements RestoreTablesRequestOrBuilder { - // Use RestoreTablesRequest.newBuilder() to construct. 
- private RestoreTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RestoreTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RestoreTablesRequest defaultInstance; - public static RestoreTablesRequest getDefaultInstance() { - return defaultInstance; - } - - public RestoreTablesRequest getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RestoreTablesRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - backupId_ = input.readBytes(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - targetTables_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - targetTables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 34: { - bitField0_ |= 0x00000002; - backupRootDir_ = input.readBytes(); - break; - } - case 40: { - bitField0_ |= 0x00000004; - dependencyCheckOnly_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000008; - overwrite_ = input.readBool(); - break; - } - case 56: { - bitField0_ |= 0x00000010; - nonceGroup_ = input.readUInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000020; - nonce_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = java.util.Collections.unmodifiableList(tables_); - } - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - targetTables_ = java.util.Collections.unmodifiableList(targetTables_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable - 
.ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RestoreTablesRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RestoreTablesRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required string backup_id = 1; - public static final int BACKUP_ID_FIELD_NUMBER = 1; - private java.lang.Object backupId_; - /** - * required string backup_id = 1; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string backup_id = 1; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * required string backup_id = 1; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // repeated .hbase.pb.TableName tables = 2; - public static final int TABLES_FIELD_NUMBER = 2; - private java.util.List tables_; - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List getTablesList() { - return tables_; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesOrBuilderList() { - return tables_; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public int getTablesCount() { - return tables_.size(); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { - return tables_.get(index); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index) { - return tables_.get(index); - } - - // repeated .hbase.pb.TableName target_tables = 3; - public static final int TARGET_TABLES_FIELD_NUMBER = 3; - private java.util.List targetTables_; - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public java.util.List getTargetTablesList() { - return targetTables_; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public java.util.List - getTargetTablesOrBuilderList() { - return targetTables_; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public int getTargetTablesCount() { - return targetTables_.size(); - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { - return targetTables_.get(index); - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( - int index) { - return targetTables_.get(index); - } - - // required string backup_root_dir = 4; - public static final int BACKUP_ROOT_DIR_FIELD_NUMBER = 4; - private java.lang.Object backupRootDir_; - /** - * required string backup_root_dir = 4; - */ - public boolean hasBackupRootDir() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required string backup_root_dir = 4; - */ - public java.lang.String getBackupRootDir() { - java.lang.Object ref = backupRootDir_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupRootDir_ = s; - } - return s; - } - } - /** - * required string backup_root_dir = 4; - */ - public com.google.protobuf.ByteString - getBackupRootDirBytes() { - java.lang.Object ref = backupRootDir_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional bool dependency_check_only = 5; - public static final int DEPENDENCY_CHECK_ONLY_FIELD_NUMBER = 5; - private boolean dependencyCheckOnly_; - /** - * optional bool dependency_check_only = 5; - */ - public boolean hasDependencyCheckOnly() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool dependency_check_only = 5; - */ - public boolean getDependencyCheckOnly() { - return dependencyCheckOnly_; - } - - // optional bool overwrite = 6; - public static final int OVERWRITE_FIELD_NUMBER = 6; - private boolean overwrite_; - /** - * optional bool overwrite = 6; - */ - public boolean hasOverwrite() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool overwrite = 6; - */ - public boolean getOverwrite() { - return overwrite_; - } - - // optional uint64 nonce_group = 7 [default = 0]; - public static final int NONCE_GROUP_FIELD_NUMBER = 7; - private long nonceGroup_; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - - // optional uint64 nonce = 8 [default = 0]; - public static final int NONCE_FIELD_NUMBER = 8; - private long nonce_; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - - private void initFields() { - backupId_ = ""; - tables_ = java.util.Collections.emptyList(); - targetTables_ = java.util.Collections.emptyList(); - backupRootDir_ = ""; - dependencyCheckOnly_ = false; - overwrite_ = false; - nonceGroup_ = 0L; - nonce_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBackupRootDir()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTablesCount(); i++) { - if (!getTables(i).isInitialized()) { - 
memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getTargetTablesCount(); i++) { - if (!getTargetTables(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBackupIdBytes()); - } - for (int i = 0; i < tables_.size(); i++) { - output.writeMessage(2, tables_.get(i)); - } - for (int i = 0; i < targetTables_.size(); i++) { - output.writeMessage(3, targetTables_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(4, getBackupRootDirBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(5, dependencyCheckOnly_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBool(6, overwrite_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(7, nonceGroup_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeUInt64(8, nonce_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBackupIdBytes()); - } - for (int i = 0; i < tables_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, tables_.get(i)); - } - for (int i = 0; i < targetTables_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, targetTables_.get(i)); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, getBackupRootDirBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, dependencyCheckOnly_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(6, overwrite_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(7, nonceGroup_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, nonce_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) obj; - - boolean result = true; - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && getTablesList() - .equals(other.getTablesList()); - result = result && getTargetTablesList() - 
.equals(other.getTargetTablesList()); - result = result && (hasBackupRootDir() == other.hasBackupRootDir()); - if (hasBackupRootDir()) { - result = result && getBackupRootDir() - .equals(other.getBackupRootDir()); - } - result = result && (hasDependencyCheckOnly() == other.hasDependencyCheckOnly()); - if (hasDependencyCheckOnly()) { - result = result && (getDependencyCheckOnly() - == other.getDependencyCheckOnly()); - } - result = result && (hasOverwrite() == other.hasOverwrite()); - if (hasOverwrite()) { - result = result && (getOverwrite() - == other.getOverwrite()); - } - result = result && (hasNonceGroup() == other.hasNonceGroup()); - if (hasNonceGroup()) { - result = result && (getNonceGroup() - == other.getNonceGroup()); - } - result = result && (hasNonce() == other.hasNonce()); - if (hasNonce()) { - result = result && (getNonce() - == other.getNonce()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (getTablesCount() > 0) { - hash = (37 * hash) + TABLES_FIELD_NUMBER; - hash = (53 * hash) + getTablesList().hashCode(); - } - if (getTargetTablesCount() > 0) { - hash = (37 * hash) + TARGET_TABLES_FIELD_NUMBER; - hash = (53 * hash) + getTargetTablesList().hashCode(); - } - if (hasBackupRootDir()) { - hash = (37 * hash) + BACKUP_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getBackupRootDir().hashCode(); - } - if (hasDependencyCheckOnly()) { - hash = (37 * hash) + DEPENDENCY_CHECK_ONLY_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getDependencyCheckOnly()); - } - if (hasOverwrite()) { - hash = (37 * hash) + OVERWRITE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getOverwrite()); - } - if (hasNonceGroup()) { - hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNonceGroup()); - } - if (hasNonce()) { - hash = (37 * hash) + NONCE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getNonce()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest 
parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.RestoreTablesRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTablesFieldBuilder(); - getTargetTablesFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - backupId_ = ""; - bitField0_ = 
(bitField0_ & ~0x00000001); - if (tablesBuilder_ == null) { - tables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - tablesBuilder_.clear(); - } - if (targetTablesBuilder_ == null) { - targetTables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - targetTablesBuilder_.clear(); - } - backupRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000008); - dependencyCheckOnly_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - overwrite_ = false; - bitField0_ = (bitField0_ & ~0x00000020); - nonceGroup_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - nonce_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.backupId_ = backupId_; - if (tablesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - tables_ = java.util.Collections.unmodifiableList(tables_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.tables_ = tables_; - } else { - result.tables_ = tablesBuilder_.build(); - } - if (targetTablesBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - targetTables_ = java.util.Collections.unmodifiableList(targetTables_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.targetTables_ = targetTables_; - } else { - result.targetTables_ = targetTablesBuilder_.build(); - } - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000002; - } - result.backupRootDir_ = backupRootDir_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000004; - } - result.dependencyCheckOnly_ = dependencyCheckOnly_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000008; - } - result.overwrite_ = overwrite_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000010; - } - result.nonceGroup_ = nonceGroup_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000020; - } - result.nonce_ = nonce_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) { - return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance()) return this; - if (other.hasBackupId()) { - bitField0_ |= 0x00000001; - backupId_ = other.backupId_; - onChanged(); - } - if (tablesBuilder_ == null) { - if (!other.tables_.isEmpty()) { - if (tables_.isEmpty()) { - tables_ = other.tables_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureTablesIsMutable(); - tables_.addAll(other.tables_); - } - onChanged(); - } - } else { - if (!other.tables_.isEmpty()) { - if (tablesBuilder_.isEmpty()) { - tablesBuilder_.dispose(); - tablesBuilder_ = null; - tables_ = other.tables_; - bitField0_ = (bitField0_ & ~0x00000002); - tablesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTablesFieldBuilder() : null; - } else { - tablesBuilder_.addAllMessages(other.tables_); - } - } - } - if (targetTablesBuilder_ == null) { - if (!other.targetTables_.isEmpty()) { - if (targetTables_.isEmpty()) { - targetTables_ = other.targetTables_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureTargetTablesIsMutable(); - targetTables_.addAll(other.targetTables_); - } - onChanged(); - } - } else { - if (!other.targetTables_.isEmpty()) { - if (targetTablesBuilder_.isEmpty()) { - targetTablesBuilder_.dispose(); - targetTablesBuilder_ = null; - targetTables_ = other.targetTables_; - bitField0_ = (bitField0_ & ~0x00000004); - targetTablesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getTargetTablesFieldBuilder() : null; - } else { - targetTablesBuilder_.addAllMessages(other.targetTables_); - } - } - } - if (other.hasBackupRootDir()) { - bitField0_ |= 0x00000008; - backupRootDir_ = other.backupRootDir_; - onChanged(); - } - if (other.hasDependencyCheckOnly()) { - setDependencyCheckOnly(other.getDependencyCheckOnly()); - } - if (other.hasOverwrite()) { - setOverwrite(other.getOverwrite()); - } - if (other.hasNonceGroup()) { - setNonceGroup(other.getNonceGroup()); - } - if (other.hasNonce()) { - setNonce(other.getNonce()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasBackupId()) { - - return false; - } - if (!hasBackupRootDir()) { - - return false; - } - for (int i = 0; i < getTablesCount(); i++) { - if (!getTables(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getTargetTablesCount(); i++) { - if (!getTargetTables(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required string backup_id = 1; - private java.lang.Object backupId_ = ""; - /** - * required string backup_id = 1; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string backup_id = 1; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string backup_id = 1; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string backup_id = 1; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; - } - /** - * required string backup_id = 1; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * required string backup_id = 1; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; - } - - // repeated .hbase.pb.TableName tables = 2; - private java.util.List tables_ = - java.util.Collections.emptyList(); - private void ensureTablesIsMutable() { - if (!((bitField0_ & 0x00000002) == 
0x00000002)) { - tables_ = new java.util.ArrayList(tables_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; - - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List getTablesList() { - if (tablesBuilder_ == null) { - return java.util.Collections.unmodifiableList(tables_); - } else { - return tablesBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public int getTablesCount() { - if (tablesBuilder_ == null) { - return tables_.size(); - } else { - return tablesBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { - if (tablesBuilder_ == null) { - return tables_.get(index); - } else { - return tablesBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder setTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.set(index, value); - onChanged(); - } else { - tablesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder setTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.set(index, builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.add(value); - onChanged(); - } else { - tablesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTablesIsMutable(); - tables_.add(index, value); - onChanged(); - } else { - tablesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.add(builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder addTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.add(index, builderForValue.build()); - onChanged(); - } else { - tablesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName 
tables = 2; - */ - public Builder addAllTables( - java.lang.Iterable values) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - super.addAll(values, tables_); - onChanged(); - } else { - tablesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder clearTables() { - if (tablesBuilder_ == null) { - tables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - tablesBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public Builder removeTables(int index) { - if (tablesBuilder_ == null) { - ensureTablesIsMutable(); - tables_.remove(index); - onChanged(); - } else { - tablesBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( - int index) { - return getTablesFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( - int index) { - if (tablesBuilder_ == null) { - return tables_.get(index); } else { - return tablesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesOrBuilderList() { - if (tablesBuilder_ != null) { - return tablesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tables_); - } - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { - return getTablesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( - int index) { - return getTablesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName tables = 2; - */ - public java.util.List - getTablesBuilderList() { - return getTablesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTablesFieldBuilder() { - if (tablesBuilder_ == null) { - tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - tables_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - tables_ = null; - } - return tablesBuilder_; - } - - // repeated .hbase.pb.TableName target_tables = 3; - private java.util.List targetTables_ = - java.util.Collections.emptyList(); - private void ensureTargetTablesIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - targetTables_ = new java.util.ArrayList(targetTables_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> targetTablesBuilder_; - - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public java.util.List getTargetTablesList() { - if (targetTablesBuilder_ == null) { - return java.util.Collections.unmodifiableList(targetTables_); - } else { - return targetTablesBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public int getTargetTablesCount() { - if (targetTablesBuilder_ == null) { - return targetTables_.size(); - } else { - return targetTablesBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { - if (targetTablesBuilder_ == null) { - return targetTables_.get(index); - } else { - return targetTablesBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder setTargetTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (targetTablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetTablesIsMutable(); - targetTables_.set(index, value); - onChanged(); - } else { - targetTablesBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder setTargetTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (targetTablesBuilder_ == null) { - ensureTargetTablesIsMutable(); - targetTables_.set(index, builderForValue.build()); - onChanged(); - } else { - targetTablesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder addTargetTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (targetTablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetTablesIsMutable(); - targetTables_.add(value); - onChanged(); - } else { - targetTablesBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder addTargetTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (targetTablesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTargetTablesIsMutable(); - targetTables_.add(index, value); - onChanged(); - } else { - targetTablesBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder addTargetTables( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (targetTablesBuilder_ == null) { - ensureTargetTablesIsMutable(); - targetTables_.add(builderForValue.build()); - onChanged(); - } else { - targetTablesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder addTargetTables( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (targetTablesBuilder_ == null) { - ensureTargetTablesIsMutable(); - targetTables_.add(index, 
builderForValue.build()); - onChanged(); - } else { - targetTablesBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder addAllTargetTables( - java.lang.Iterable values) { - if (targetTablesBuilder_ == null) { - ensureTargetTablesIsMutable(); - super.addAll(values, targetTables_); - onChanged(); - } else { - targetTablesBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder clearTargetTables() { - if (targetTablesBuilder_ == null) { - targetTables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - targetTablesBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public Builder removeTargetTables(int index) { - if (targetTablesBuilder_ == null) { - ensureTargetTablesIsMutable(); - targetTables_.remove(index); - onChanged(); - } else { - targetTablesBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTargetTablesBuilder( - int index) { - return getTargetTablesFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( - int index) { - if (targetTablesBuilder_ == null) { - return targetTables_.get(index); } else { - return targetTablesBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public java.util.List - getTargetTablesOrBuilderList() { - if (targetTablesBuilder_ != null) { - return targetTablesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(targetTables_); - } - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder() { - return getTargetTablesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder( - int index) { - return getTargetTablesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableName target_tables = 3; - */ - public java.util.List - getTargetTablesBuilderList() { - return getTargetTablesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> - getTargetTablesFieldBuilder() { - if (targetTablesBuilder_ == null) { - targetTablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( - targetTables_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - 
targetTables_ = null; - } - return targetTablesBuilder_; - } - - // required string backup_root_dir = 4; - private java.lang.Object backupRootDir_ = ""; - /** - * required string backup_root_dir = 4; - */ - public boolean hasBackupRootDir() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required string backup_root_dir = 4; - */ - public java.lang.String getBackupRootDir() { - java.lang.Object ref = backupRootDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupRootDir_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string backup_root_dir = 4; - */ - public com.google.protobuf.ByteString - getBackupRootDirBytes() { - java.lang.Object ref = backupRootDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string backup_root_dir = 4; - */ - public Builder setBackupRootDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - backupRootDir_ = value; - onChanged(); - return this; - } - /** - * required string backup_root_dir = 4; - */ - public Builder clearBackupRootDir() { - bitField0_ = (bitField0_ & ~0x00000008); - backupRootDir_ = getDefaultInstance().getBackupRootDir(); - onChanged(); - return this; - } - /** - * required string backup_root_dir = 4; - */ - public Builder setBackupRootDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - backupRootDir_ = value; - onChanged(); - return this; - } - - // optional bool dependency_check_only = 5; - private boolean dependencyCheckOnly_ ; - /** - * optional bool dependency_check_only = 5; - */ - public boolean hasDependencyCheckOnly() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * optional bool dependency_check_only = 5; - */ - public boolean getDependencyCheckOnly() { - return dependencyCheckOnly_; - } - /** - * optional bool dependency_check_only = 5; - */ - public Builder setDependencyCheckOnly(boolean value) { - bitField0_ |= 0x00000010; - dependencyCheckOnly_ = value; - onChanged(); - return this; - } - /** - * optional bool dependency_check_only = 5; - */ - public Builder clearDependencyCheckOnly() { - bitField0_ = (bitField0_ & ~0x00000010); - dependencyCheckOnly_ = false; - onChanged(); - return this; - } - - // optional bool overwrite = 6; - private boolean overwrite_ ; - /** - * optional bool overwrite = 6; - */ - public boolean hasOverwrite() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional bool overwrite = 6; - */ - public boolean getOverwrite() { - return overwrite_; - } - /** - * optional bool overwrite = 6; - */ - public Builder setOverwrite(boolean value) { - bitField0_ |= 0x00000020; - overwrite_ = value; - onChanged(); - return this; - } - /** - * optional bool overwrite = 6; - */ - public Builder clearOverwrite() { - bitField0_ = (bitField0_ & ~0x00000020); - overwrite_ = false; - onChanged(); - return this; - } - - // optional uint64 nonce_group = 7 [default = 0]; - private long nonceGroup_ ; - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public boolean hasNonceGroup() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - /** - * optional uint64 
nonce_group = 7 [default = 0]; - */ - public long getNonceGroup() { - return nonceGroup_; - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public Builder setNonceGroup(long value) { - bitField0_ |= 0x00000040; - nonceGroup_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce_group = 7 [default = 0]; - */ - public Builder clearNonceGroup() { - bitField0_ = (bitField0_ & ~0x00000040); - nonceGroup_ = 0L; - onChanged(); - return this; - } - - // optional uint64 nonce = 8 [default = 0]; - private long nonce_ ; - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public boolean hasNonce() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public long getNonce() { - return nonce_; - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public Builder setNonce(long value) { - bitField0_ |= 0x00000080; - nonce_ = value; - onChanged(); - return this; - } - /** - * optional uint64 nonce = 8 [default = 0]; - */ - public Builder clearNonce() { - bitField0_ = (bitField0_ & ~0x00000080); - nonce_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesRequest) - } - - static { - defaultInstance = new RestoreTablesRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesRequest) - } - - public interface RestoreTablesResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional uint64 proc_id = 1; - /** - * optional uint64 proc_id = 1; - */ - boolean hasProcId(); - /** - * optional uint64 proc_id = 1; - */ - long getProcId(); - } - /** - * Protobuf type {@code hbase.pb.RestoreTablesResponse} - */ - public static final class RestoreTablesResponse extends - com.google.protobuf.GeneratedMessage - implements RestoreTablesResponseOrBuilder { - // Use RestoreTablesResponse.newBuilder() to construct. 
- private RestoreTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RestoreTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RestoreTablesResponse defaultInstance; - public static RestoreTablesResponse getDefaultInstance() { - return defaultInstance; - } - - public RestoreTablesResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RestoreTablesResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - procId_ = input.readUInt64(); + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + break; + } + case 10: { + int length = input.readRawVarint32(); + int oldLimit = input.pushLimit(length); + while(input.getBytesUntilLimit() > 0) { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + capabilities_.add(value); + } + } + input.popLimit(oldLimit); break; } } @@ -63312,56 +58694,173 @@ public final class MasterProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RestoreTablesResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SecurityCapabilitiesResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new RestoreTablesResponse(input, extensionRegistry); + return new SecurityCapabilitiesResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } - private int bitField0_; - // optional uint64 proc_id = 1; - public static final int PROC_ID_FIELD_NUMBER = 1; - private long procId_; /** - * optional uint64 proc_id = 1; + * Protobuf enum {@code hbase.pb.SecurityCapabilitiesResponse.Capability} */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public enum Capability + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SIMPLE_AUTHENTICATION = 0; + */ + SIMPLE_AUTHENTICATION(0, 0), + /** + * SECURE_AUTHENTICATION = 1; + */ + SECURE_AUTHENTICATION(1, 1), + /** + * AUTHORIZATION = 2; + */ + AUTHORIZATION(2, 2), + /** + * CELL_AUTHORIZATION = 3; + */ + CELL_AUTHORIZATION(3, 3), + /** + * CELL_VISIBILITY = 4; + */ + CELL_VISIBILITY(4, 4), + ; + + /** + * SIMPLE_AUTHENTICATION = 0; + */ + public static final int SIMPLE_AUTHENTICATION_VALUE = 0; + /** + * SECURE_AUTHENTICATION = 1; + */ + public static final int SECURE_AUTHENTICATION_VALUE = 1; + /** + * AUTHORIZATION = 2; + */ + public static final int AUTHORIZATION_VALUE = 2; + /** + * CELL_AUTHORIZATION = 3; + */ + public static final int CELL_AUTHORIZATION_VALUE = 3; + /** + * CELL_VISIBILITY = 4; + */ + public static final int CELL_VISIBILITY_VALUE = 4; + + + public final int getNumber() { return value; } + + public static Capability valueOf(int value) { + switch (value) { + case 0: return SIMPLE_AUTHENTICATION; + case 1: return SECURE_AUTHENTICATION; + case 2: return AUTHORIZATION; + case 3: return CELL_AUTHORIZATION; + case 4: return CELL_VISIBILITY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Capability findValueByNumber(int number) { + return Capability.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + 
getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final Capability[] VALUES = values(); + + public static Capability valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Capability(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SecurityCapabilitiesResponse.Capability) } + + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + public static final int CAPABILITIES_FIELD_NUMBER = 1; + private java.util.List capabilities_; /** - * optional uint64 proc_id = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public long getProcId() { - return procId_; + public java.util.List getCapabilitiesList() { + return capabilities_; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public int getCapabilitiesCount() { + return capabilities_.size(); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); } private void initFields() { - procId_ = 0L; + capabilities_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -63375,8 +58874,8 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, procId_); + for (int i = 0; i < capabilities_.size(); i++) { + output.writeEnum(1, capabilities_.get(i).getNumber()); } getUnknownFields().writeTo(output); } @@ -63387,9 +58886,14 @@ public final class MasterProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, procId_); + { + int dataSize = 0; + for (int i = 0; i < capabilities_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeEnumSizeNoTag(capabilities_.get(i).getNumber()); + } + size += dataSize; + size += 1 * capabilities_.size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -63408,17 +58912,14 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) 
obj; boolean result = true; - result = result && (hasProcId() == other.hasProcId()); - if (hasProcId()) { - result = result && (getProcId() - == other.getProcId()); - } + result = result && getCapabilitiesList() + .equals(other.getCapabilitiesList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -63432,62 +58933,62 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasProcId()) { - hash = (37 * hash) + PROC_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getProcId()); + if (getCapabilitiesCount() > 0) { + hash = (37 * hash) + CAPABILITIES_FIELD_NUMBER; + hash = (53 * hash) + hashEnumList(getCapabilitiesList()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -63496,7 +58997,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -63508,24 +59009,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.RestoreTablesResponse} + * Protobuf type {@code hbase.pb.SecurityCapabilitiesResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -63545,7 +59046,7 @@ public final class MasterProtos { public Builder clear() { super.clear(); - procId_ = 0L; + capabilities_ = 
java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -63556,47 +59057,53 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = java.util.Collections.unmodifiableList(capabilities_); + bitField0_ = (bitField0_ & ~0x00000001); } - result.procId_ = procId_; - result.bitField0_ = to_bitField0_; + result.capabilities_ = capabilities_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()) return this; - if (other.hasProcId()) { - setProcId(other.getProcId()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()) return this; + if (!other.capabilities_.isEmpty()) { + if (capabilities_.isEmpty()) { + capabilities_ = other.capabilities_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCapabilitiesIsMutable(); + capabilities_.addAll(other.capabilities_); + } + onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -63610,11 +59117,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -63625,48 +59132,87 @@ public final class MasterProtos { } private int bitField0_; - // optional uint64 proc_id = 1; - private long procId_ ; + // repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + private java.util.List capabilities_ = + java.util.Collections.emptyList(); + private void ensureCapabilitiesIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + capabilities_ = new java.util.ArrayList(capabilities_); + bitField0_ |= 0x00000001; + } + } /** - * optional uint64 proc_id = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public boolean hasProcId() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List getCapabilitiesList() { + return java.util.Collections.unmodifiableList(capabilities_); } /** - * optional uint64 proc_id = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public long getProcId() { - return procId_; + public int getCapabilitiesCount() { + return capabilities_.size(); } /** - * optional uint64 proc_id = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public Builder setProcId(long value) { - bitField0_ |= 0x00000001; - procId_ = value; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability getCapabilities(int index) { + return capabilities_.get(index); + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder setCapabilities( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.set(index, value); onChanged(); return this; } /** - * optional uint64 proc_id = 1; + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; */ - public Builder clearProcId() { + public Builder addCapabilities(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCapabilitiesIsMutable(); + capabilities_.add(value); + 
onChanged(); + return this; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder addAllCapabilities( + java.lang.Iterable values) { + ensureCapabilitiesIsMutable(); + super.addAll(values, capabilities_); + onChanged(); + return this; + } + /** + * repeated .hbase.pb.SecurityCapabilitiesResponse.Capability capabilities = 1; + */ + public Builder clearCapabilities() { + capabilities_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); - procId_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.SecurityCapabilitiesResponse) } static { - defaultInstance = new RestoreTablesResponse(true); + defaultInstance = new SecurityCapabilitiesResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } /** @@ -64390,30 +59936,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); - /** - * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); - * - *
-       ** backup table set 
-       * 
- */ - public abstract void backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, - com.google.protobuf.RpcCallback done); - - /** - * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); - * - *
-       ** restore table set 
-       * 
- */ - public abstract void restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, - com.google.protobuf.RpcCallback done); - } public static com.google.protobuf.Service newReflectiveService( @@ -64875,22 +60397,6 @@ public final class MasterProtos { impl.listProcedures(controller, request, done); } - @java.lang.Override - public void backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, - com.google.protobuf.RpcCallback done) { - impl.backupTables(controller, request, done); - } - - @java.lang.Override - public void restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, - com.google.protobuf.RpcCallback done) { - impl.restoreTables(controller, request, done); - } - }; } @@ -65027,10 +60533,6 @@ public final class MasterProtos { return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); - case 57: - return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); - case 58: - return impl.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -65159,10 +60661,6 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); - case 57: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); - case 58: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -65291,10 +60789,6 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); - case 57: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); - case 58: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -66016,30 +61510,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); - /** - * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); - * - *
-     ** backup table set 
-     * 
- */ - public abstract void backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, - com.google.protobuf.RpcCallback done); - - /** - * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); - * - *
-     ** restore table set 
-     * 
- */ - public abstract void restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, - com.google.protobuf.RpcCallback done); - public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -66347,16 +61817,6 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 57: - this.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 58: - this.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -66485,10 +61945,6 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); - case 57: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); - case 58: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -66617,10 +62073,6 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); - case 57: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); - case 58: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -67496,36 +62948,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); } - - public void backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(57), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); - } - - public void restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(58), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, - 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance())); - } } public static BlockingInterface newBlockingStub( @@ -67818,16 +63240,6 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) - throws com.google.protobuf.ServiceException; - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) - throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -68520,30 +63932,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(57), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); - } - - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(58), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()); - } - } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -69094,26 +64482,6 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_BackupTablesRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_BackupTablesResponse_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_RestoreTablesRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_RestoreTablesResponse_descriptor; - private static - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -69123,351 +64491,331 @@ public final class MasterProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014B" + - "ackup.proto\032\014Client.proto\032\023ClusterStatus" + - ".proto\032\023ErrorHandling.proto\032\017Procedure.p" + - "roto\032\013Quota.proto\"\234\001\n\020AddColumnRequest\022\'" + - "\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022" + - "5\n\017column_families\030\002 \002(\0132\034.hbase.pb.Colu" + - "mnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + - "\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnResponse\022\017\n" + - "\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnRequest\022\'\n" + - "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022\023", - "\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004" + - ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColumnRes" + - "ponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyColumnR" + - "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" + - "bleName\0225\n\017column_families\030\002 \002(\0132\034.hbase" + - ".pb.ColumnFamilySchema\022\026\n\013nonce_group\030\003 " + - "\001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyColumn" + - "Response\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRegionR" + - "equest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" + - "Specifier\022.\n\020dest_server_name\030\002 \001(\0132\024.hb", - "ase.pb.ServerName\"\024\n\022MoveRegionResponse\"" + - "\222\001\n\035DispatchMergingRegionsRequest\022+\n\010reg" + - "ion_a\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022+" + - "\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionSpecif" + - "ier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispatch" + - "MergingRegionsResponse\"@\n\023AssignRegionRe" + - "quest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionS" + - "pecifier\"\026\n\024AssignRegionResponse\"X\n\025Unas" + - "signRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbas" + - "e.pb.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005fal", - "se\"\030\n\026UnassignRegionResponse\"A\n\024OfflineR" + - "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." 
+ - "RegionSpecifier\"\027\n\025OfflineRegionResponse" + - "\"\177\n\022CreateTableRequest\022+\n\014table_schema\030\001" + - " \002(\0132\025.hbase.pb.TableSchema\022\022\n\nsplit_key" + - "s\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce" + - "\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022\017\n\007proc" + - "_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'\n\ntable" + - "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" + - "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023De", - "leteTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024T" + - "runcateTableRequest\022&\n\ttableName\030\001 \002(\0132\023" + - ".hbase.pb.TableName\022\035\n\016preserveSplits\030\002 " + - "\001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005no" + - "nce\030\004 \001(\004:\0010\"(\n\025TruncateTableResponse\022\017\n" + - "\007proc_id\030\001 \001(\004\"g\n\022EnableTableRequest\022\'\n\n" + - "table_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n" + - "\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"" + - "&\n\023EnableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + - "h\n\023DisableTableRequest\022\'\n\ntable_name\030\001 \002", + "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" + + "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" + + "andling.proto\032\017Procedure.proto\032\013Quota.pr" + + "oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" + + " \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" + + "lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" + + "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" + + "\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" + + "\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " + + "\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030", + "\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + + " \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" + + "id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" + + "e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" + + "umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" + + "lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" + + "e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" + + "oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" + + "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" + + "est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN", + "ame\"\024\n\022MoveRegionResponse\"\222\001\n\035DispatchMe" + + "rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." 
+ + "hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" + + "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" + + "e\030\003 \001(\010:\005false\" \n\036DispatchMergingRegions" + + "Response\"@\n\023AssignRegionRequest\022)\n\006regio" + + "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026\n\024As" + + "signRegionResponse\"X\n\025UnassignRegionRequ" + + "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" + + "cifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unassign", + "RegionResponse\"A\n\024OfflineRegionRequest\022)" + + "\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifie" + + "r\"\027\n\025OfflineRegionResponse\"\177\n\022CreateTabl" + + "eRequest\022+\n\014table_schema\030\001 \002(\0132\025.hbase.p" + + "b.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n\013non" + + "ce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023C" + + "reateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022D" + + "eleteTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023." + + "hbase.pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:" + + "\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableRespo", + "nse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTableRe" + + "quest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb.Tabl" + + "eName\022\035\n\016preserveSplits\030\002 \001(\010:\005false\022\026\n\013" + + "nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(" + + "\n\025TruncateTableResponse\022\017\n\007proc_id\030\001 \001(\004" + + "\"g\n\022EnableTableRequest\022\'\n\ntable_name\030\001 \002" + "(\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002" + - " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTabl" + - "eResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTab" + - "leRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + - ".TableName\022+\n\014table_schema\030\002 \002(\0132\025.hbase" + - ".pb.TableSchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022" + - "\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableResponse" + - "\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamespaceRequ" + - "est\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase" + - ".pb.NamespaceDescriptor\022\026\n\013nonce_group\030\002", - " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNames" + - "paceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteN" + - "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\022\026" + - "\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010" + - "\"*\n\027DeleteNamespaceResponse\022\017\n\007proc_id\030\001" + - " \001(\004\"~\n\026ModifyNamespaceRequest\022:\n\023namesp" + - "aceDescriptor\030\001 \002(\0132\035.hbase.pb.Namespace" + - "Descriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005no" + - "nce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceResponse\022" + - "\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceDescript", - "orRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetN" + - "amespaceDescriptorResponse\022:\n\023namespaceD" + - "escriptor\030\001 \002(\0132\035.hbase.pb.NamespaceDesc" + - "riptor\"!\n\037ListNamespaceDescriptorsReques" + - "t\"^\n ListNamespaceDescriptorsResponse\022:\n" + - "\023namespaceDescriptor\030\001 \003(\0132\035.hbase.pb.Na" + - 
"mespaceDescriptor\"?\n&ListTableDescriptor" + - "sByNamespaceRequest\022\025\n\rnamespaceName\030\001 \002" + - "(\t\"U\n\'ListTableDescriptorsByNamespaceRes" + - "ponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbase.pb.Ta", - "bleSchema\"9\n ListTableNamesByNamespaceRe" + - "quest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!ListTabl" + - "eNamesByNamespaceResponse\022&\n\ttableName\030\001" + - " \003(\0132\023.hbase.pb.TableName\"\021\n\017ShutdownReq" + - "uest\"\022\n\020ShutdownResponse\"\023\n\021StopMasterRe" + - "quest\"\024\n\022StopMasterResponse\"\037\n\016BalanceRe" + - "quest\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022" + - "\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunn" + - "ingRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 " + - "\001(\010\"8\n\032SetBalancerRunningResponse\022\032\n\022pre", - "v_balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabl" + - "edRequest\",\n\031IsBalancerEnabledResponse\022\017" + - "\n\007enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnable" + - "dRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous" + - "\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb." + - "MasterSwitchType\"4\n\036SetSplitOrMergeEnabl" + - "edResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSpli" + - "tOrMergeEnabledRequest\022/\n\013switch_type\030\001 " + - "\002(\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSpl" + - "itOrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(", - "\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeRespon" + - "se\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormali" + - "zerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNorm" + - "alizerRunningResponse\022\035\n\025prev_normalizer" + - "_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabledReque" + - "st\".\n\033IsNormalizerEnabledResponse\022\017\n\007ena" + - "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R" + - "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001" + - "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" + - "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons", - "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" + - "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" + - "edResponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReq" + - "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" + - "otDescription\",\n\020SnapshotResponse\022\030\n\020exp" + - "ected_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnaps" + - "hotsRequest\"Q\n\035GetCompletedSnapshotsResp" + - "onse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snaps" + - "hotDescription\"H\n\025DeleteSnapshotRequest\022" + - "/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDes", - "cription\"\030\n\026DeleteSnapshotResponse\"I\n\026Re" + - "storeSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035" + - ".hbase.pb.SnapshotDescription\"\031\n\027Restore" + - "SnapshotResponse\"H\n\025IsSnapshotDoneReques" + - "t\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotD" + - "escription\"^\n\026IsSnapshotDoneResponse\022\023\n\004" + - "done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hb" + - "ase.pb.SnapshotDescription\"O\n\034IsRestoreS" + - "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" + - "base.pb.SnapshotDescription\"4\n\035IsRestore", - "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" + - 
"e\"F\n\033GetSchemaAlterStatusRequest\022\'\n\ntabl" + - "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034Get" + - "SchemaAlterStatusResponse\022\035\n\025yet_to_upda" + - "te_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"" + - "\213\001\n\032GetTableDescriptorsRequest\022(\n\013table_" + - "names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005rege" + - "x\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fal" + - "se\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescrip" + - "torsResponse\022+\n\014table_schema\030\001 \003(\0132\025.hba", - "se.pb.TableSchema\"[\n\024GetTableNamesReques" + - "t\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002" + - " \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTab" + - "leNamesResponse\022(\n\013table_names\030\001 \003(\0132\023.h" + - "base.pb.TableName\"?\n\024GetTableStateReques" + - "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" + - "me\"B\n\025GetTableStateResponse\022)\n\013table_sta" + - "te\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027GetClu" + - "sterStatusRequest\"K\n\030GetClusterStatusRes" + - "ponse\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb", - ".ClusterStatus\"\030\n\026IsMasterRunningRequest" + - "\"4\n\027IsMasterRunningResponse\022\031\n\021is_master" + - "_running\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221" + - "\n\tprocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDe" + - "scription\"F\n\025ExecProcedureResponse\022\030\n\020ex" + - "pected_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(" + - "\014\"K\n\026IsProcedureDoneRequest\0221\n\tprocedure" + - "\030\001 \001(\0132\036.hbase.pb.ProcedureDescription\"`" + - "\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:" + - "\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Proc", - "edureDescription\",\n\031GetProcedureResultRe" + - "quest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureR" + - "esultResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb." + - "GetProcedureResultResponse.State\022\022\n\nstar" + - "t_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006res" + - "ult\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb." 
+ - "ForeignExceptionMessage\"1\n\005State\022\r\n\tNOT_" + - "FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"P\n\025Ab" + - "ortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004\022&\n\030" + - "may_interrupt_if_running\030\002 \001(\010:\004true\"6\n\026", - "AbortProcedureResponse\022\034\n\024is_procedure_a" + - "borted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n" + - "\026ListProceduresResponse\022&\n\tprocedure\030\001 \003" + - "(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRequ" + - "est\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001" + - "(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(" + - "\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030\005 \001" + - "(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030\007" + - " \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQu" + - "otaResponse\"J\n\037MajorCompactionTimestampR", - "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" + - "bleName\"U\n(MajorCompactionTimestampForRe" + - "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" + - "egionSpecifier\"@\n MajorCompactionTimesta" + - "mpResponse\022\034\n\024compaction_timestamp\030\001 \002(\003" + - "\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Secu" + - "rityCapabilitiesResponse\022G\n\014capabilities" + - "\030\001 \003(\01621.hbase.pb.SecurityCapabilitiesRe" + - "sponse.Capability\"\202\001\n\nCapability\022\031\n\025SIMP" + - "LE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICA", - "TION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHOR" + - "IZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"\336\001\n\023Backu" + - "pTablesRequest\022\"\n\004type\030\001 \002(\0162\024.hbase.pb." 
+ - "BackupType\022#\n\006tables\030\002 \003(\0132\023.hbase.pb.Ta" + - "bleName\022\027\n\017target_root_dir\030\003 \002(\t\022\017\n\007work" + - "ers\030\004 \001(\003\022\021\n\tbandwidth\030\005 \001(\003\022\027\n\017backup_s" + - "et_name\030\006 \001(\t\022\026\n\013nonce_group\030\007 \001(\004:\0010\022\020\n" + - "\005nonce\030\010 \001(\004:\0010\":\n\024BackupTablesResponse\022" + - "\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_id\030\002 \001(\t\"\357\001\n\024" + - "RestoreTablesRequest\022\021\n\tbackup_id\030\001 \002(\t\022", - "#\n\006tables\030\002 \003(\0132\023.hbase.pb.TableName\022*\n\r" + - "target_tables\030\003 \003(\0132\023.hbase.pb.TableName" + - "\022\027\n\017backup_root_dir\030\004 \002(\t\022\035\n\025dependency_" + - "check_only\030\005 \001(\010\022\021\n\toverwrite\030\006 \001(\010\022\026\n\013n" + - "once_group\030\007 \001(\004:\0010\022\020\n\005nonce\030\010 \001(\004:\0010\"(\n" + - "\025RestoreTablesResponse\022\017\n\007proc_id\030\001 \001(\004*" + - "(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020" + - "\001*8\n\022RestoreTablesState\022\016\n\nVALIDATION\020\001\022" + - "\022\n\016RESTORE_IMAGES\020\0022\364)\n\rMasterService\022e\n" + - "\024GetSchemaAlterStatus\022%.hbase.pb.GetSche", - "maAlterStatusRequest\032&.hbase.pb.GetSchem" + - "aAlterStatusResponse\022b\n\023GetTableDescript" + - "ors\022$.hbase.pb.GetTableDescriptorsReques" + - "t\032%.hbase.pb.GetTableDescriptorsResponse" + - "\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNam" + - "esRequest\032\037.hbase.pb.GetTableNamesRespon" + - "se\022Y\n\020GetClusterStatus\022!.hbase.pb.GetClu" + - "sterStatusRequest\032\".hbase.pb.GetClusterS" + - "tatusResponse\022V\n\017IsMasterRunning\022 .hbase" + - ".pb.IsMasterRunningRequest\032!.hbase.pb.Is", - "MasterRunningResponse\022D\n\tAddColumn\022\032.hba" + - "se.pb.AddColumnRequest\032\033.hbase.pb.AddCol" + - "umnResponse\022M\n\014DeleteColumn\022\035.hbase.pb.D" + - "eleteColumnRequest\032\036.hbase.pb.DeleteColu" + - "mnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.Mo" + - "difyColumnRequest\032\036.hbase.pb.ModifyColum" + - "nResponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveR" + - "egionRequest\032\034.hbase.pb.MoveRegionRespon" + - "se\022k\n\026DispatchMergingRegions\022\'.hbase.pb." + - "DispatchMergingRegionsRequest\032(.hbase.pb", - ".DispatchMergingRegionsResponse\022M\n\014Assig" + - "nRegion\022\035.hbase.pb.AssignRegionRequest\032\036" + - ".hbase.pb.AssignRegionResponse\022S\n\016Unassi" + - "gnRegion\022\037.hbase.pb.UnassignRegionReques" + - "t\032 .hbase.pb.UnassignRegionResponse\022P\n\rO" + - "fflineRegion\022\036.hbase.pb.OfflineRegionReq" + - "uest\032\037.hbase.pb.OfflineRegionResponse\022J\n" + - "\013DeleteTable\022\034.hbase.pb.DeleteTableReque" + - "st\032\035.hbase.pb.DeleteTableResponse\022P\n\rtru" + - "ncateTable\022\036.hbase.pb.TruncateTableReque", - "st\032\037.hbase.pb.TruncateTableResponse\022J\n\013E" + - "nableTable\022\034.hbase.pb.EnableTableRequest" + - "\032\035.hbase.pb.EnableTableResponse\022M\n\014Disab" + - "leTable\022\035.hbase.pb.DisableTableRequest\032\036" + - ".hbase.pb.DisableTableResponse\022J\n\013Modify" + - "Table\022\034.hbase.pb.ModifyTableRequest\032\035.hb" + - "ase.pb.ModifyTableResponse\022J\n\013CreateTabl" + - "e\022\034.hbase.pb.CreateTableRequest\032\035.hbase." 
+ - "pb.CreateTableResponse\022A\n\010Shutdown\022\031.hba" + - "se.pb.ShutdownRequest\032\032.hbase.pb.Shutdow", - "nResponse\022G\n\nStopMaster\022\033.hbase.pb.StopM" + - "asterRequest\032\034.hbase.pb.StopMasterRespon" + - "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" + - "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" + - "rRunning\022#.hbase.pb.SetBalancerRunningRe" + - "quest\032$.hbase.pb.SetBalancerRunningRespo" + - "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa" + - "lancerEnabledRequest\032#.hbase.pb.IsBalanc" + - "erEnabledResponse\022k\n\026SetSplitOrMergeEnab" + - "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq", - "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" + - "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." + - "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." + - "pb.IsSplitOrMergeEnabledResponse\022D\n\tNorm" + - "alize\022\032.hbase.pb.NormalizeRequest\032\033.hbas" + - "e.pb.NormalizeResponse\022e\n\024SetNormalizerR" + - "unning\022%.hbase.pb.SetNormalizerRunningRe" + - "quest\032&.hbase.pb.SetNormalizerRunningRes" + - "ponse\022b\n\023IsNormalizerEnabled\022$.hbase.pb." + - "IsNormalizerEnabledRequest\032%.hbase.pb.Is", - "NormalizerEnabledResponse\022S\n\016RunCatalogS" + - "can\022\037.hbase.pb.RunCatalogScanRequest\032 .h" + - "base.pb.RunCatalogScanResponse\022e\n\024Enable" + - "CatalogJanitor\022%.hbase.pb.EnableCatalogJ" + - "anitorRequest\032&.hbase.pb.EnableCatalogJa" + - "nitorResponse\022n\n\027IsCatalogJanitorEnabled" + - "\022(.hbase.pb.IsCatalogJanitorEnabledReque" + - "st\032).hbase.pb.IsCatalogJanitorEnabledRes" + - "ponse\022^\n\021ExecMasterService\022#.hbase.pb.Co" + - "processorServiceRequest\032$.hbase.pb.Copro", - "cessorServiceResponse\022A\n\010Snapshot\022\031.hbas" + - "e.pb.SnapshotRequest\032\032.hbase.pb.Snapshot" + - "Response\022h\n\025GetCompletedSnapshots\022&.hbas" + - "e.pb.GetCompletedSnapshotsRequest\032\'.hbas" + - "e.pb.GetCompletedSnapshotsResponse\022S\n\016De" + - "leteSnapshot\022\037.hbase.pb.DeleteSnapshotRe" + - "quest\032 .hbase.pb.DeleteSnapshotResponse\022" + - "S\n\016IsSnapshotDone\022\037.hbase.pb.IsSnapshotD" + - "oneRequest\032 .hbase.pb.IsSnapshotDoneResp" + - "onse\022V\n\017RestoreSnapshot\022 .hbase.pb.Resto", - "reSnapshotRequest\032!.hbase.pb.RestoreSnap" + - "shotResponse\022h\n\025IsRestoreSnapshotDone\022&." + - "hbase.pb.IsRestoreSnapshotDoneRequest\032\'." + - "hbase.pb.IsRestoreSnapshotDoneResponse\022P" + - "\n\rExecProcedure\022\036.hbase.pb.ExecProcedure" + - "Request\032\037.hbase.pb.ExecProcedureResponse" + - "\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exec" + - "ProcedureRequest\032\037.hbase.pb.ExecProcedur" + - "eResponse\022V\n\017IsProcedureDone\022 .hbase.pb." + - "IsProcedureDoneRequest\032!.hbase.pb.IsProc", - "edureDoneResponse\022V\n\017ModifyNamespace\022 .h" + - "base.pb.ModifyNamespaceRequest\032!.hbase.p" + - "b.ModifyNamespaceResponse\022V\n\017CreateNames" + - "pace\022 .hbase.pb.CreateNamespaceRequest\032!" + - ".hbase.pb.CreateNamespaceResponse\022V\n\017Del" + - "eteNamespace\022 .hbase.pb.DeleteNamespaceR" + - "equest\032!.hbase.pb.DeleteNamespaceRespons" + - "e\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.G" + - "etNamespaceDescriptorRequest\032(.hbase.pb." 
+ - "GetNamespaceDescriptorResponse\022q\n\030ListNa", - "mespaceDescriptors\022).hbase.pb.ListNamesp" + - "aceDescriptorsRequest\032*.hbase.pb.ListNam" + - "espaceDescriptorsResponse\022\206\001\n\037ListTableD" + - "escriptorsByNamespace\0220.hbase.pb.ListTab" + - "leDescriptorsByNamespaceRequest\0321.hbase." + - "pb.ListTableDescriptorsByNamespaceRespon" + - "se\022t\n\031ListTableNamesByNamespace\022*.hbase." + - "pb.ListTableNamesByNamespaceRequest\032+.hb" + - "ase.pb.ListTableNamesByNamespaceResponse" + - "\022P\n\rGetTableState\022\036.hbase.pb.GetTableSta", - "teRequest\032\037.hbase.pb.GetTableStateRespon" + - "se\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReques" + - "t\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLast" + - "MajorCompactionTimestamp\022).hbase.pb.Majo" + - "rCompactionTimestampRequest\032*.hbase.pb.M" + - "ajorCompactionTimestampResponse\022\212\001\n(getL" + - "astMajorCompactionTimestampForRegion\0222.h" + - "base.pb.MajorCompactionTimestampForRegio" + - "nRequest\032*.hbase.pb.MajorCompactionTimes" + - "tampResponse\022_\n\022getProcedureResult\022#.hba", - "se.pb.GetProcedureResultRequest\032$.hbase." + - "pb.GetProcedureResultResponse\022h\n\027getSecu" + - "rityCapabilities\022%.hbase.pb.SecurityCapa" + - "bilitiesRequest\032&.hbase.pb.SecurityCapab" + - "ilitiesResponse\022S\n\016AbortProcedure\022\037.hbas" + - "e.pb.AbortProcedureRequest\032 .hbase.pb.Ab" + - "ortProcedureResponse\022S\n\016ListProcedures\022\037" + - ".hbase.pb.ListProceduresRequest\032 .hbase." + - "pb.ListProceduresResponse\022M\n\014backupTable" + - "s\022\035.hbase.pb.BackupTablesRequest\032\036.hbase", - ".pb.BackupTablesResponse\022P\n\rrestoreTable" + - "s\022\036.hbase.pb.RestoreTablesRequest\032\037.hbas" + - "e.pb.RestoreTablesResponseBB\n*org.apache" + - ".hadoop.hbase.protobuf.generatedB\014Master" + - "ProtosH\001\210\001\001\240\001\001" + " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableTable" + + "Response\022\017\n\007proc_id\030\001 \001(\004\"h\n\023DisableTabl" + + "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.", + "TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non" + + "ce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022\017\n\007p" + + "roc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022\'\n\nt" + + "able_name\030\001 \002(\0132\023.hbase.pb.TableName\022+\n\014" + + "table_schema\030\002 \002(\0132\025.hbase.pb.TableSchem" + + "a\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004" + + ":\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_id\030\001 " + + "\001(\004\"~\n\026CreateNamespaceRequest\022:\n\023namespa" + + "ceDescriptor\030\001 \002(\0132\035.hbase.pb.NamespaceD" + + "escriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non", + "ce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceResponse\022\017" + + "\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceReques" + + "t\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_group\030" + + "\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027DeleteName" + + "spaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Modify" + + "NamespaceRequest\022:\n\023namespaceDescriptor\030" + + "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013" + + "nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*" + + "\n\027ModifyNamespaceResponse\022\017\n\007proc_id\030\001 \001" + + 
"(\004\"6\n\035GetNamespaceDescriptorRequest\022\025\n\rn", + "amespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDescri" + + "ptorResponse\022:\n\023namespaceDescriptor\030\001 \002(" + + "\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037List" + + "NamespaceDescriptorsRequest\"^\n ListNames" + + "paceDescriptorsResponse\022:\n\023namespaceDesc" + + "riptor\030\001 \003(\0132\035.hbase.pb.NamespaceDescrip" + + "tor\"?\n&ListTableDescriptorsByNamespaceRe" + + "quest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTabl" + + "eDescriptorsByNamespaceResponse\022*\n\013table" + + "Schema\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n L", + "istTableNamesByNamespaceRequest\022\025\n\rnames" + + "paceName\030\001 \002(\t\"K\n!ListTableNamesByNamesp" + + "aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" + + "b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" + + "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" + + "asterResponse\"\037\n\016BalanceRequest\022\r\n\005force" + + "\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" + + "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" + + "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" + + "ncerRunningResponse\022\032\n\022prev_balance_valu", + "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" + + "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" + + "\010\"w\n\035SetSplitOrMergeEnabledRequest\022\017\n\007en" + + "abled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swit" + + "ch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchTy" + + "pe\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\n" + + "prev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnable" + + "dRequest\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb" + + ".MasterSwitchType\"0\n\035IsSplitOrMergeEnabl" + + "edResponse\022\017\n\007enabled\030\001 \002(\010\"\022\n\020Normalize", + "Request\"+\n\021NormalizeResponse\022\026\n\016normaliz" + + "er_ran\030\001 \002(\010\")\n\033SetNormalizerRunningRequ" + + "est\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningR" + + "esponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034" + + "\n\032IsNormalizerEnabledRequest\".\n\033IsNormal" + + "izerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025" + + "RunCatalogScanRequest\"-\n\026RunCatalogScanR" + + "esponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCa" + + "talogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034E" + + "nableCatalogJanitorResponse\022\022\n\nprev_valu", + "e\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReques" + + "t\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005" + + "value\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsh" + + "ot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"" + + ",\n\020SnapshotResponse\022\030\n\020expected_timeout\030" + + "\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n" + + "\035GetCompletedSnapshotsResponse\0220\n\tsnapsh" + + "ots\030\001 \003(\0132\035.hbase.pb.SnapshotDescription" + + "\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 " + + "\002(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026De", + "leteSnapshotResponse\"I\n\026RestoreSnapshotR" + + "equest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snap" + + "shotDescription\"\031\n\027RestoreSnapshotRespon" + + "se\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030" + + "\001 
\001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026" + + "IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fa" + + "lse\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snapsho" + + "tDescription\"O\n\034IsRestoreSnapshotDoneReq" + + "uest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapsh" + + "otDescription\"4\n\035IsRestoreSnapshotDoneRe", + "sponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchema" + + "AlterStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + + ".hbase.pb.TableName\"T\n\034GetSchemaAlterSta" + + "tusResponse\022\035\n\025yet_to_update_regions\030\001 \001" + + "(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDe" + + "scriptorsRequest\022(\n\013table_names\030\001 \003(\0132\023." + + "hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inc" + + "lude_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespac" + + "e\030\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+" + + "\n\014table_schema\030\001 \003(\0132\025.hbase.pb.TableSch", + "ema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001" + + "(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n" + + "\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesRespons" + + "e\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableN" + + "ame\"?\n\024GetTableStateRequest\022\'\n\ntable_nam" + + "e\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTable" + + "StateResponse\022)\n\013table_state\030\001 \002(\0132\024.hba" + + "se.pb.TableState\"\031\n\027GetClusterStatusRequ" + + "est\"K\n\030GetClusterStatusResponse\022/\n\016clust" + + "er_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus", + "\"\030\n\026IsMasterRunningRequest\"4\n\027IsMasterRu" + + "nningResponse\022\031\n\021is_master_running\030\001 \002(\010" + + "\"I\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 " + + "\002(\0132\036.hbase.pb.ProcedureDescription\"F\n\025E" + + "xecProcedureResponse\022\030\n\020expected_timeout" + + "\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedu" + + "reDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase" + + ".pb.ProcedureDescription\"`\n\027IsProcedureD" + + "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snap" + + "shot\030\002 \001(\0132\036.hbase.pb.ProcedureDescripti", + "on\",\n\031GetProcedureResultRequest\022\017\n\007proc_" + + "id\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\022" + + "9\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedureRe" + + "sultResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023" + + "\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\te" + + "xception\030\005 \001(\0132!.hbase.pb.ForeignExcepti" + + "onMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUN" + + "NING\020\001\022\014\n\010FINISHED\020\002\"P\n\025AbortProcedureRe" + + "quest\022\017\n\007proc_id\030\001 \002(\004\022&\n\030may_interrupt_" + + "if_running\030\002 \001(\010:\004true\"6\n\026AbortProcedure", + "Response\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027" + + "\n\025ListProceduresRequest\"@\n\026ListProcedure" + + "sResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb." 
+ + "Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_na" + + "me\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespac" + + "e\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.T" + + "ableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_g" + + "lobals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.p" + + "b.ThrottleRequest\"\022\n\020SetQuotaResponse\"J\n" + + "\037MajorCompactionTimestampRequest\022\'\n\ntabl", + "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(Maj" + + "orCompactionTimestampForRegionRequest\022)\n" + + "\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier" + + "\"@\n MajorCompactionTimestampResponse\022\034\n\024" + + "compaction_timestamp\030\001 \002(\003\"\035\n\033SecurityCa" + + "pabilitiesRequest\"\354\001\n\034SecurityCapabiliti" + + "esResponse\022G\n\014capabilities\030\001 \003(\01621.hbase" + + ".pb.SecurityCapabilitiesResponse.Capabil" + + "ity\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICAT" + + "ION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTH", + "ORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017C" + + "ELL_VISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005" + + "SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022e\n\024" + + "GetSchemaAlterStatus\022%.hbase.pb.GetSchem" + + "aAlterStatusRequest\032&.hbase.pb.GetSchema" + + "AlterStatusResponse\022b\n\023GetTableDescripto" + + "rs\022$.hbase.pb.GetTableDescriptorsRequest" + + "\032%.hbase.pb.GetTableDescriptorsResponse\022" + + "P\n\rGetTableNames\022\036.hbase.pb.GetTableName" + + "sRequest\032\037.hbase.pb.GetTableNamesRespons", + "e\022Y\n\020GetClusterStatus\022!.hbase.pb.GetClus" + + "terStatusRequest\032\".hbase.pb.GetClusterSt" + + "atusResponse\022V\n\017IsMasterRunning\022 .hbase." + + "pb.IsMasterRunningRequest\032!.hbase.pb.IsM" + + "asterRunningResponse\022D\n\tAddColumn\022\032.hbas" + + "e.pb.AddColumnRequest\032\033.hbase.pb.AddColu" + + "mnResponse\022M\n\014DeleteColumn\022\035.hbase.pb.De" + + "leteColumnRequest\032\036.hbase.pb.DeleteColum" + + "nResponse\022M\n\014ModifyColumn\022\035.hbase.pb.Mod" + + "ifyColumnRequest\032\036.hbase.pb.ModifyColumn", + "Response\022G\n\nMoveRegion\022\033.hbase.pb.MoveRe" + + "gionRequest\032\034.hbase.pb.MoveRegionRespons" + + "e\022k\n\026DispatchMergingRegions\022\'.hbase.pb.D" + + "ispatchMergingRegionsRequest\032(.hbase.pb." + + "DispatchMergingRegionsResponse\022M\n\014Assign" + + "Region\022\035.hbase.pb.AssignRegionRequest\032\036." + + "hbase.pb.AssignRegionResponse\022S\n\016Unassig" + + "nRegion\022\037.hbase.pb.UnassignRegionRequest" + + "\032 .hbase.pb.UnassignRegionResponse\022P\n\rOf" + + "flineRegion\022\036.hbase.pb.OfflineRegionRequ", + "est\032\037.hbase.pb.OfflineRegionResponse\022J\n\013" + + "DeleteTable\022\034.hbase.pb.DeleteTableReques" + + "t\032\035.hbase.pb.DeleteTableResponse\022P\n\rtrun" + + "cateTable\022\036.hbase.pb.TruncateTableReques" + + "t\032\037.hbase.pb.TruncateTableResponse\022J\n\013En" + + "ableTable\022\034.hbase.pb.EnableTableRequest\032" + + "\035.hbase.pb.EnableTableResponse\022M\n\014Disabl" + + "eTable\022\035.hbase.pb.DisableTableRequest\032\036." 
+ + "hbase.pb.DisableTableResponse\022J\n\013ModifyT" + + "able\022\034.hbase.pb.ModifyTableRequest\032\035.hba", + "se.pb.ModifyTableResponse\022J\n\013CreateTable" + + "\022\034.hbase.pb.CreateTableRequest\032\035.hbase.p" + + "b.CreateTableResponse\022A\n\010Shutdown\022\031.hbas" + + "e.pb.ShutdownRequest\032\032.hbase.pb.Shutdown" + + "Response\022G\n\nStopMaster\022\033.hbase.pb.StopMa" + + "sterRequest\032\034.hbase.pb.StopMasterRespons" + + "e\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031" + + ".hbase.pb.BalanceResponse\022_\n\022SetBalancer" + + "Running\022#.hbase.pb.SetBalancerRunningReq" + + "uest\032$.hbase.pb.SetBalancerRunningRespon", + "se\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBal" + + "ancerEnabledRequest\032#.hbase.pb.IsBalance" + + "rEnabledResponse\022k\n\026SetSplitOrMergeEnabl" + + "ed\022\'.hbase.pb.SetSplitOrMergeEnabledRequ" + + "est\032(.hbase.pb.SetSplitOrMergeEnabledRes" + + "ponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase.p" + + "b.IsSplitOrMergeEnabledRequest\032\'.hbase.p" + + "b.IsSplitOrMergeEnabledResponse\022D\n\tNorma" + + "lize\022\032.hbase.pb.NormalizeRequest\032\033.hbase" + + ".pb.NormalizeResponse\022e\n\024SetNormalizerRu", + "nning\022%.hbase.pb.SetNormalizerRunningReq" + + "uest\032&.hbase.pb.SetNormalizerRunningResp" + + "onse\022b\n\023IsNormalizerEnabled\022$.hbase.pb.I" + + "sNormalizerEnabledRequest\032%.hbase.pb.IsN" + + "ormalizerEnabledResponse\022S\n\016RunCatalogSc" + + "an\022\037.hbase.pb.RunCatalogScanRequest\032 .hb" + + "ase.pb.RunCatalogScanResponse\022e\n\024EnableC" + + "atalogJanitor\022%.hbase.pb.EnableCatalogJa" + + "nitorRequest\032&.hbase.pb.EnableCatalogJan" + + "itorResponse\022n\n\027IsCatalogJanitorEnabled\022", + "(.hbase.pb.IsCatalogJanitorEnabledReques" + + "t\032).hbase.pb.IsCatalogJanitorEnabledResp" + + "onse\022^\n\021ExecMasterService\022#.hbase.pb.Cop" + + "rocessorServiceRequest\032$.hbase.pb.Coproc" + + "essorServiceResponse\022A\n\010Snapshot\022\031.hbase" + + ".pb.SnapshotRequest\032\032.hbase.pb.SnapshotR" + + "esponse\022h\n\025GetCompletedSnapshots\022&.hbase" + + ".pb.GetCompletedSnapshotsRequest\032\'.hbase" + + ".pb.GetCompletedSnapshotsResponse\022S\n\016Del" + + "eteSnapshot\022\037.hbase.pb.DeleteSnapshotReq", + "uest\032 .hbase.pb.DeleteSnapshotResponse\022S" + + "\n\016IsSnapshotDone\022\037.hbase.pb.IsSnapshotDo" + + "neRequest\032 .hbase.pb.IsSnapshotDoneRespo" + + "nse\022V\n\017RestoreSnapshot\022 .hbase.pb.Restor" + + "eSnapshotRequest\032!.hbase.pb.RestoreSnaps" + + "hotResponse\022h\n\025IsRestoreSnapshotDone\022&.h" + + "base.pb.IsRestoreSnapshotDoneRequest\032\'.h" + + "base.pb.IsRestoreSnapshotDoneResponse\022P\n" + + "\rExecProcedure\022\036.hbase.pb.ExecProcedureR" + + "equest\032\037.hbase.pb.ExecProcedureResponse\022", + "W\n\024ExecProcedureWithRet\022\036.hbase.pb.ExecP" + + "rocedureRequest\032\037.hbase.pb.ExecProcedure" + + "Response\022V\n\017IsProcedureDone\022 .hbase.pb.I" + + "sProcedureDoneRequest\032!.hbase.pb.IsProce" + + "dureDoneResponse\022V\n\017ModifyNamespace\022 .hb" + + "ase.pb.ModifyNamespaceRequest\032!.hbase.pb" + + ".ModifyNamespaceResponse\022V\n\017CreateNamesp" + + "ace\022 .hbase.pb.CreateNamespaceRequest\032!." 
+ + "hbase.pb.CreateNamespaceResponse\022V\n\017Dele" + + "teNamespace\022 .hbase.pb.DeleteNamespaceRe", + "quest\032!.hbase.pb.DeleteNamespaceResponse" + + "\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.Ge" + + "tNamespaceDescriptorRequest\032(.hbase.pb.G" + + "etNamespaceDescriptorResponse\022q\n\030ListNam" + + "espaceDescriptors\022).hbase.pb.ListNamespa" + + "ceDescriptorsRequest\032*.hbase.pb.ListName" + + "spaceDescriptorsResponse\022\206\001\n\037ListTableDe" + + "scriptorsByNamespace\0220.hbase.pb.ListTabl" + + "eDescriptorsByNamespaceRequest\0321.hbase.p" + + "b.ListTableDescriptorsByNamespaceRespons", + "e\022t\n\031ListTableNamesByNamespace\022*.hbase.p" + + "b.ListTableNamesByNamespaceRequest\032+.hba" + + "se.pb.ListTableNamesByNamespaceResponse\022" + + "P\n\rGetTableState\022\036.hbase.pb.GetTableStat" + + "eRequest\032\037.hbase.pb.GetTableStateRespons" + + "e\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaRequest" + + "\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLastM" + + "ajorCompactionTimestamp\022).hbase.pb.Major" + + "CompactionTimestampRequest\032*.hbase.pb.Ma" + + "jorCompactionTimestampResponse\022\212\001\n(getLa", + "stMajorCompactionTimestampForRegion\0222.hb" + + "ase.pb.MajorCompactionTimestampForRegion" + + "Request\032*.hbase.pb.MajorCompactionTimest" + + "ampResponse\022_\n\022getProcedureResult\022#.hbas" + + "e.pb.GetProcedureResultRequest\032$.hbase.p" + + "b.GetProcedureResultResponse\022h\n\027getSecur" + + "ityCapabilities\022%.hbase.pb.SecurityCapab" + + "ilitiesRequest\032&.hbase.pb.SecurityCapabi" + + "litiesResponse\022S\n\016AbortProcedure\022\037.hbase" + + ".pb.AbortProcedureRequest\032 .hbase.pb.Abo", + "rtProcedureResponse\022S\n\016ListProcedures\022\037." + + "hbase.pb.ListProceduresRequest\032 .hbase.p" + + "b.ListProceduresResponseBB\n*org.apache.h" + + "adoop.hbase.protobuf.generatedB\014MasterPr" + + "otosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -70128,30 +65476,6 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); - internal_static_hbase_pb_BackupTablesRequest_descriptor = - getDescriptor().getMessageTypes().get(109); - internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_BackupTablesRequest_descriptor, - new java.lang.String[] { "Type", "Tables", "TargetRootDir", "Workers", "Bandwidth", "BackupSetName", "NonceGroup", "Nonce", }); - internal_static_hbase_pb_BackupTablesResponse_descriptor = - getDescriptor().getMessageTypes().get(110); - internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_BackupTablesResponse_descriptor, - new java.lang.String[] { "ProcId", "BackupId", }); - internal_static_hbase_pb_RestoreTablesRequest_descriptor = - getDescriptor().getMessageTypes().get(111); - internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_RestoreTablesRequest_descriptor, - new java.lang.String[] { "BackupId", "Tables", "TargetTables", "BackupRootDir", "DependencyCheckOnly", "Overwrite", "NonceGroup", "Nonce", }); - 
internal_static_hbase_pb_RestoreTablesResponse_descriptor = - getDescriptor().getMessageTypes().get(112); - internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_RestoreTablesResponse_descriptor, - new java.lang.String[] { "ProcId", }); return null; } }; @@ -70159,7 +65483,6 @@ public final class MasterProtos { .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), diff --git hbase-protocol/src/main/protobuf/Backup.proto hbase-protocol/src/main/protobuf/Backup.proto index 7d1ec4b..2b3feeb 100644 --- hbase-protocol/src/main/protobuf/Backup.proto +++ hbase-protocol/src/main/protobuf/Backup.proto @@ -27,7 +27,7 @@ option optimize_for = SPEED; import "HBase.proto"; -enum FullTableBackupState { +/*enum FullTableBackupState { PRE_SNAPSHOT_TABLE = 1; SNAPSHOT_TABLES = 2; SNAPSHOT_COPY = 3; @@ -44,7 +44,7 @@ message SnapshotTableStateData { required TableName table = 1; required string snapshotName = 2; } - +*/ enum BackupType { FULL = 0; INCREMENTAL = 1; @@ -119,9 +119,9 @@ message BackupInfo { STORE_MANIFEST = 5; } } - +/* message BackupProcContext { required BackupInfo ctx = 1; repeated ServerTimestamp server_timestamp = 2; } - +*/ diff --git hbase-protocol/src/main/protobuf/Master.proto hbase-protocol/src/main/protobuf/Master.proto index 13dbd28..54d6c93 100644 --- hbase-protocol/src/main/protobuf/Master.proto +++ hbase-protocol/src/main/protobuf/Master.proto @@ -27,7 +27,6 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "HBase.proto"; -import "Backup.proto"; import "Client.proto"; import "ClusterStatus.proto"; import "ErrorHandling.proto"; @@ -541,42 +540,6 @@ message SecurityCapabilitiesResponse { repeated Capability capabilities = 1; } -message BackupTablesRequest { - required BackupType type = 1; - repeated TableName tables = 2; - required string target_root_dir = 3; - optional int64 workers = 4; - optional int64 bandwidth = 5; - optional string backup_set_name = 6; - optional uint64 nonce_group = 7 [default = 0]; - optional uint64 nonce = 8 [default = 0]; -} - -message BackupTablesResponse { - optional uint64 proc_id = 1; - optional string backup_id = 2; -} - -enum RestoreTablesState { - VALIDATION = 1; - RESTORE_IMAGES = 2; -} - -message RestoreTablesRequest { - required string backup_id = 1; - repeated TableName tables = 2; - repeated TableName target_tables = 3; - required string backup_root_dir = 4; - optional bool dependency_check_only = 5; - optional bool overwrite = 6; - optional uint64 nonce_group = 7 [default = 0]; - optional uint64 nonce = 8 [default = 0]; -} - -message RestoreTablesResponse { - optional uint64 proc_id = 1; -} - service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -852,11 +815,4 @@ service MasterService { rpc ListProcedures(ListProceduresRequest) returns(ListProceduresResponse); - /** backup table set */ - rpc backupTables(BackupTablesRequest) - returns(BackupTablesResponse); - - /** 
restore table set */ - rpc restoreTables(RestoreTablesRequest) - returns(RestoreTablesResponse); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java new file mode 100644 index 0000000..82bdd4e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Future; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +/** + * The administrative API for HBase Backup. Obtain an instance from + * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. + *

BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. + * + * @see Admin + * @since 2.0 + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving + +public interface BackupAdmin extends Closeable{ + + /** + * Backs up given list of tables fully. Synchronous operation. + * + * @param request BackupRequest instance which contains the following members: + * type whether the backup is full or incremental + * tableList list of tables to backup + * targetRootDir root directory for saving the backup + * workers number of parallel workers. -1 - system defined + * bandwidth bandwidth per worker in MB per second. -1 - unlimited + * @return the backup Id + */ + + public String backupTables(final BackupRequest userRequest) throws IOException; + + /** + * Backs up given list of tables fully. Asynchronous operation. + * + * @param request BackupRequest instance which contains the following members: + * type whether the backup is full or incremental + * tableList list of tables to backup + * targetRootDir root dir for saving the backup + * workers number of paralle workers. -1 - system defined + * bandwidth bandwidth per worker in MB per sec. -1 - unlimited + * @return the backup Id future + */ + public Future backupTablesAsync(final BackupRequest userRequest) throws IOException; + + /** + * Restore backup + * @param request - restore request + * @throws IOException exception + */ + public void restore(RestoreRequest request) throws IOException; + + /** + * Restore backup + * @param request - restore request + * @return Future which client can wait on + * @throws IOException exception + */ + public Future restoreAsync(RestoreRequest request) throws IOException; + + /** + * Describe backup image command + * @param backupId - backup id + * @return backup info + * @throws IOException exception + */ + public BackupInfo getBackupInfo(String backupId) throws IOException; + + /** + * Show backup progress command + * @param backupId - backup id (may be null) + * @return backup progress (0-100%), -1 if no active sessions + * or session not found + * @throws IOException exception + */ + public int getProgress(String backupId) throws IOException; + + /** + * Delete backup image command + * @param backupIds - backup id + * @return total number of deleted sessions + * @throws IOException exception + */ + public int deleteBackups(String[] backupIds) throws IOException; + + /** + * Show backup history command + * @param n - last n backup sessions + * @return list of backup infos + * @throws IOException exception + */ + public List getHistory(int n) throws IOException; + + + /** + * Show backup history command with filters + * @param n - last n backup sessions + * @param f - list of filters + * @return list of backup infos + * @throws IOException exception + */ + public List getHistory(int n, BackupInfo.Filter ... f) throws IOException; + + + /** + * Backup sets list command - list all backup sets. Backup set is + * a named group of tables. + * @return all registered backup sets + * @throws IOException exception + */ + public List listBackupSets() throws IOException; + + /** + * Backup set describe command. Shows list of tables in + * this particular backup set. 
+ * @param name set name + * @return backup set description or null + * @throws IOException exception + */ + public BackupSet getBackupSet(String name) throws IOException; + + /** + * Delete backup set command + * @param name - backup set name + * @return true, if success, false - otherwise + * @throws IOException exception + */ + public boolean deleteBackupSet(String name) throws IOException; + + /** + * Add tables to backup set command + * @param name - name of backup set. + * @param tables - list of tables to be added to this set. + * @throws IOException exception + */ + public void addToBackupSet(String name, TableName[] tables) throws IOException; + + /** + * Remove tables from backup set + * @param name - name of backup set. + * @param tables - list of tables to be removed from this set. + * @throws IOException exception + */ + public void removeFromBackupSet(String name, String[] tables) throws IOException; +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java new file mode 100644 index 0000000..be5ffea --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -0,0 +1,504 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
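For orientation, a minimal usage sketch of the BackupAdmin interface declared above, assuming the HBaseBackupAdmin implementation that this patch wires into RestoreDriver and BackupCommands further down; the table names and the target root directory are placeholders, not values taken from the patch.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupAdminUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = new HBaseBackupAdmin(conn)) {
      // Synchronous full backup of two tables to the given root directory.
      BackupRequest request = new BackupRequest()
          .setBackupType(BackupType.FULL)
          .setTableList(Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")))
          .setTargetRootDir("hdfs://namenode:8020/backup"); // placeholder path
      String backupId = admin.backupTables(request);

      // Inspect the finished session.
      BackupInfo info = admin.getBackupInfo(backupId);
      System.out.println(info.getShortDescription());
    }
  }
}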
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; +import org.apache.hadoop.hbase.util.Bytes; + + +/** + * An object to encapsulate the information for each backup request + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupInfo implements Comparable { + private static final Log LOG = LogFactory.getLog(BackupInfo.class); + + public static interface Filter { + + /** + * Filter interface + * @param info: backup info + * @return true if info passes filter, false otherwise + */ + public boolean apply(BackupInfo info); + } + // backup status flag + public static enum BackupState { + WAITING, RUNNING, COMPLETE, FAILED, ANY; + } + + // backup phase + public static enum BackupPhase { + SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; + } + + // backup id: a timestamp when we request the backup + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // target root directory for storing the backup files + private String targetRootDir; + + // overall backup state + private BackupState state; + + // overall backup phase + private BackupPhase phase; + + // overall backup failure message + private String failedMsg; + + // backup status map for all tables + private Map backupStatusMap; + + // actual start timestamp of the backup process + private long startTs; + + // actual end timestamp of the backup process, could be fail or complete + private long endTs; + + // the total bytes of incremental logs copied + private long totalBytesCopied; + + // for incremental backup, the location of the backed-up hlogs + private String hlogTargetDir = null; + + // incremental backup file list + transient private List incrBackupFileList; + + // new region server log timestamps for table set after distributed log roll + // key - table name, value - map of RegionServer hostname -> last log rolled timestamp + transient private HashMap> tableSetTimestampMap; + + // backup progress in %% (0-100) + private int progress; + + // distributed job id + private String jobId; + + // Number of parallel workers. -1 - system defined + private int workers = -1; + + // Bandwidth per worker in MB per sec. 
-1 - unlimited + private long bandwidth = -1; + + public BackupInfo() { + backupStatusMap = new HashMap(); + } + + public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { + this(); + this.backupId = backupId; + this.type = type; + this.targetRootDir = targetRootDir; + if (LOG.isDebugEnabled()) { + LOG.debug("CreateBackupContext: " + tables.length + " " + tables[0]); + } + this.addTables(tables); + + if (type == BackupType.INCREMENTAL) { + setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); + } + + this.startTs = 0; + this.endTs = 0; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public int getWorkers() { + return workers; + } + + public void setWorkers(int workers) { + this.workers = workers; + } + + public long getBandwidth() { + return bandwidth; + } + + public void setBandwidth(long bandwidth) { + this.bandwidth = bandwidth; + } + + public void setBackupStatusMap(Map backupStatusMap) { + this.backupStatusMap = backupStatusMap; + } + + public HashMap> getTableSetTimestampMap() { + return tableSetTimestampMap; + } + + public void + setTableSetTimestampMap(HashMap> tableSetTimestampMap) { + this.tableSetTimestampMap = tableSetTimestampMap; + } + + public String getHlogTargetDir() { + return hlogTargetDir; + } + + public void setType(BackupType type) { + this.type = type; + } + + public void setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + } + + public void setTotalBytesCopied(long totalBytesCopied) { + this.totalBytesCopied = totalBytesCopied; + } + + /** + * Set progress (0-100%) + * @param msg progress value + */ + + public void setProgress(int p) { + this.progress = p; + } + + /** + * Get current progress + */ + public int getProgress() { + return progress; + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupStatus getBackupStatus(TableName table) { + return this.backupStatusMap.get(table); + } + + public String getFailedMsg() { + return failedMsg; + } + + public void setFailedMsg(String failedMsg) { + this.failedMsg = failedMsg; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getEndTs() { + return endTs; + } + + public void setEndTs(long endTs) { + this.endTs = endTs; + } + + public long getTotalBytesCopied() { + return totalBytesCopied; + } + + public BackupState getState() { + return state; + } + + public void setState(BackupState flag) { + this.state = flag; + } + + public BackupPhase getPhase() { + return phase; + } + + public void setPhase(BackupPhase phase) { + this.phase = phase; + } + + public BackupType getType() { + return type; + } + + public void setSnapshotName(TableName table, String snapshotName) { + this.backupStatusMap.get(table).setSnapshotName(snapshotName); + } + + public String getSnapshotName(TableName table) { + return this.backupStatusMap.get(table).getSnapshotName(); + } + + public List getSnapshotNames() { + List snapshotNames = new ArrayList(); + for (BackupStatus backupStatus : this.backupStatusMap.values()) { + snapshotNames.add(backupStatus.getSnapshotName()); + } + return snapshotNames; + } + + public Set getTables() { + return this.backupStatusMap.keySet(); + } + + public List getTableNames() { + return new ArrayList(backupStatusMap.keySet()); + } + + public void 
addTables(TableName[] tables) { + for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + + public void setTables(List tables) { + this.backupStatusMap.clear(); + for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + + public String getTargetRootDir() { + return targetRootDir; + } + + public void setHlogTargetDir(String hlogTagetDir) { + this.hlogTargetDir = hlogTagetDir; + } + + public String getHLogTargetDir() { + return hlogTargetDir; + } + + public List getIncrBackupFileList() { + return incrBackupFileList; + } + + public void setIncrBackupFileList(List incrBackupFileList) { + this.incrBackupFileList = incrBackupFileList; + } + + /** + * Set the new region server log timestamps after distributed log roll + * @param newTableSetTimestampMap table timestamp map + */ + public void + setIncrTimestampMap(HashMap> newTableSetTimestampMap) { + this.tableSetTimestampMap = newTableSetTimestampMap; + } + + /** + * Get new region server log timestamps after distributed log roll + * @return new region server log timestamps + */ + public HashMap> getIncrTimestampMap() { + return this.tableSetTimestampMap; + } + + public TableName getTableBySnapshot(String snapshotName) { + for (Entry entry : this.backupStatusMap.entrySet()) { + if (snapshotName.equals(entry.getValue().getSnapshotName())) { + return entry.getKey(); + } + } + return null; + } + + public BackupProtos.BackupInfo toProtosBackupInfo() { + BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder(); + builder.setBackupId(getBackupId()); + setBackupStatusMap(builder); + builder.setEndTs(getEndTs()); + if (getFailedMsg() != null) { + builder.setFailedMessage(getFailedMsg()); + } + if (getState() != null) { + builder.setState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name())); + } + if (getPhase() != null) { + builder.setPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name())); + } + + builder.setProgress(getProgress()); + builder.setStartTs(getStartTs()); + builder.setTargetRootDir(getTargetRootDir()); + builder.setType(BackupProtos.BackupType.valueOf(getType().name())); + builder.setWorkersNumber(workers); + builder.setBandwidth(bandwidth); + if (jobId != null) { + builder.setJobId(jobId); + } + return builder.build(); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof BackupInfo) { + BackupInfo other = (BackupInfo) obj; + try { + return Bytes.equals(toByteArray(), other.toByteArray()); + } catch (IOException e) { + LOG.error(e); + return false; + } + } else { + return false; + } + } + + public byte[] toByteArray() throws IOException { + return toProtosBackupInfo().toByteArray(); + } + + private void setBackupStatusMap(Builder builder) { + for (Entry entry : backupStatusMap.entrySet()) { + builder.addTableBackupStatus(entry.getValue().toProto()); + } + } + + public static BackupInfo fromByteArray(byte[] data) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseFrom(data)); + } + + public static BackupInfo fromStream(final InputStream stream) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); + } + + public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { + BackupInfo context = new BackupInfo(); + context.setBackupId(proto.getBackupId()); + 
context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); + context.setEndTs(proto.getEndTs()); + if (proto.hasFailedMessage()) { + context.setFailedMsg(proto.getFailedMessage()); + } + if (proto.hasState()) { + context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); + } + + context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), + proto.getBackupId())); + + if (proto.hasPhase()) { + context.setPhase(BackupPhase.valueOf(proto.getPhase().name())); + } + if (proto.hasProgress()) { + context.setProgress(proto.getProgress()); + } + context.setStartTs(proto.getStartTs()); + context.setTargetRootDir(proto.getTargetRootDir()); + context.setType(BackupType.valueOf(proto.getType().name())); + context.setWorkers(proto.getWorkersNumber()); + context.setBandwidth(proto.getBandwidth()); + if (proto.hasJobId()) { + context.setJobId(proto.getJobId()); + } + return context; + } + + private static Map toMap(List list) { + HashMap map = new HashMap<>(); + for (TableBackupStatus tbs : list) { + map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); + } + return map; + } + + public String getShortDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("ID : " + backupId).append("\n"); + sb.append("Type : " + getType()).append("\n"); + sb.append("Tables : " + getTableListAsString()).append("\n"); + sb.append("State : " + getState()).append("\n"); + Date date = null; + Calendar cal = Calendar.getInstance(); + cal.setTimeInMillis(getStartTs()); + date = cal.getTime(); + sb.append("Start time : " + date).append("\n"); + if (state == BackupState.FAILED) { + sb.append("Failed message : " + getFailedMsg()).append("\n"); + } else if (state == BackupState.RUNNING) { + sb.append("Phase : " + getPhase()).append("\n"); + } else if (state == BackupState.COMPLETE) { + cal = Calendar.getInstance(); + cal.setTimeInMillis(getEndTs()); + date = cal.getTime(); + sb.append("End time : " + date).append("\n"); + } + sb.append("Progress : " + getProgress()).append("\n"); + return sb.toString(); + } + + public String getStatusAndProgressAsString() { + StringBuilder sb = new StringBuilder(); + sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) + .append(" progress: ").append(getProgress()); + return sb.toString(); + } + + public String getTableListAsString() { + return StringUtils.join(backupStatusMap.keySet(), ","); + } + + @Override + public int compareTo(BackupInfo o) { + Long thisTS = new Long(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); + Long otherTS = new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java new file mode 100644 index 0000000..d141239 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
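A small fragment illustrating the BackupInfo.Filter contract defined above, combined with BackupAdmin.getHistory(int, BackupInfo.Filter...); it assumes an open BackupAdmin handle named admin, as in the earlier sketch, and keeps only completed full backups.

// assumes: BackupAdmin admin (open handle), as in the BackupAdmin sketch above
BackupInfo.Filter completedFullOnly = new BackupInfo.Filter() {
  @Override
  public boolean apply(BackupInfo info) {
    return info.getState() == BackupInfo.BackupState.COMPLETE
        && info.getType() == BackupType.FULL;
  }
};
List<BackupInfo> history = admin.getHistory(10, completedFullOnly);
for (BackupInfo info : history) {
  System.out.println(info.getStatusAndProgressAsString());
}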
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO class for backup request + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class BackupRequest { + private BackupType type; + private List tableList; + private String targetRootDir; + private int workers = -1; + private long bandwidth = -1L; + private String backupSetName; + + public BackupRequest() { + } + + public BackupRequest setBackupType(BackupType type) { + this.type = type; + return this; + } + public BackupType getBackupType() { + return this.type; + } + + public BackupRequest setTableList(List tableList) { + this.tableList = tableList; + return this; + } + public List getTableList() { + return this.tableList; + } + + public BackupRequest setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + return this; + } + public String getTargetRootDir() { + return this.targetRootDir; + } + + public BackupRequest setWorkers(int workers) { + this.workers = workers; + return this; + } + public int getWorkers() { + return this.workers; + } + + public BackupRequest setBandwidth(long bandwidth) { + this.bandwidth = bandwidth; + return this; + } + public long getBandwidth() { + return this.bandwidth; + } + + public String getBackupSetName() { + return backupSetName; + } + + public void setBackupSetName(String backupSetName) { + this.backupSetName = backupSetName; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java new file mode 100644 index 0000000..c82e05a --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
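A short fragment showing how an incremental BackupRequest might be assembled with the fluent setters defined above; the table name, root directory, worker count, bandwidth and set name are illustrative placeholders. Note that setBackupSetName(), unlike the other setters, returns void and so cannot continue the chain.

BackupRequest request = new BackupRequest()
    .setBackupType(BackupType.INCREMENTAL)
    .setTableList(Arrays.asList(TableName.valueOf("t1")))
    .setTargetRootDir("hdfs://namenode:8020/backup") // placeholder root dir
    .setWorkers(4)        // 4 parallel MapReduce workers; -1 = system defined
    .setBandwidth(100L);  // 100 MB/sec per worker; -1 = unlimited
request.setBackupSetName("nightly"); // void setter, called separately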
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.Serializable; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * Backup status and related information encapsulated for a table. + * At this moment only TargetDir and SnapshotName is encapsulated here. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupStatus implements Serializable { + + private static final long serialVersionUID = -5968397963548535982L; + + // table name for backup + private TableName table; + + // target directory of the backup image for this table + private String targetDir; + + // snapshot name for offline/online snapshot + private String snapshotName = null; + + public BackupStatus() { + + } + + public BackupStatus(TableName table, String targetRootDir, String backupId) { + this.table = table; + this.targetDir = BackupClientUtil.getTableBackupDir(targetRootDir, backupId, table); + } + + public String getSnapshotName() { + return snapshotName; + } + + public void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + public String getTargetDir() { + return targetDir; + } + + public TableName getTable() { + return table; + } + + public void setTable(TableName table) { + this.table = table; + } + + public void setTargetDir(String targetDir) { + this.targetDir = targetDir; + } + + public static BackupStatus convert(BackupProtos.TableBackupStatus proto) + { + BackupStatus bs = new BackupStatus(); + bs.setTable(ProtobufUtil.toTableName(proto.getTable())); + bs.setTargetDir(proto.getTargetDir()); + if(proto.hasSnapshot()){ + bs.setSnapshotName(proto.getSnapshot()); + } + return bs; + } + + public BackupProtos.TableBackupStatus toProto() { + BackupProtos.TableBackupStatus.Builder builder = + BackupProtos.TableBackupStatus.newBuilder(); + if(snapshotName != null) { + builder.setSnapshot(snapshotName); + } + builder.setTable(ProtobufUtil.toProtoTableName(table)); + builder.setTargetDir(targetDir); + return builder.build(); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index d3237f7..ce3bb65 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; -import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.util.AbstractHBaseTool; @@ -124,7 +124,7 @@ public class RestoreDriver extends AbstractHBaseTool { String tables = null; String tableMapping = null; try (final Connection conn = ConnectionFactory.createConnection(conf); - BackupAdmin client = 
conn.getAdmin().getBackupAdmin();) { + BackupAdmin client = new HBaseBackupAdmin(conn);) { // Check backup set if (cmd.hasOption(OPTION_SET)) { String setName = cmd.getOptionValue(OPTION_SET); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java new file mode 100644 index 0000000..7490d20 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO class for restore request + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RestoreRequest { + + private String backupRootDir; + private String backupId; + private boolean check = false; + private TableName[] fromTables; + private TableName[] toTables; + private boolean overwrite = false; + + public RestoreRequest() { + } + + public String getBackupRootDir() { + return backupRootDir; + } + + public RestoreRequest setBackupRootDir(String backupRootDir) { + this.backupRootDir = backupRootDir; + return this; + } + + public String getBackupId() { + return backupId; + } + + public RestoreRequest setBackupId(String backupId) { + this.backupId = backupId; + return this; + } + + public boolean isCheck() { + return check; + } + + public RestoreRequest setCheck(boolean check) { + this.check = check; + return this; + } + + public TableName[] getFromTables() { + return fromTables; + } + + public RestoreRequest setFromTables(TableName[] fromTables) { + this.fromTables = fromTables; + return this; + } + + public TableName[] getToTables() { + return toTables; + } + + public RestoreRequest setToTables(TableName[] toTables) { + this.toTables = toTables; + return this; + } + + public boolean isOverwrite() { + return overwrite; + } + + public RestoreRequest setOverwrite(boolean overwrite) { + this.overwrite = overwrite; + return this; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java new file mode 100644 index 0000000..478d62d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -0,0 +1,720 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
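A sketch of driving a restore through the RestoreRequest builder above, following the same pattern RestoreDriver now uses (new HBaseBackupAdmin(conn)); the backup id, root directory and table names are placeholders, and the boolean flags are interpreted from the corresponding proto/driver options.

Configuration conf = HBaseConfiguration.create();
RestoreRequest restoreRequest = new RestoreRequest()
    .setBackupRootDir("hdfs://namenode:8020/backup")   // placeholder
    .setBackupId("backup_1468352856932")               // placeholder backup id
    .setFromTables(new TableName[] { TableName.valueOf("t1") })
    .setToTables(new TableName[] { TableName.valueOf("t1_restored") })
    .setCheck(false)      // false = actually restore; true would only run the dependency check
    .setOverwrite(false); // do not overwrite existing table data
try (Connection conn = ConnectionFactory.createConnection(conf);
     BackupAdmin client = new HBaseBackupAdmin(conn)) {
  client.restore(restoreRequest);
}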
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +import com.google.common.collect.Lists; + +/** + * General backup commands, options and usage messages + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupCommands { + + public final static String INCORRECT_USAGE = "Incorrect usage"; + + public static final String USAGE = "Usage: hbase backup COMMAND [command-specific arguments]\n" + + "where COMMAND is one of:\n" + + " create create a new backup image\n" + + " delete delete an existing backup image\n" + + " describe show the detailed information of a backup image\n" + + " history show history of all successful backups\n" + + " progress show the progress of the latest backup request\n" + + " set backup set management\n" + + "Run \'hbase backup COMMAND -h\' to see help message for each command\n"; + + public static final String CREATE_CMD_USAGE = + "Usage: hbase backup create [tables] [-set name] " + + "[-w workers][-b bandwith]\n" + + " type \"full\" to create a full backup image\n" + + " \"incremental\" to create an incremental backup image\n" + + " BACKUP_ROOT The full root path to store the backup image,\n" + + " the prefix can be hdfs, webhdfs or gpfs\n" + + "Options:\n" + + " tables If no tables (\"\") are specified, all tables are backed up.\n" + + " Otherwise it is a comma separated list of tables.\n" + + " -w number of parallel workers (MapReduce tasks).\n" + + " -b bandwith per one worker (MapReduce task) in MBs per sec\n" + + " -set name of backup set to use (mutually exclusive with [tables])" ; + + public static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress \n" + + " backupId backup image id\n"; + public static final String NO_INFO_FOUND = "No info was found for backup id: "; + + public static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup decsribe \n" + + " backupId backup image id\n"; + + public static final String HISTORY_CMD_USAGE = + "Usage: hbase backup history [-path BACKUP_ROOT] [-n N] 
[-t table]\n" + + " -n N show up to N last backup sessions, default - 10\n" + + " -path backup root path\n" + + " -t table table name. If specified, only backup images which contain this table\n" + + " will be listed." ; + + + public static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n" + + " backupId backup image id\n"; + + public static final String CANCEL_CMD_USAGE = "Usage: hbase backup cancel \n" + + " backupId backup image id\n"; + + public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" + + " name Backup set name\n" + + " tables If no tables (\"\") are specified, all tables will belong to the set.\n" + + " Otherwise it is a comma separated list of tables.\n" + + "COMMAND is one of:\n" + + " add add tables to a set, create a set if needed\n" + + " remove remove tables from a set\n" + + " list list all backup sets in the system\n" + + " describe describe set\n" + + " delete delete backup set\n"; + + public static abstract class Command extends Configured { + CommandLine cmdline; + + Command(Configuration conf) { + super(conf); + } + + public void execute() throws IOException + { + if (cmdline.hasOption("h") || cmdline.hasOption("help")) { + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } + + protected abstract void printUsage(); + } + + private BackupCommands() { + throw new AssertionError("Instantiating utility class..."); + } + + public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) { + Command cmd = null; + switch (type) { + case CREATE: + cmd = new CreateCommand(conf, cmdline); + break; + case DESCRIBE: + cmd = new DescribeCommand(conf, cmdline); + break; + case PROGRESS: + cmd = new ProgressCommand(conf, cmdline); + break; + case DELETE: + cmd = new DeleteCommand(conf, cmdline); + break; + case CANCEL: + cmd = new CancelCommand(conf, cmdline); + break; + case HISTORY: + cmd = new HistoryCommand(conf, cmdline); + break; + case SET: + cmd = new BackupSetCommand(conf, cmdline); + break; + case HELP: + default: + cmd = new HelpCommand(conf, cmdline); + break; + } + return cmd; + } + + static int numOfArgs(String[] args) { + if (args == null) return 0; + return args.length; + } + + public static class CreateCommand extends Command { + + CreateCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + if (cmdline == null || cmdline.getArgs() == null) { + System.err.println("ERROR: missing arguments"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + String[] args = cmdline.getArgs(); + if (args.length < 3 || args.length > 4) { + System.err.println("ERROR: wrong number of arguments: "+ args.length); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + if (!BackupType.FULL.toString().equalsIgnoreCase(args[1]) + && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) { + System.err.println("ERROR: invalid backup type: "+ args[1]); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String tables = null; + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + + // Check backup set + String setName = null; + if (cmdline.hasOption("set")) { + setName = cmdline.getOptionValue("set"); + tables = getTablesForSet(setName, conf); + + if (tables == null) { + System.err.println("ERROR: Backup set '" + setName+ "' is either empty or does not exist"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } else { + tables = (args.length == 4) ? args[3] : null; + } + int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1; + int workers = cmdline.hasOption('w') ? Integer.parseInt(cmdline.getOptionValue('w')) : -1; + + try (Connection conn = ConnectionFactory.createConnection(getConf()); + HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) + .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null) + .setTargetRootDir(args[2]).setWorkers(workers).setBandwidth(bandwidth) + .setBackupSetName(setName); + + String backupId = admin.backupTables(request); + System.out.println("Backup session "+ backupId+" finished. Status: SUCCESS"); + } catch (IOException e) { + System.err.println("Backup session finished. Status: FAILURE"); + throw e; + } + } + + + + private String getTablesForSet(String name, Configuration conf) + throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List tables = table.describeBackupSet(name); + if (tables == null) return null; + return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + } + } + + @Override + protected void printUsage() { + System.err.println(CREATE_CMD_USAGE); + } + } + + private static class HelpCommand extends Command { + + HelpCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + if (cmdline == null) { + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String[] args = cmdline.getArgs(); + if (args == null || args.length == 0) { + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + if (args.length != 2) { + System.err.println("Only supports help message of a single command type"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String type = args[1]; + + if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) { + System.out.println(CREATE_CMD_USAGE); + } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(type)) { + System.out.println(DESCRIBE_CMD_USAGE); + } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(type)) { + System.out.println(HISTORY_CMD_USAGE); + } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(type)) { + System.out.println(PROGRESS_CMD_USAGE); + } else if (BackupCommand.DELETE.name().equalsIgnoreCase(type)) { + System.out.println(DELETE_CMD_USAGE); + } else if (BackupCommand.CANCEL.name().equalsIgnoreCase(type)) { + System.out.println(CANCEL_CMD_USAGE); + } else if (BackupCommand.SET.name().equalsIgnoreCase(type)) { + System.out.println(SET_CMD_USAGE); + } else { + System.out.println("Unknown command : " + type); + printUsage(); + } + } + + @Override + protected void printUsage() { + System.err.println(USAGE); + } + } + + private static class DescribeCommand extends Command { + + DescribeCommand(Configuration conf, CommandLine cmdline) { + 
super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + if (cmdline == null || cmdline.getArgs() == null) { + System.err.println("ERROR: missing arguments"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + String[] args = cmdline.getArgs(); + if (args.length != 2) { + System.err.println("ERROR: wrong number of arguments"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String backupId = args[1]; + Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable sysTable = new BackupSystemTable(conn);) { + BackupInfo info = sysTable.readBackupInfo(backupId); + if (info == null) { + System.err.println("ERROR: " + backupId + " does not exist"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + System.out.println(info.getShortDescription()); + } + } + + @Override + protected void printUsage() { + System.err.println(DESCRIBE_CMD_USAGE); + } + } + + private static class ProgressCommand extends Command { + + ProgressCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + + if (cmdline == null || cmdline.getArgs() == null || + cmdline.getArgs().length == 1) { + System.err.println("No backup id was specified, " + + "will retrieve the most recent (ongoing) sessions"); + } + String[] args = cmdline.getArgs(); + if (args.length > 2) { + System.err.println("ERROR: wrong number of arguments: " + args.length); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String backupId = (args == null || args.length <= 1) ? null : args[1]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable sysTable = new BackupSystemTable(conn);){ + BackupInfo info = sysTable.readBackupInfo(backupId); + int progress = info == null? -1: info.getProgress(); + if(progress < 0){ + System.err.println(NO_INFO_FOUND + backupId); + } else{ + System.out.println(backupId+" progress=" + progress+"%"); + } + } + } + + @Override + protected void printUsage() { + System.err.println(PROGRESS_CMD_USAGE); + } + } + + private static class DeleteCommand extends Command { + + DeleteCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.err.println("No backup id(s) was specified"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String[] args = cmdline.getArgs(); + + String[] backupIds = new String[args.length - 1]; + System.arraycopy(args, 1, backupIds, 0, backupIds.length); + Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + int deleted = admin.deleteBackups(args); + System.out.println("Deleted " + deleted + " backups. 
Total requested: " + args.length); + } + + } + + @Override + protected void printUsage() { + System.err.println(DELETE_CMD_USAGE); + } + } + +// TODO Cancel command + + private static class CancelCommand extends Command { + + CancelCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.out.println("No backup id(s) was specified, will use the most recent one"); + } + String[] args = cmdline.getArgs(); + String backupId = args == null || args.length == 0 ? null : args[1]; + Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);) { + // TODO cancel backup + } + } + + @Override + protected void printUsage() { + } + } + + private static class HistoryCommand extends Command { + + private final static int DEFAULT_HISTORY_LENGTH = 10; + + HistoryCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + + super.execute(); + + int n = parseHistoryLength(); + final TableName tableName = getTableName(); + final String setName = getTableSetName(); + BackupInfo.Filter tableNameFilter = new BackupInfo.Filter() { + @Override + public boolean apply(BackupInfo info) { + if (tableName == null) return true; + List names = info.getTableNames(); + return names.contains(tableName); + } + }; + BackupInfo.Filter tableSetFilter = new BackupInfo.Filter() { + @Override + public boolean apply(BackupInfo info) { + if (setName == null) return true; + String backupId = info.getBackupId(); + return backupId.startsWith(setName); + } + }; + Path backupRootPath = getBackupRootPath(); + List history = null; + Configuration conf = getConf() != null ? 
getConf() : HBaseConfiguration.create(); + if (backupRootPath == null) { + // Load from hbase:backup + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable sysTable = new BackupSystemTable(conn);) { + + history = sysTable.getBackupHistory(n, tableNameFilter, tableSetFilter); + } + } else { + // load from backup FS + history = BackupClientUtil.getHistory(conf, n, backupRootPath, + tableNameFilter, tableSetFilter); + } + for (BackupInfo info : history) { + System.out.println(info.getShortDescription()); + } + } + + private Path getBackupRootPath() throws IOException { + String value = null; + try{ + value = cmdline.getOptionValue("path"); + if (value == null) return null; + return new Path(value); + } catch (IllegalArgumentException e) { + System.err.println("ERROR: Illegal argument for backup root path: "+ value); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } + + private TableName getTableName() throws IOException { + String value = cmdline.getOptionValue("t"); + if (value == null) return null; + try{ + return TableName.valueOf(value); + } catch (IllegalArgumentException e){ + System.err.println("Illegal argument for table name: "+ value); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } + + private String getTableSetName() throws IOException { + String value = cmdline.getOptionValue("set"); + return value; + } + + private int parseHistoryLength() throws IOException { + String value = cmdline.getOptionValue("n"); + try{ + if (value == null) return DEFAULT_HISTORY_LENGTH; + return Integer.parseInt(value); + } catch(NumberFormatException e) { + System.err.println("Illegal argument for history length: "+ value); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } + + @Override + protected void printUsage() { + System.err.println(HISTORY_CMD_USAGE); + } + } + + private static class BackupSetCommand extends Command { + private final static String SET_ADD_CMD = "add"; + private final static String SET_REMOVE_CMD = "remove"; + private final static String SET_DELETE_CMD = "delete"; + private final static String SET_DESCRIBE_CMD = "describe"; + private final static String SET_LIST_CMD = "list"; + + BackupSetCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + super.execute(); + // Command-line must have at least one element + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.err.println("ERROR: Command line format"); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String[] args = cmdline.getArgs(); + String cmdStr = args[1]; + BackupCommand cmd = getCommand(cmdStr); + + switch (cmd) { + case SET_ADD: + processSetAdd(args); + break; + case SET_REMOVE: + processSetRemove(args); + break; + case SET_DELETE: + processSetDelete(args); + break; + case SET_DESCRIBE: + processSetDescribe(args); + break; + case SET_LIST: + processSetList(args); + break; + default: + break; + + } + } + + private void processSetList(String[] args) throws IOException { + // List all backup set names + // does not expect any args + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + List list = admin.listBackupSets(); + for(BackupSet bs: list){ + System.out.println(bs); + } + } + } + + private void processSetDescribe(String[] args) throws IOException { + if (args == null || args.length != 3) { + System.err.println("ERROR: Wrong number of args for 'set describe' command: " + + numOfArgs(args)); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + String setName = args[2]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable sysTable = new BackupSystemTable(conn);){ + List tables = sysTable.describeBackupSet(setName); + BackupSet set = tables == null? null : new BackupSet(setName, tables); + if(set == null) { + System.out.println("Set '"+setName+"' does not exist."); + } else{ + System.out.println(set); + } + } + } + + private void processSetDelete(String[] args) throws IOException { + if (args == null || args.length != 3) { + System.err.println("ERROR: Wrong number of args for 'set delete' command: " + + numOfArgs(args)); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + String setName = args[2]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + boolean result = admin.deleteBackupSet(setName); + if(result){ + System.out.println("Delete set "+setName+" OK."); + } else{ + System.out.println("Set "+setName+" does not exist"); + } + } + } + + private void processSetRemove(String[] args) throws IOException { + if (args == null || args.length != 4) { + System.err.println("ERROR: Wrong number of args for 'set remove' command: " + + numOfArgs(args)); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + + String setName = args[2]; + String[] tables = args[3].split(","); + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + admin.removeFromBackupSet(setName, tables); + } + } + + private void processSetAdd(String[] args) throws IOException { + if (args == null || args.length != 4) { + System.err.println("ERROR: Wrong number of args for 'set add' command: " + + numOfArgs(args)); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + String setName = args[2]; + String[] tables = args[3].split(","); + TableName[] tableNames = new TableName[tables.length]; + for(int i=0; i < tables.length; i++){ + tableNames[i] = TableName.valueOf(tables[i]); + } + Configuration conf = getConf() != null? 
getConf():HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final HBaseBackupAdmin admin = new HBaseBackupAdmin(conn);){ + admin.addToBackupSet(setName, tableNames); + } + + } + + private BackupCommand getCommand(String cmdStr) throws IOException { + if (cmdStr.equals(SET_ADD_CMD)) { + return BackupCommand.SET_ADD; + } else if (cmdStr.equals(SET_REMOVE_CMD)) { + return BackupCommand.SET_REMOVE; + } else if (cmdStr.equals(SET_DELETE_CMD)) { + return BackupCommand.SET_DELETE; + } else if (cmdStr.equals(SET_DESCRIBE_CMD)) { + return BackupCommand.SET_DESCRIBE; + } else if (cmdStr.equals(SET_LIST_CMD)) { + return BackupCommand.SET_LIST; + } else { + System.err.println("ERROR: Unknown command for 'set' :" + cmdStr); + printUsage(); + throw new IOException(INCORRECT_USAGE); + } + } + + @Override + protected void printUsage() { + System.err.println(SET_CMD_USAGE); + } + + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java new file mode 100644 index 0000000..ca204b4 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
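Note: each of the "set" handlers above is a thin wrapper over HBaseBackupAdmin. Below is a minimal sketch of driving that API directly against a running cluster; the import locations for HBaseBackupAdmin and BackupSet are assumptions, and only methods already used by the command handlers are called.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupSet;                 // assumed location
import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;     // assumed location
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupSetApiSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         HBaseBackupAdmin admin = new HBaseBackupAdmin(conn)) {
      // same call the "set add" handler makes
      admin.addToBackupSet("nightly",
          new TableName[] { TableName.valueOf("t1"), TableName.valueOf("t2") });
      // "set list"
      List<BackupSet> sets = admin.listBackupSets();
      for (BackupSet bs : sets) {
        System.out.println(bs);
      }
      // "set remove" and "set delete"
      admin.removeFromBackupSet("nightly", new String[] { "t2" });
      admin.deleteBackupSet("nightly");
    }
  }
}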
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Backup exception + */ +@SuppressWarnings("serial") +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupException extends HBaseIOException { + private BackupInfo description; + + /** + * Some exception happened for a backup and don't even know the backup that it was about + * @param msg Full description of the failure + */ + public BackupException(String msg) { + super(msg); + } + + /** + * Some exception happened for a backup with a cause + * @param cause the cause + */ + public BackupException(Throwable cause) { + super(cause); + } + + /** + * Exception for the given backup that has no previous root cause + * @param msg reason why the backup failed + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, BackupInfo desc) { + super(msg); + this.description = desc; + } + + /** + * Exception for the given backup due to another exception + * @param msg reason why the backup failed + * @param cause root cause of the failure + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, Throwable cause, BackupInfo desc) { + super(msg, cause); + this.description = desc; + } + + /** + * Exception when the description of the backup cannot be determined, due to some other root + * cause + * @param message description of what caused the failure + * @param e root cause + */ + public BackupException(String message, Exception e) { + super(message, e); + } + + public BackupInfo getBackupContext() { + return this.description; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java new file mode 100644 index 0000000..d10713d --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -0,0 +1,791 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
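Note: a short illustration of how BackupException is intended to be used: wrap the root cause together with the BackupInfo describing the failing session, and recover that context at the catch site. Only constructors and accessors defined above are used; the backup id and the failing phase are made up.

import java.io.IOException;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupException;

public class BackupExceptionSketch {
  // stand-in for a real backup phase that can fail with an IOException
  static void copySnapshot(BackupInfo session) throws BackupException {
    try {
      throw new IOException("snapshot export failed");
    } catch (IOException e) {
      // attach the failing session so callers can report which backup broke
      throw new BackupException("snapshot copy phase failed", e, session);
    }
  }

  public static void main(String[] args) {
    BackupInfo session = new BackupInfo();
    session.setBackupId("backup_1472740000000"); // illustrative id
    try {
      copySnapshot(session);
    } catch (BackupException be) {
      BackupInfo ctx = be.getBackupContext();
      System.err.println(be.getMessage() + " for "
          + (ctx != null ? ctx.getBackupId() : "unknown backup"));
    }
  }
}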
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + + +/** + * Backup manifest Contains all the meta data of a backup image. The manifest info will be bundled + * as manifest file together with data. So that each backup image will contain all the info needed + * for restore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManifest { + + private static final Log LOG = LogFactory.getLog(BackupManifest.class); + + // manifest file name + public static final String MANIFEST_FILE_NAME = ".backup.manifest"; + + // manifest file version, current is 1.0 + public static final String MANIFEST_VERSION = "1.0"; + + // backup image, the dependency graph is made up by series of backup images + + public static class BackupImage implements Comparable { + + private String backupId; + private BackupType type; + private String rootDir; + private List tableList; + private long startTs; + private long completeTs; + private ArrayList ancestors; + + public BackupImage() { + super(); + } + + public BackupImage(String backupId, BackupType type, String rootDir, + List tableList, long startTs, long completeTs) { + this.backupId = backupId; + this.type = type; + this.rootDir = rootDir; + this.tableList = tableList; + this.startTs = startTs; + this.completeTs = completeTs; + } + + static BackupImage fromProto(BackupProtos.BackupImage im) { + String backupId = im.getBackupId(); + String rootDir = im.getRootDir(); + long startTs = im.getStartTs(); + long completeTs = im.getCompleteTs(); + List tableListList = im.getTableListList(); + List tableList = new ArrayList(); + for(HBaseProtos.TableName tn : tableListList) { + tableList.add(ProtobufUtil.toTableName(tn)); + } + + List ancestorList = im.getAncestorsList(); + + BackupType type = + im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: + BackupType.INCREMENTAL; + + BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); + for(BackupProtos.BackupImage img: ancestorList) { + image.addAncestor(fromProto(img)); + } + return image; + } + + BackupProtos.BackupImage toProto() { + BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder(); + builder.setBackupId(backupId); + builder.setCompleteTs(completeTs); + builder.setStartTs(startTs); + builder.setRootDir(rootDir); + if (type == BackupType.FULL) { + builder.setBackupType(BackupProtos.BackupType.FULL); + } else{ + builder.setBackupType(BackupProtos.BackupType.INCREMENTAL); + } + + for (TableName name: tableList) { + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + + if (ancestors != null){ + for (BackupImage im: ancestors){ + builder.addAncestors(im.toProto()); + } + } + + return builder.build(); + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + public String getRootDir() { + return rootDir; + } + + public void setRootDir(String rootDir) { + this.rootDir = rootDir; + } + + public List getTableNames() { + return tableList; + } + + public void setTableList(List tableList) { + this.tableList = tableList; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getCompleteTs() { + return completeTs; + } + + public void setCompleteTs(long completeTs) { + this.completeTs = completeTs; + } + + public ArrayList getAncestors() { + if (this.ancestors == null) { + this.ancestors = new ArrayList(); + } + return this.ancestors; + } + + public void addAncestor(BackupImage backupImage) { + this.getAncestors().add(backupImage); + } + + public boolean hasAncestor(String token) { + for (BackupImage image : this.getAncestors()) { + if (image.getBackupId().equals(token)) { + return true; + } + } + return false; + } + + public boolean hasTable(TableName table) { + for (TableName t : tableList) { + if (t.equals(table)) { + return true; + } + } + return false; + } + + @Override + public int compareTo(BackupImage other) { + String thisBackupId = this.getBackupId(); + String otherBackupId = other.getBackupId(); + int index1 = thisBackupId.lastIndexOf("_"); + int index2 = otherBackupId.lastIndexOf("_"); + String name1 = thisBackupId.substring(0, index1); + String name2 = otherBackupId.substring(0, index2); + if(name1.equals(name2)) { + Long thisTS = new Long(thisBackupId.substring(index1 + 1)); + Long otherTS = new Long(otherBackupId.substring(index2 + 1)); + return thisTS.compareTo(otherTS); + } else { + return name1.compareTo(name2); + } + } + } + + // manifest version + private String version = MANIFEST_VERSION; + + // hadoop hbase configuration + protected Configuration config = null; + + // backup root directory + private String rootDir = null; + + // backup image directory + private String tableBackupDir = null; + + // backup log directory if this is an incremental backup + private String logBackupDir = null; + + // backup token + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // the table list for the backup + private ArrayList tableList; + + // actual start timestamp of the backup process + private long startTs; + + // actual complete timestamp of the 
backup process + private long completeTs; + + // the region server timestamp for tables: + // > + private Map> incrTimeRanges; + + // dependency of this backup, including all the dependent images to do PIT recovery + private Map dependency; + + /** + * Construct manifest for a ongoing backup. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + this.loadTableList(backupCtx.getTableNames()); + } + + + /** + * Construct a table level manifest for a backup of the named table. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx, TableName table) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + List tables = new ArrayList(); + tables.add(table); + this.loadTableList(tables); + } + + /** + * Construct manifest from a backup directory. + * @param conf configuration + * @param backupPath backup path + * @throws IOException + */ + + public BackupManifest(Configuration conf, Path backupPath) throws IOException { + this(backupPath.getFileSystem(conf), backupPath); + } + + /** + * Construct manifest from a backup directory. + * @param conf configuration + * @param backupPath backup path + * @throws BackupException exception + */ + + public BackupManifest(FileSystem fs, Path backupPath) throws BackupException { + if (LOG.isDebugEnabled()) { + LOG.debug("Loading manifest from: " + backupPath.toString()); + } + // The input backupDir may not exactly be the backup table dir. + // It could be the backup log dir where there is also a manifest file stored. + // This variable's purpose is to keep the correct and original location so + // that we can store/persist it. + this.tableBackupDir = backupPath.toString(); + this.config = fs.getConf(); + try { + + FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); + if (subFiles == null) { + String errorMsg = backupPath.toString() + " does not exist"; + LOG.error(errorMsg); + throw new IOException(errorMsg); + } + for (FileStatus subFile : subFiles) { + if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { + + // load and set manifest field from file content + FSDataInputStream in = fs.open(subFile.getPath()); + long len = subFile.getLen(); + byte[] pbBytes = new byte[(int) len]; + in.readFully(pbBytes); + BackupProtos.BackupManifest proto = null; + try{ + proto = parseFrom(pbBytes); + } catch(Exception e){ + throw new BackupException(e); + } + this.version = proto.getVersion(); + this.backupId = proto.getBackupId(); + this.type = BackupType.valueOf(proto.getType().name()); + // Here the parameter backupDir is where the manifest file is. 
+ // There should always be a manifest file under: + // backupRootDir/namespace/table/backupId/.backup.manifest + this.rootDir = backupPath.getParent().getParent().getParent().toString(); + + Path p = backupPath.getParent(); + if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { + this.rootDir = p.getParent().toString(); + } else { + this.rootDir = p.getParent().getParent().toString(); + } + + loadTableList(proto); + this.startTs = proto.getStartTs(); + this.completeTs = proto.getCompleteTs(); + loadIncrementalTimestampMap(proto); + loadDependency(proto); + //TODO: merge will be implemented by future jira + LOG.debug("Loaded manifest instance from manifest file: " + + BackupClientUtil.getPath(subFile.getPath())); + return; + } + } + String errorMsg = "No manifest file found in: " + backupPath.toString(); + throw new IOException(errorMsg); + + } catch (IOException e) { + throw new BackupException(e.getMessage()); + } + } + + private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { + List list = proto.getTstMapList(); + if(list == null || list.size() == 0) return; + this.incrTimeRanges = new HashMap>(); + for(BackupProtos.TableServerTimestamp tst: list){ + TableName tn = ProtobufUtil.toTableName(tst.getTable()); + HashMap map = this.incrTimeRanges.get(tn); + if(map == null){ + map = new HashMap(); + this.incrTimeRanges.put(tn, map); + } + List listSt = tst.getServerTimestampList(); + for(BackupProtos.ServerTimestamp stm: listSt) { + map.put(stm.getServer(), stm.getTimestamp()); + } + } + } + + private void loadDependency(BackupProtos.BackupManifest proto) { + if(LOG.isDebugEnabled()) { + LOG.debug("load dependency for: "+proto.getBackupId()); + } + + dependency = new HashMap(); + List list = proto.getDependentBackupImageList(); + for (BackupProtos.BackupImage im : list) { + BackupImage bim = BackupImage.fromProto(im); + if(im.getBackupId() != null){ + dependency.put(im.getBackupId(), bim); + } else{ + LOG.warn("Load dependency for backup manifest: "+ backupId+ + ". Null backup id in dependent image"); + } + } + } + + private void loadTableList(BackupProtos.BackupManifest proto) { + this.tableList = new ArrayList(); + List list = proto.getTableListList(); + for (HBaseProtos.TableName name: list) { + this.tableList.add(ProtobufUtil.toTableName(name)); + } + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + /** + * Loads table list. + * @param tableList Table list + */ + private void loadTableList(List tableList) { + + this.tableList = this.getTableList(); + if (this.tableList.size() > 0) { + this.tableList.clear(); + } + for (int i = 0; i < tableList.size(); i++) { + this.tableList.add(tableList.get(i)); + } + + LOG.debug(tableList.size() + " tables exist in table set."); + } + + /** + * Get the table set of this image. + * @return The table set list + */ + public ArrayList getTableList() { + if (this.tableList == null) { + this.tableList = new ArrayList(); + } + return this.tableList; + } + + /** + * Persist the manifest file. + * @throws IOException IOException when storing the manifest file. + */ + + public void store(Configuration conf) throws BackupException { + byte[] data = toByteArray(); + + // write the file, overwrite if already exist + Path manifestFilePath = + new Path(new Path((this.tableBackupDir != null ? 
this.tableBackupDir : this.logBackupDir)) + ,MANIFEST_FILE_NAME); + try { + FSDataOutputStream out = + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); + out.write(data); + out.close(); + } catch (IOException e) { + throw new BackupException(e.getMessage()); + } + + LOG.info("Manifest file stored to " + manifestFilePath); + } + + /** + * Protobuf serialization + * @return The filter serialized using pb + */ + public byte[] toByteArray() { + BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder(); + builder.setVersion(this.version); + builder.setBackupId(this.backupId); + builder.setType(BackupProtos.BackupType.valueOf(this.type.name())); + setTableList(builder); + builder.setStartTs(this.startTs); + builder.setCompleteTs(this.completeTs); + setIncrementalTimestampMap(builder); + setDependencyMap(builder); + return builder.build().toByteArray(); + } + + private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { + if (this.incrTimeRanges == null) { + return; + } + for (Entry> entry: this.incrTimeRanges.entrySet()) { + TableName key = entry.getKey(); + HashMap value = entry.getValue(); + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); + + for (String s : value.keySet()) { + BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder(); + stBuilder.setServer(s); + stBuilder.setTimestamp(value.get(s)); + tstBuilder.addServerTimestamp(stBuilder.build()); + } + builder.addTstMap(tstBuilder.build()); + } + } + + private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) { + for (BackupImage image: getDependency().values()) { + builder.addDependentBackupImage(image.toProto()); + } + } + + private void setTableList(BackupProtos.BackupManifest.Builder builder) { + for(TableName name: tableList){ + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + } + + /** + * Parse protobuf from byte array + * @param pbBytes A pb serialized BackupManifest instance + * @return An instance of made from bytes + * @throws DeserializationException + */ + private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes) + throws DeserializationException { + BackupProtos.BackupManifest proto; + try { + proto = BackupProtos.BackupManifest.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return proto; + } + + /** + * Get manifest file version + * @return version + */ + public String getVersion() { + return version; + } + + /** + * Get this backup image. + * @return the backup image. + */ + public BackupImage getBackupImage() { + return this.getDependency().get(this.backupId); + } + + /** + * Add dependent backup image for this backup. + * @param image The direct dependent backup image + */ + public void addDependentImage(BackupImage image) { + this.getDependency().get(this.backupId).addAncestor(image); + this.setDependencyMap(this.getDependency(), image); + } + + + + /** + * Get all dependent backup images. The image of this backup is also contained. 
+ * @return The dependent backup images map + */ + public Map getDependency() { + if (this.dependency == null) { + this.dependency = new HashMap(); + LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); + this.dependency.put(this.backupId, + new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, + this.completeTs)); + } + return this.dependency; + } + + /** + * Set the incremental timestamp map directly. + * @param incrTimestampMap timestamp map + */ + public void setIncrTimestampMap(HashMap> incrTimestampMap) { + this.incrTimeRanges = incrTimestampMap; + } + + + public Map> getIncrTimestampMap() { + if (this.incrTimeRanges == null) { + this.incrTimeRanges = new HashMap>(); + } + return this.incrTimeRanges; + } + + + /** + * Get the image list of this backup for restore in time order. + * @param reverse If true, then output in reverse order, otherwise in time order from old to new + * @return the backup image list for restore in time order + */ + public ArrayList getRestoreDependentList(boolean reverse) { + TreeMap restoreImages = new TreeMap(); + for (BackupImage image : this.getDependency().values()) { + restoreImages.put(Long.valueOf(image.startTs), image); + } + return new ArrayList(reverse ? (restoreImages.descendingMap().values()) + : (restoreImages.values())); + } + + /** + * Get the dependent image list for a specific table of this backup in time order from old to new + * if want to restore to this backup image level. + * @param table table + * @return the backup image list for a table in time order + */ + public ArrayList getDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(true); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + Collections.reverse(tableImageList); + return tableImageList; + } + + /** + * Get the full dependent image list in the whole dependency scope for a specific table of this + * backup in time order from old to new. + * @param table table + * @return the full backup image list for a table in time order in the whole scope of the + * dependency of this image + */ + public ArrayList getAllDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(false); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + } + } + return tableImageList; + } + + + /** + * Recursively set the dependency map of the backup images. + * @param map The dependency map + * @param image The backup image + */ + private void setDependencyMap(Map map, BackupImage image) { + if (image == null) { + return; + } else { + map.put(image.getBackupId(), image); + for (BackupImage img : image.getAncestors()) { + setDependencyMap(map, img); + } + } + } + + /** + * Check whether backup image1 could cover backup image2 or not. + * @param image1 backup image 1 + * @param image2 backup image 2 + * @return true if image1 can cover image2, otherwise false + */ + public static boolean canCoverImage(BackupImage image1, BackupImage image2) { + // image1 can cover image2 only when the following conditions are satisfied: + // - image1 must not be an incremental image; + // - image1 must be taken after image2 has been taken; + // - table set of image1 must cover the table set of image2. 
+ if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image2.getStartTs()) { + return false; + } + List image1TableList = image1.getTableNames(); + List image2TableList = image2.getTableNames(); + boolean found = false; + for (int i = 0; i < image2TableList.size(); i++) { + found = false; + for (int j = 0; j < image1TableList.size(); j++) { + if (image2TableList.get(i).equals(image1TableList.get(j))) { + found = true; + break; + } + } + if (!found) { + return false; + } + } + + LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); + return true; + } + + /** + * Check whether backup image set could cover a backup image or not. + * @param fullImages The backup image set + * @param image The target backup image + * @return true if fullImages can cover image, otherwise false + */ + public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { + // fullImages can cover image only when the following conditions are satisfied: + // - each image of fullImages must not be an incremental image; + // - each image of fullImages must be taken after image has been taken; + // - sum table set of fullImages must cover the table set of image. + for (BackupImage image1 : fullImages) { + if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image.getStartTs()) { + return false; + } + } + + ArrayList image1TableList = new ArrayList(); + for (BackupImage image1 : fullImages) { + List tableList = image1.getTableNames(); + for (TableName table : tableList) { + image1TableList.add(table.getNameAsString()); + } + } + ArrayList image2TableList = new ArrayList(); + List tableList = image.getTableNames(); + for (TableName table : tableList) { + image2TableList.add(table.getNameAsString()); + } + + for (int i = 0; i < image2TableList.size(); i++) { + if (image1TableList.contains(image2TableList.get(i)) == false) { + return false; + } + } + + LOG.debug("Full image set can cover image " + image.getBackupId()); + return true; + } + + public BackupInfo toBackupInfo() + { + BackupInfo info = new BackupInfo(); + info.setType(type); + TableName[] tables = new TableName[tableList.size()]; + info.addTables(getTableList().toArray(tables)); + info.setBackupId(backupId); + info.setStartTs(startTs); + info.setTargetRootDir(rootDir); + if(type == BackupType.INCREMENTAL) { + info.setHlogTargetDir(logBackupDir); + } + return info; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java new file mode 100644 index 0000000..ac1d2bc --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
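Note: with BackupManifest complete, here is a rough sketch of how a stored manifest is read back and turned into restore input. The backup directory path is illustrative; it must contain a .backup.manifest file previously written by store() during the original backup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;

public class ManifestReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path backupDir = new Path("hdfs:///backup/default/t1/backup_1472740000000"); // illustrative
    BackupManifest manifest = new BackupManifest(conf, backupDir);
    System.out.println("manifest version: " + manifest.getVersion());
    // images ordered old -> new; a restore replays them in this order
    for (BackupImage image : manifest.getRestoreDependentList(false)) {
      System.out.println(image.getBackupId() + " type=" + image.getType());
    }
    // convert back into a BackupInfo for code that expects the session object
    BackupInfo info = manifest.toBackupInfo();
    System.out.println("tables: " + info.getTableNames());
  }
}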
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * BackupRestoreConstants holds a bunch of HBase Backup and Restore constants + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class BackupRestoreConstants { + + + // delimiter in tablename list in restore command + public static final String TABLENAME_DELIMITER_IN_COMMAND = ","; + + public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root"; + + public static final String BACKUPID_PREFIX = "backup_"; + + public static enum BackupCommand { + CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET, + SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST + } + + private BackupRestoreConstants() { + // Can't be instantiated with this ctor. + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java new file mode 100644 index 0000000..d05d54c --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -0,0 +1,926 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
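Note: BACKUPID_PREFIX above, together with the way BackupImage.compareTo() splits ids at the last underscore, suggests ids of the form "backup_" plus the request timestamp in milliseconds. The id-generation code itself lives elsewhere in the patch, so treat the format below as an assumption used only for illustration.

import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;

public class BackupIdSketch {
  public static void main(String[] args) {
    String earlier = BackupRestoreConstants.BACKUPID_PREFIX + 1472668000000L;
    String later = BackupRestoreConstants.BACKUPID_PREFIX + 1472669000000L;
    // compareTo() orders images from the same destination by this numeric suffix
    long tsEarlier = Long.parseLong(earlier.substring(earlier.lastIndexOf('_') + 1));
    long tsLater = Long.parseLong(later.substring(later.lastIndexOf('_') + 1));
    System.out.println(earlier + " precedes " + later + ": " + (tsEarlier < tsLater));
  }
}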
+ */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * This class provides 'hbase:backup' table API + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTable implements Closeable { + + static class WALItem { + String backupId; + String walFile; + String backupRoot; + + WALItem(String backupId, String walFile, String backupRoot) { + this.backupId = backupId; + this.walFile = walFile; + this.backupRoot = backupRoot; + } + + public String getBackupId() { + return backupId; + } + + public String getWalFile() { + return walFile; + } + + public String getBackupRoot() { + return backupRoot; + } + + public String toString() { + return "/" + backupRoot + "/" + backupId + "/" + walFile; + } + + } + + private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); + private final static TableName tableName = TableName.BACKUP_TABLE_NAME; + // Stores backup sessions (contexts) + final static byte[] SESSIONS_FAMILY = "session".getBytes(); + // Stores other meta + final static byte[] META_FAMILY = "meta".getBytes(); + // Connection to HBase cluster, shared + // among all instances + private final Connection connection; + + public BackupSystemTable(Connection conn) throws IOException { + this.connection = conn; + } + + public void close() { + // do nothing + } + + /** + * Updates status (state) of a backup session in hbase:backup table + * @param context context + * @throws IOException exception + */ + public void updateBackupInfo(BackupInfo context) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() + + " set status=" + context.getState()); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForBackupContext(context); + table.put(put); + } + } + + /** + * Deletes backup status from hbase:backup table + * @param backupId backup id + * @throws IOException exception + */ 
+ + public void deleteBackupInfo(String backupId) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("delete backup status in hbase:backup for " + backupId); + } + try (Table table = connection.getTable(tableName)) { + Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId); + table.delete(del); + } + } + + /** + * Reads backup status object (instance of BackupContext) from hbase:backup table + * @param backupId - backupId + * @return Current status of backup session or null + */ + + public BackupInfo readBackupInfo(String backupId) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup status from hbase:backup for: " + backupId); + } + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForBackupContext(backupId); + Result res = table.get(get); + if (res.isEmpty()) { + return null; + } + return BackupSystemTableHelper.resultToBackupInfo(res); + } + } + + /** + * Read the last backup start code (timestamp) of last successful backup. Will return null if + * there is no start code stored on hbase or the value is of length 0. These two cases indicate + * there is no successful backup completed so far. + * @param backupRoot root directory path to backup + * @return the timestamp of last successful backup + * @throws IOException exception + */ + public String readBackupStartCode(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup start code from hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot); + Result res = table.get(get); + if (res.isEmpty()) { + return null; + } + Cell cell = res.listCells().get(0); + byte[] val = CellUtil.cloneValue(cell); + if (val.length == 0) { + return null; + } + return new String(val); + } + } + + /** + * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. + * @param startCode start code + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write backup start code to hbase:backup " + startCode); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot); + table.put(put); + } + } + + /** + * Get the Region Servers log information after the last log roll from hbase:backup. 
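Note: a minimal sketch of the hbase:backup accessors defined so far, assuming the system table already exists on the cluster: look up a session record by id and manage the start code for one backup destination. The backup id and root path are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SystemTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String backupRoot = "hdfs:///backup"; // illustrative destination
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      // read a session record previously written with updateBackupInfo()
      BackupInfo stored = sysTable.readBackupInfo("backup_1472740000000");
      System.out.println(stored == null ? "no such backup" : stored.getShortDescription());

      // start code bookkeeping for this destination
      sysTable.writeBackupStartCode(Long.valueOf(System.currentTimeMillis()), backupRoot);
      System.out.println("start code: " + sysTable.readBackupStartCode(backupRoot));
    }
  }
}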
+ * @param backupRoot root directory path to backup + * @return RS log info + * @throws IOException exception + */ + public HashMap<String, Long> readRegionServerLastLogRollResult(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read region server last roll log result from hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>(); + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String server = + BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); + byte[] data = CellUtil.cloneValue(cell); + rsTimestampMap.put(server, Long.parseLong(new String(data))); + } + return rsTimestampMap; + } + } + + /** + * Writes Region Server last roll log result (timestamp) to hbase:backup table + * @param server - Region Server name + * @param ts - last log timestamp + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write region server last roll log result to hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Put put = + BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, ts, backupRoot); + table.put(put); + } + } + + /** + * Get all completed backup information (in desc order by time) + * @param onlyCompleted true, if only successfully completed sessions + * @return history info as a list of BackupInfo records + * @throws IOException exception + */ + public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup history from hbase:backup"); + } + ArrayList<BackupInfo> list; + BackupState state = onlyCompleted ? BackupState.COMPLETE : BackupState.ANY; + list = getBackupContexts(state); + return BackupClientUtil.sortHistoryListDesc(list); + } + + /** + * Get all backups history + * @return list of backup info + * @throws IOException + */ + public List<BackupInfo> getBackupHistory() throws IOException { + return getBackupHistory(false); + } + + /** + * Get first n backup history records + * @param n - number of records + * @return list of records + * @throws IOException + */ + public List<BackupInfo> getHistory(int n) throws IOException { + + List<BackupInfo> history = getBackupHistory(); + if (history.size() <= n) return history; + List<BackupInfo> list = new ArrayList<BackupInfo>(); + for (int i = 0; i < n; i++) { + list.add(history.get(i)); + } + return list; + + } + + /** + * Get backup history records filtered by list + * of filters. + * @param n - max number of records + * @param filters - list of filters + * @return backup records + * @throws IOException + */ + public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter...
filters) throws IOException { + if (filters.length == 0) return getHistory(n); + + List history = getBackupHistory(); + List result = new ArrayList(); + for (BackupInfo bi : history) { + if (result.size() == n) break; + boolean passed = true; + for (int i = 0; i < filters.length; i++) { + if (!filters[i].apply(bi)) { + passed = false; + break; + } + } + if (passed) { + result.add(bi); + } + } + return result; + + } + + /** + * Get history for backup destination + * @param backupRoot - backup destination + * @return List of backup info + * @throws IOException + */ + public List getBackupHistory(String backupRoot) throws IOException { + ArrayList history = getBackupHistory(false); + for (Iterator iterator = history.iterator(); iterator.hasNext();) { + BackupInfo info = iterator.next(); + if (!backupRoot.equals(info.getTargetRootDir())) { + iterator.remove(); + } + } + return history; + } + + /** + * Get history for a table + * @param name - table name + * @return history for a table + * @throws IOException + */ + public List getBackupHistoryForTable(TableName name) throws IOException { + List history = getBackupHistory(); + List tableHistory = new ArrayList(); + for (BackupInfo info : history) { + List tables = info.getTableNames(); + if (tables.contains(name)) { + tableHistory.add(info); + } + } + return tableHistory; + } + + public Map> + getBackupHistoryForTableSet(Set set, String backupRoot) throws IOException { + List history = getBackupHistory(backupRoot); + Map> tableHistoryMap = + new HashMap>(); + for (Iterator iterator = history.iterator(); iterator.hasNext();) { + BackupInfo info = iterator.next(); + if (!backupRoot.equals(info.getTargetRootDir())) { + continue; + } + List tables = info.getTableNames(); + for (TableName tableName: tables) { + if (set.contains(tableName)) { + ArrayList list = tableHistoryMap.get(tableName); + if (list == null) { + list = new ArrayList(); + tableHistoryMap.put(tableName, list); + } + list.add(info); + } + } + } + return tableHistoryMap; + } + + /** + * Get all backup session with a given status (in desc order by time) + * @param status status + * @return history info of backup contexts + * @throws IOException exception + */ + public ArrayList getBackupContexts(BackupState status) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup contexts from hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + ArrayList list = new ArrayList(); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); + if (status != BackupState.ANY && context.getState() != status) { + continue; + } + list.add(context); + } + return list; + } + } + + /** + * Write the current timestamps for each regionserver to hbase:backup after a successful full or + * incremental backup. The saved timestamp is of the last log file that was backed up already. 
+ * @param tables tables + * @param newTimestamps timestamps + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeRegionServerLogTimestamp(Set tables, + HashMap newTimestamps, String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write RS log time stamps to hbase:backup for tables [" + + StringUtils.join(tables, ",") + "]"); + } + List puts = new ArrayList(); + for (TableName table : tables) { + byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray(); + Put put = + BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData, + backupRoot); + puts.add(put); + } + try (Table table = connection.getTable(tableName)) { + table.put(puts); + } + } + + /** + * Read the timestamp for each region server log after the last successful backup. Each table has + * its own set of the timestamps. The info is stored for each table as a concatenated string of + * rs->timestapmp + * @param backupRoot root directory path to backup + * @return the timestamp for each region server. key: tableName value: + * RegionServer,PreviousTimeStamp + * @throws IOException exception + */ + public HashMap> readLogTimestampMap(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read RS log ts from hbase:backup for root=" + backupRoot); + } + + HashMap> tableTimestampMap = + new HashMap>(); + + Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row); + TableName tn = TableName.valueOf(tabName); + byte[] data = CellUtil.cloneValue(cell); + if (data == null) { + throw new IOException("Data of last backup data from hbase:backup " + + "is empty. Create a backup first."); + } + if (data != null && data.length > 0) { + HashMap lastBackup = + fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); + tableTimestampMap.put(tn, lastBackup); + } + } + return tableTimestampMap; + } + } + + private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, + Map map) { + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(table)); + + for (Entry entry : map.entrySet()) { + BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); + builder.setServer(entry.getKey()); + builder.setTimestamp(entry.getValue()); + tstBuilder.addServerTimestamp(builder.build()); + } + + return tstBuilder.build(); + } + + private HashMap fromTableServerTimestampProto( + BackupProtos.TableServerTimestamp proto) { + HashMap map = new HashMap(); + List list = proto.getServerTimestampList(); + for (BackupProtos.ServerTimestamp st : list) { + map.put(st.getServer(), st.getTimestamp()); + } + return map; + } + + /** + * Return the current tables covered by incremental backup. 
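Note: a sketch of the per-table, per-region-server WAL timestamp bookkeeping used by incremental backups, assuming hbase:backup exists. The server name and timestamps are made up; the shape of the data (table -> {server -> last backed-up WAL timestamp}) follows the proto conversion code above.

import java.util.Collections;
import java.util.HashMap;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LogTimestampSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String backupRoot = "hdfs:///backup"; // illustrative
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      // one entry per region server: server name -> timestamp of the last WAL already backed up
      HashMap<String, Long> rsTimestamps = new HashMap<String, Long>();
      rsTimestamps.put("rs1.example.com,16020,1472740000000", Long.valueOf(1472740123456L));
      Set<TableName> tables = Collections.singleton(TableName.valueOf("t1"));
      sysTable.writeRegionServerLogTimestamp(tables, rsTimestamps, backupRoot);

      // read the whole map back: table -> {server -> timestamp}
      HashMap<TableName, HashMap<String, Long>> map = sysTable.readLogTimestampMap(backupRoot);
      System.out.println(map.get(TableName.valueOf("t1")));
    }
  }
}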
+ * @param backupRoot root directory path to backup + * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get incr backup table set from hbase:backup"); + } + TreeSet set = new TreeSet<>(); + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot); + Result res = table.get(get); + if (res.isEmpty()) { + return set; + } + List cells = res.listCells(); + for (Cell cell : cells) { + // qualifier = table name - we use table names as qualifiers + set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); + } + return set; + } + } + + /** + * Add tables to global incremental backup set + * @param tables - set of tables + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void addIncrementalBackupTableSet(Set tables, String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot + + " tables [" + StringUtils.join(tables, " ") + "]"); + for (TableName table : tables) { + LOG.debug(table); + } + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot); + table.put(put); + } + } + + /** + * Removes incremental backup set + * @param backupRoot backup root + */ + + public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Delete incremental backup table set to hbase:backup. ROOT=" + backupRoot); + } + try (Table table = connection.getTable(tableName)) { + Delete delete = BackupSystemTableHelper.createDeleteForIncrBackupTableSet(backupRoot); + table.delete(delete); + } + } + + /** + * Register WAL files as eligible for deletion + * @param files files + * @param backupId backup id + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void addWALFiles(List files, String backupId, String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files [" + + StringUtils.join(files, ",") + "]"); + for (String f : files) { + LOG.debug("add :" + f); + } + } + try (Table table = connection.getTable(tableName)) { + List puts = + BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot); + table.put(puts); + } + } + + /** + * Register WAL files as eligible for deletion + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public Iterator getWALFilesIterator(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get WAL files from hbase:backup"); + } + final Table table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot); + final ResultScanner scanner = table.getScanner(scan); + final Iterator it = scanner.iterator(); + return new Iterator() { + + @Override + public boolean hasNext() { + boolean next = it.hasNext(); + if (!next) { + // close all + try { + scanner.close(); + table.close(); + } catch (IOException e) { + LOG.error("Close WAL Iterator", e); + } + } + return next; + } + + @Override + public WALItem next() { + Result next = it.next(); + List cells = next.listCells(); + byte[] buf = cells.get(0).getValueArray(); + int len = 
cells.get(0).getValueLength(); + int offset = cells.get(0).getValueOffset(); + String backupId = new String(buf, offset, len); + buf = cells.get(1).getValueArray(); + len = cells.get(1).getValueLength(); + offset = cells.get(1).getValueOffset(); + String walFile = new String(buf, offset, len); + buf = cells.get(2).getValueArray(); + len = cells.get(2).getValueLength(); + offset = cells.get(2).getValueOffset(); + String backupRoot = new String(buf, offset, len); + return new WALItem(backupId, walFile, backupRoot); + } + + @Override + public void remove() { + // not implemented + throw new RuntimeException("remove is not supported"); + } + }; + + } + + /** + * Check if WAL file is eligible for deletion Future: to support all backup destinations + * @param file file + * @return true, if - yes. + * @throws IOException exception + */ + public boolean isWALFileDeletable(String file) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Check if WAL file has been already backed up in hbase:backup " + file); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); + Result res = table.get(get); + if (res.isEmpty()) { + return false; + } + return true; + } + } + + /** + * Checks if we have at least one backup session in hbase:backup This API is used by + * BackupLogCleaner + * @return true, if - at least one session exists in hbase:backup table + * @throws IOException exception + */ + public boolean hasBackupSessions() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Has backup sessions from hbase:backup"); + } + boolean result = false; + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + scan.setCaching(1); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + if (scanner.next() != null) { + result = true; + } + return result; + } + } + + /** + * BACKUP SETS + */ + + /** + * Get backup set list + * @return backup set list + * @throws IOException + */ + public List listBackupSets() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set list"); + } + List list = new ArrayList(); + Table table = null; + ResultScanner scanner = null; + try { + table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForBackupSetList(); + scan.setMaxVersions(1); + scanner = table.getScanner(scan); + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); + } + return list; + } finally { + if (scanner != null) { + scanner.close(); + } + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup set description (list of tables) + * @param name - set's name + * @return list of tables in a backup set + * @throws IOException + */ + public List describeBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set describe: " + name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) return null; + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + return toList(tables); + } finally { + if (table != null) { + table.close(); + } + } + } + + private List toList(String[] tables) { + List list = new ArrayList(tables.length); + for (String name : tables) { + 
list.add(TableName.valueOf(name)); + } + return list; + } + + /** + * Add backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ + public void addToBackupSet(String name, String[] newTables) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]"); + } + Table table = null; + String[] union = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) { + union = newTables; + } else { + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + union = merge(tables, newTables); + } + Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); + table.put(put); + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] merge(String[] tables, String[] newTables) { + List list = new ArrayList(); + // Add all from tables + for (String t : tables) { + list.add(t); + } + for (String nt : newTables) { + if (list.contains(nt)) continue; + list.add(nt); + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Remove tables from backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ + public void removeFromBackupSet(String name, String[] toRemove) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ") + + "]"); + } + Table table = null; + String[] disjoint = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) { + LOG.warn("Backup set '" + name + "' not found."); + return; + } else { + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + disjoint = disjoin(tables, toRemove); + } + if (disjoint.length > 0) { + Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint); + table.put(put); + } else { + // Delete + // describeBackupSet(name); + LOG.warn("Backup set '" + name + "' does not contain tables [" + + StringUtils.join(toRemove, " ") + "]"); + } + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] disjoin(String[] tables, String[] toRemove) { + List list = new ArrayList(); + // Add all from tables + for (String t : tables) { + list.add(t); + } + for (String nt : toRemove) { + if (list.contains(nt)) { + list.remove(nt); + } + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Delete backup set + * @param name set's name + * @throws IOException + */ + public void deleteBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set delete: " + name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name); + table.delete(del); + } finally { + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup system table descriptor + * @return descriptor + */ + public static HTableDescriptor getSystemTableDescriptor() { + HTableDescriptor tableDesc = new HTableDescriptor(tableName); + HColumnDescriptor colSessionsDesc = new 
HColumnDescriptor(SESSIONS_FAMILY); + colSessionsDesc.setMaxVersions(1); + // Time to keep backup sessions (secs) + Configuration config = HBaseConfiguration.create(); + int ttl = config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); + colSessionsDesc.setTimeToLive(ttl); + tableDesc.addFamily(colSessionsDesc); + HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); + // colDesc.setMaxVersions(1); + tableDesc.addFamily(colMetaDesc); + return tableDesc; + } + + public static String getTableNameAsString() { + return tableName.getNameAsString(); + } + + public static TableName getTableName() { + return tableName; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java new file mode 100644 index 0000000..37f29f8 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -0,0 +1,433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; + + +/** + * A collection for methods used by BackupSystemTable. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTableHelper { + + /** + * hbase:backup schema: + * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext + * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode + * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] + * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> last WAL + * timestamp] + * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp + * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name + */ + + private final static String BACKUP_INFO_PREFIX = "session:"; + private final static String START_CODE_ROW = "startcode:"; + private final static String INCR_BACKUP_SET = "incrbackupset:"; + private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:"; + private final static String RS_LOG_TS_PREFIX = "rslogts:"; + private final static String WALS_PREFIX = "wals:"; + private final static String SET_KEY_PREFIX = "backupset:"; + + private final static byte[] EMPTY_VALUE = new byte[] {}; + + // Safe delimiter in a string + private final static String NULL = "\u0000"; + + private BackupSystemTableHelper() { + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Creates Put operation for a given backup context object + * @param context backup context + * @return put operation + * @throws IOException exception + */ + static Put createPutForBackupContext(BackupInfo context) throws IOException { + Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); + put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray()); + return put; + } + + /** + * Creates Get operation for a given backup id + * @param backupId - backup's ID + * @return get operation + * @throws IOException exception + */ + static Get createGetForBackupContext(String backupId) throws IOException { + Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId)); + get.addFamily(BackupSystemTable.SESSIONS_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Delete operation for a given backup id + * @param backupId - backup's ID + * @return delete operation + * @throws IOException exception + */ + public static Delete createDeleteForBackupInfo(String backupId) { + Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId)); + del.addFamily(BackupSystemTable.SESSIONS_FAMILY); + return del; + } + + /** + * Converts Result to BackupContext + * @param res - HBase result + * @return backup context instance + * @throws IOException exception + */ + static BackupInfo resultToBackupInfo(Result res) throws IOException { + res.advance(); + Cell cell = res.current(); + return cellToBackupInfo(cell); + } + + /** + * Creates Get operation to retrieve start code from hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForStartCode(String rootPath) throws IOException { + Get get = new Get(rowkey(START_CODE_ROW, rootPath)); + get.addFamily(BackupSystemTable.META_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put operation to store start code to hbase:backup + * @return put operation + * @throws IOException exception + */ + static Put createPutForStartCode(String startCode, String rootPath) { + Put put = new Put(rowkey(START_CODE_ROW, rootPath)); + put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes()); + return put; + } + + /** + * Creates Get to retrieve incremental backup table set from hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException { + Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot)); + get.addFamily(BackupSystemTable.META_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put to store incremental backup table set + * @param tables tables + * @return put operation + */ + static Put createPutForIncrBackupTableSet(Set tables, 
String backupRoot) { + Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot)); + for (TableName table : tables) { + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), + EMPTY_VALUE); + } + return put; + } + + /** + * Creates Delete for incremental backup table set + * @param backupRoot backup root + * @return delete operation + */ + static Delete createDeleteForIncrBackupTableSet(String backupRoot) { + Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot)); + delete.addFamily(BackupSystemTable.META_FAMILY); + return delete; + } + + /** + * Creates Scan operation to load backup history + * @return scan operation + */ + static Scan createScanForBackupHistory() { + Scan scan = new Scan(); + byte[] startRow = BACKUP_INFO_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.SESSIONS_FAMILY); + scan.setMaxVersions(1); + return scan; + } + + /** + * Converts cell to backup context instance. + * @param current - cell + * @return backup context instance + * @throws IOException exception + */ + static BackupInfo cellToBackupInfo(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + return BackupInfo.fromByteArray(data); + } + + /** + * Creates Put to write RS last roll log timestamp map + * @param table - table + * @param smap - map, containing RS:ts + * @return put operation + */ + static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, + String backupRoot) { + Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); + put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); + return put; + } + + /** + * Creates Scan to load table-> { RS -> ts} map of maps + * @return scan operation + */ + static Scan createScanForReadLogTimestampMap(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + + return scan; + } + + /** + * Get table name from rowkey + * @param cloneRow rowkey + * @return table name + */ + static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { + String s = new String(cloneRow); + int index = s.lastIndexOf(NULL); + return s.substring(index + 1); + } + + /** + * Creates Put to store RS last log result + * @param server - server name + * @param timestamp - log roll result (timestamp) + * @return put operation + */ + static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, + String backupRoot) { + Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); + put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), timestamp.toString() + .getBytes()); + return put; + } + + /** + * Creates Scan operation to load last RS log roll results + * @return scan operation + */ + static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + 
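The stop-row computation above is the prefix-scan idiom every createScanFor* helper in this class relies on: the start row is one of the string prefixes from the schema comment ("session:", "trslm:" + backupRoot, "wals:", ...), and the stop row is the same bytes with the last byte incremented, so the scan returns exactly the rows sharing that prefix. A minimal sketch of the idiom as a stand-alone helper (the helper name is made up; the calls are the same ones used in this file):

  // Hypothetical convenience wrapper around the prefix-scan idiom used above.
  static Scan scanByPrefix(String prefix, byte[] family) {
    byte[] startRow = prefix.getBytes();
    byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
    // smallest row key that no longer shares the prefix (prefixes here are plain ASCII)
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    scan.addFamily(family);
    scan.setMaxVersions(1);
    return scan;
  }
  // e.g. scanByPrefix("session:", BackupSystemTable.SESSIONS_FAMILY) covers all backup sessions,
  // which is what createScanForBackupHistory() builds by hand.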
scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + scan.setMaxVersions(1); + + return scan; + } + + /** + * Get server's name from rowkey + * @param row - rowkey + * @return server's name + */ + static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { + String s = new String(row); + int index = s.lastIndexOf(NULL); + return s.substring(index + 1); + } + + /** + * Creates put list for list of WAL files + * @param files list of WAL file paths + * @param backupId backup id + * @return put list + * @throws IOException exception + */ + public static List createPutsForAddWALFiles(List files, String backupId, + String backupRoot) throws IOException { + + List puts = new ArrayList(); + for (String file : files) { + Put put = new Put(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); + put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes()); + puts.add(put); + } + return puts; + } + + /** + * Creates Scan operation to load WALs TODO: support for backupRoot + * @param backupRoot - path to backup destination + * @return scan operation + */ + public static Scan createScanForGetWALs(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = WALS_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + + /** + * Creates Get operation for a given wal file name TODO: support for backup destination + * @param file file + * @return get operation + * @throws IOException exception + */ + public static Get createGetForCheckWALFile(String file) throws IOException { + Get get = new Get(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); + // add backup root column + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + /** + * Creates Scan operation to load backup set list + * @return scan operation + */ + static Scan createScanForBackupSetList() { + Scan scan = new Scan(); + byte[] startRow = SET_KEY_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + + /** + * Creates Get operation to load backup set content + * @return get operation + */ + static Get createGetForBackupSet(String name) { + Get get = new Get(rowkey(SET_KEY_PREFIX, name)); + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + /** + * Creates Delete operation to delete backup set content + * @param name - backup set's name + * @return delete operation + */ + static Delete createDeleteForBackupSet(String name) { + Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); + del.addFamily(BackupSystemTable.META_FAMILY); + return del; + } + + /** + * Creates Put operation to update backup set content + * @param name - backup set's name + * @param tables - list of tables + * @return put operation + */ + static Put createPutForBackupSet(String name, String[] tables) { + Put put = new Put(rowkey(SET_KEY_PREFIX, name)); + byte[] value = 
convertToByteArray(tables); + put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); + return put; + } + + private static byte[] convertToByteArray(String[] tables) { + return StringUtils.join(tables, ",").getBytes(); + } + + /** + * Converts cell to backup set list. + * @param current - cell + * @return backup set + * @throws IOException + */ + static String[] cellValueToBackupSet(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + if (data != null && data.length > 0) { + return new String(data).split(","); + } else { + return new String[0]; + } + } + + /** + * Converts cell key to backup set name. + * @param current - cell + * @return backup set name + * @throws IOException + */ + static String cellKeyToBackupSetName(Cell current) throws IOException { + byte[] data = CellUtil.cloneRow(current); + return new String(data).substring(SET_KEY_PREFIX.length()); + } + + static byte[] rowkey(String s, String... other) { + StringBuilder sb = new StringBuilder(s); + for (String ss : other) { + sb.append(ss); + } + return sb.toString().getBytes(); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java new file mode 100644 index 0000000..9355d07 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java @@ -0,0 +1,540 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupCopyService; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupException; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +public class FullTableBackupClient { + private static final Log LOG = LogFactory.getLog(FullTableBackupClient.class); + + private Configuration conf; + private Connection conn; + private String backupId; + private List tableList; + HashMap newTimestamps = null; + + private BackupManager backupManager; + private BackupInfo backupContext; + + public FullTableBackupClient() { + // Required by the Procedure framework to create the procedure on replay + } + + public FullTableBackupClient(final Connection conn, final String backupId, + BackupRequest request) + throws IOException { + backupManager = new BackupManager(conn, conn.getConfiguration()); + this.backupId = backupId; + this.tableList = request.getTableList(); + this.conn = conn; + this.conf = conn.getConfiguration(); + backupContext = + backupManager.createBackupContext(backupId, BackupType.FULL, tableList, + request.getTargetRootDir(), + request.getWorkers(), request.getBandwidth()); + if (tableList == null || tableList.isEmpty()) { + this.tableList = new ArrayList<>(backupContext.getTables()); + } + } + + /** + * Begin the overall backup. 
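beginBackup below, together with failBackup and completeBackup later in this class, treats each backup session as a small state machine persisted in hbase:backup: RUNNING with a start timestamp, then COMPLETE or FAILED with an end timestamp and cleanup of partially copied data. A simplified outline of how execute(), at the end of this class, drives those calls:

  // Simplified outline only, not the actual implementation.
  void runFullBackup() throws IOException {
    beginBackup(backupManager, backupContext);          // state -> RUNNING, startTs recorded
    try {
      // ... roll logs, snapshot each table, copy snapshots, store manifests ...
      completeBackup(conn, backupContext, backupManager, BackupType.FULL, conf);  // -> COMPLETE
    } catch (Exception e) {
      failBackup(conn, backupContext, backupManager, e, "Unexpected BackupException : ",
          BackupType.FULL, conf);                        // -> FAILED, partial target data removed
      throw new IOException(e);
    }
  }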
+ * @param backupContext backup context + * @throws IOException exception + */ + static void beginBackup(BackupManager backupManager, BackupInfo backupContext) throws IOException { + backupManager.setBackupContext(backupContext); + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupInfo(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + private static String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } + + /** + * Delete HBase snapshot for backup. + * @param backupCtx backup context + * @throws Exception exception + */ + private static void + deleteSnapshot(final Connection conn, BackupInfo backupCtx, Configuration conf) + throws IOException { + LOG.debug("Trying to delete snapshot for full backup."); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + + try (Admin admin = conn.getAdmin();) { + admin.deleteSnapshot(snapshotName); + } catch (IOException ioe) { + LOG.debug("when deleting snapshot " + snapshotName, ioe); + } + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupCtx.getBackupId() + + " succeeded."); + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. + * @throws IOException exception + */ + private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. 
Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Fail the overall backup. + * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + static void failBackup(Connection conn, BackupInfo backupContext, BackupManager backupManager, + Exception e, String msg, BackupType type, Configuration conf) throws IOException { + LOG.error(msg + getMessage(e), e); + // If this is a cancel exception, then we've already cleaned. + + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupInfo(backupContext); + + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshot(conn, backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Do snapshot copy. 
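The snapshot copy phase documented below hands ExportSnapshot-style arguments to the pluggable BackupCopyService, one table at a time, and aborts the whole backup on the first non-zero return code. A sketch of that argument contract (the snapshot name and destination path are invented examples):

  // Illustrative values only.
  String[] args = new String[] {
      "-snapshot", "snapshot_1470000001234_default_usertable",                  // per-table snapshot
      "-copy-to", "hdfs://backup-root/backup_1470000001234/default/usertable"   // table backup dir
  };
  int res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
  if (res != 0) {
    throw new IOException("Snapshot export failed with return code " + res);
  }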
+ * @param backupContext backup context + * @throws Exception exception + */ + private void snapshotCopy(BackupInfo backupContext) throws Exception { + LOG.info("Snapshot copy is starting."); + + // set overall backup phase: snapshot_copy + backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); + + // call ExportSnapshot to copy files based on hbase snapshot for backup + // ExportSnapshot only support single snapshot export, need loop for multiple tables case + BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); + + // number of snapshots matches number of tables + float numOfSnapshots = backupContext.getSnapshotNames().size(); + + LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); + + for (TableName table : backupContext.getTables()) { + // Currently we simply set the sub copy tasks by counting the table snapshot number, we can + // calculate the real files' size for the percentage in the future. + // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); + int res = 0; + String[] args = new String[4]; + args[0] = "-snapshot"; + args[1] = backupContext.getSnapshotName(table); + args[2] = "-copy-to"; + args[3] = backupContext.getBackupStatus(table).getTargetDir(); + + LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); + res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); + // if one snapshot export failed, do not continue for remained snapshots + if (res != 0) { + LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); + + throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + + " with reason code " + res); + } + LOG.info("Snapshot copy " + args[1] + " finished."); + } + } + + /** + * Add manifest for the current backup. The manifest is stored within the table backup directory. + * @param backupContext The current backup context + * @throws IOException exception + * @throws BackupException exception + */ + private static void addManifest(BackupInfo backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException, BackupException { + // set the overall backup phase : store manifest + backupContext.setPhase(BackupPhase.STORE_MANIFEST); + + BackupManifest manifest; + + // Since we have each table's backup in its own directory structure, + // we'll store its manifest with the table directory. + for (TableName table : backupContext.getTables()) { + manifest = new BackupManifest(backupContext, table); + ArrayList ancestors = backupManager.getAncestors(backupContext, table); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + + if (type == BackupType.INCREMENTAL) { + // We'll store the log timestamps for this table only in its manifest. 
+ HashMap> tableTimestampMap = + new HashMap>(); + tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); + manifest.setIncrTimestampMap(tableTimestampMap); + ArrayList ancestorss = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestorss) { + manifest.addDependentImage(image); + } + } + manifest.store(conf); + } + + // For incremental backup, we store a overall manifest in + // /WALs/ + // This is used when created the next incremental backup + if (type == BackupType.INCREMENTAL) { + manifest = new BackupManifest(backupContext); + // set the table region server start and end timestamps for incremental backup + manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); + ArrayList ancestors = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); + } + } + + /** + * Get backup request meta data dir as string. + * @param backupContext backup context + * @return meta data dir + */ + private static String obtainBackupMetaDataStr(BackupInfo backupContext) { + StringBuffer sb = new StringBuffer(); + sb.append("type=" + backupContext.getType() + ",tablelist="); + for (TableName table : backupContext.getTables()) { + sb.append(table + ";"); + } + if (sb.lastIndexOf(";") > 0) { + sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); + } + sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); + + return sb.toString(); + } + + /** + * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying + * hlogs. + * @throws IOException exception + */ + private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf) + throws IOException { + Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("_distcp_logs")) { + LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + } + + /** + * Complete the overall backup. 
+ * @param backupContext backup context + * @throws Exception exception + */ + static void completeBackup(final Connection conn, BackupInfo backupContext, + BackupManager backupManager, BackupType type, Configuration conf) throws IOException { + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + backupContext.setProgress(100); + // add and store the manifest for the backup + addManifest(backupContext, backupManager, type, conf); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupInfo(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (type == BackupType.FULL) { + deleteSnapshot(conn, backupContext, conf); + cleanupExportSnapshotLog(conf); + } else if (type == BackupType.INCREMENTAL) { + cleanupDistCpLog(backupContext, conf); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. + */ + static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName(snapshotName); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + /** + * Backup request execution + * @throws IOException + */ + public void execute() throws IOException { + + try (Admin admin = conn.getAdmin();) { + + // Begin BACKUP + beginBackup(backupManager, backupContext); + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. + backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. + // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. 
+ LOG.info("Execute roll log procedure for full backup ..."); + + Map props = new HashMap(); + props.put("backupRoot", backupContext.getTargetRootDir()); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupServerUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + + // SNAPSHOT_TABLES: + for (TableName tableName : tableList) { + String snapshotName = + "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + + admin.snapshot(snapshotName, tableName); + + backupContext.setSnapshotName(tableName, snapshotName); + } + + // SNAPSHOT_COPY: + // do snapshot copy + LOG.debug("snapshot copy for " + backupId); + snapshotCopy(backupContext); + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + + // BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupClientUtil.getMinValue(BackupServerUtil + .getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + completeBackup(conn, backupContext, backupManager, BackupType.FULL, conf); + } catch (Exception e) { + failBackup(conn, backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + throw new IOException(e); + } + + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java new file mode 100644 index 0000000..8c63f98 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/HBaseBackupAdmin.java @@ -0,0 +1,555 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupAdmin; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.collect.Lists; + +/** + * The administrative API implementation for HBase Backup . Obtain an instance from + * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. + *
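A rough usage sketch for this class; the request object is assumed to be populated elsewhere with the backup type, table list and target root directory (only the corresponding getters, plus setTableList, are visible in this patch):

  // Hypothetical client-side usage, not part of the patch.
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    BackupAdmin backupAdmin = new HBaseBackupAdmin(conn);
    String backupId = backupAdmin.backupTables(request);    // FULL or INCREMENTAL, per request type
    BackupInfo info = backupAdmin.getBackupInfo(backupId);  // session row read back from hbase:backup
    List<BackupInfo> recent = backupAdmin.getHistory(10);   // most recent sessions
    backupAdmin.close();
  }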

BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. + * + * @see Admin + * @since 2.0 + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving + +public class HBaseBackupAdmin implements BackupAdmin { + private static final Log LOG = LogFactory.getLog(HBaseBackupAdmin.class); + + private final Connection conn; + + public HBaseBackupAdmin(Connection conn) { + this.conn = conn; + } + + @Override + public void close() throws IOException { + } + + @Override + public BackupInfo getBackupInfo(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + backupInfo = table.readBackupInfo(backupId); + return backupInfo; + } + } + + @Override + public int getProgress(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (backupId == null) { + ArrayList recentSessions = table.getBackupContexts(BackupState.RUNNING); + if (recentSessions.isEmpty()) { + LOG.warn("No ongoing sessions found."); + return -1; + } + // else show status for ongoing session + // must be one maximum + return recentSessions.get(0).getProgress(); + } else { + + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + return backupInfo.getProgress(); + } else { + LOG.warn("No information found for backupID=" + backupId); + return -1; + } + } + } + } + + @Override + public int deleteBackups(String[] backupIds) throws IOException { + // TODO: requires FT, failure will leave system + // in non-consistent state + // see HBASE-15227 + + int totalDeleted = 0; + Map> allTablesMap = new HashMap>(); + + try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + for (int i = 0; i < backupIds.length; i++) { + BackupInfo info = sysTable.readBackupInfo(backupIds[i]); + if (info != null) { + String rootDir = info.getTargetRootDir(); + HashSet allTables = allTablesMap.get(rootDir); + if (allTables == null) { + allTables = new HashSet(); + allTablesMap.put(rootDir, allTables); + } + allTables.addAll(info.getTableNames()); + totalDeleted += deleteBackup(backupIds[i], sysTable); + } + } + finalizeDelete(allTablesMap, sysTable); + } + return totalDeleted; + } + + /** + * Updates incremental backup set for every backupRoot + * @param tablesMap - Map [backupRoot: Set] + * @param table - backup system table + * @throws IOException + */ + + private void finalizeDelete(Map> tablesMap, BackupSystemTable table) + throws IOException { + for (String backupRoot : tablesMap.keySet()) { + Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); + Map> tableMap = + table.getBackupHistoryForTableSet(incrTableSet, backupRoot); + for(Map.Entry> entry: tableMap.entrySet()) { + if(entry.getValue() == null) { + // No more backups for a table + incrTableSet.remove(entry.getKey()); + } + } + if (!incrTableSet.isEmpty()) { + table.addIncrementalBackupTableSet(incrTableSet, backupRoot); + } else { // empty + table.deleteIncrementalBackupTableSet(backupRoot); + } + } + } + + /** + * Delete single backup and all related backups + * Algorithm: + * + * Backup type: FULL or INCREMENTAL + * Is this last backup session for table T: YES or NO + * For every table T from table list 'tables': + * if(FULL, YES) deletes only physical data (PD) + * if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo, until + * we either reach the most recent backup for T in the system or FULL 
backup which + * includes T + * if(INCREMENTAL, YES) deletes only physical data (PD) + * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images + * between last FULL backup, which is older than the backup being deleted and the next + * FULL backup (if exists) or last one for a particular table T and removes T from list + * of backup tables. + * @param backupId - backup id + * @param sysTable - backup system table + * @return total - number of deleted backup images + * @throws IOException + */ + private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { + + BackupInfo backupInfo = sysTable.readBackupInfo(backupId); + + int totalDeleted = 0; + if (backupInfo != null) { + LOG.info("Deleting backup " + backupInfo.getBackupId() + " ..."); + BackupClientUtil.cleanupBackupData(backupInfo, conn.getConfiguration()); + // List of tables in this backup; + List tables = backupInfo.getTableNames(); + long startTime = backupInfo.getStartTs(); + for (TableName tn : tables) { + boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime); + if (isLastBackupSession) { + continue; + } + // else + List affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable); + for (BackupInfo info : affectedBackups) { + if (info.equals(backupInfo)) { + continue; + } + removeTableFromBackupImage(info, tn, sysTable); + } + } + LOG.debug("Delete backup info "+ backupInfo.getBackupId()); + + sysTable.deleteBackupInfo(backupInfo.getBackupId()); + LOG.info("Delete backup " + backupInfo.getBackupId() + " completed."); + totalDeleted++; + } else { + LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + } + return totalDeleted; + } + + private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) + throws IOException { + List tables = info.getTableNames(); + LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" + + info.getTableListAsString()); + if (tables.contains(tn)) { + tables.remove(tn); + + if (tables.isEmpty()) { + LOG.debug("Delete backup info "+ info.getBackupId()); + + sysTable.deleteBackupInfo(info.getBackupId()); + BackupClientUtil.cleanupBackupData(info, conn.getConfiguration()); + } else { + info.setTables(tables); + sysTable.updateBackupInfo(info); + // Now, clean up directory for table + cleanupBackupDir(info, tn, conn.getConfiguration()); + } + } + } + + private List getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, + BackupSystemTable table) throws IOException { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); + long ts = backupInfo.getStartTs(); + List list = new ArrayList(); + List history = table.getBackupHistory(backupInfo.getTargetRootDir()); + // Scan from most recent to backupInfo + // break when backupInfo reached + for (BackupInfo info : history) { + if (info.getStartTs() == ts) { + break; + } + List tables = info.getTableNames(); + if (tables.contains(tn)) { + BackupType bt = info.getType(); + if (bt == BackupType.FULL) { + // Clear list if we encounter FULL backup + list.clear(); + } else { + LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn + + " added " + info.getBackupId() + " tables=" + info.getTableListAsString()); + list.add(info); + } + } + } + return list; + } + + + + /** + * Clean up the data at target directory + * @throws IOException + */ + private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) + throws IOException 
{ + try { + // clean up the data at target directory + String targetDir = backupInfo.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); + return; + } + + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); + + Path targetDirPath = + new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(), + backupInfo.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table + + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + throw e1; + } + } + + private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) + throws IOException { + List history = table.getBackupHistory(); + for (BackupInfo info : history) { + List tables = info.getTableNames(); + if (!tables.contains(tn)) { + continue; + } + if (info.getStartTs() <= startTime) { + return true; + } else { + return false; + } + } + return false; + } + + @Override + public List getHistory(int n) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List history = table.getBackupHistory(); + if (history.size() <= n) return history; + List list = new ArrayList(); + for (int i = 0; i < n; i++) { + list.add(history.get(i)); + } + return list; + } + } + + @Override + public List getHistory(int n, BackupInfo.Filter ... filters) throws IOException { + if (filters.length == 0) return getHistory(n); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List history = table.getBackupHistory(); + List result = new ArrayList(); + for(BackupInfo bi: history) { + if(result.size() == n) break; + boolean passed = true; + for(int i=0; i < filters.length; i++) { + if(!filters[i].apply(bi)) { + passed = false; + break; + } + } + if(passed) { + result.add(bi); + } + } + return result; + } + } + + @Override + public List listBackupSets() throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.listBackupSets(); + List bslist = new ArrayList(); + for (String s : list) { + List tables = table.describeBackupSet(s); + if (tables != null) { + bslist.add(new BackupSet(s, tables)); + } + } + return bslist; + } + } + + @Override + public BackupSet getBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.describeBackupSet(name); + if (list == null) return null; + return new BackupSet(name, list); + } + } + + @Override + public boolean deleteBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (table.describeBackupSet(name) == null) { + return false; + } + table.deleteBackupSet(name); + return true; + } + } + + @Override + public void addToBackupSet(String name, TableName[] tables) throws IOException { + String[] tableNames = new String[tables.length]; + try (final BackupSystemTable table = new BackupSystemTable(conn); + final Admin admin = conn.getAdmin();) { + for (int i = 0; i < tables.length; i++) { + tableNames[i] = tables[i].getNameAsString(); + if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { + throw new 
IOException("Cannot add " + tableNames[i] + " because it doesn't exist"); + } + } + table.addToBackupSet(name, tableNames); + LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + + "' backup set"); + } + } + + @Override + public void removeFromBackupSet(String name, String[] tables) throws IOException { + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + table.removeFromBackupSet(name, tables); + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + + "' completed."); + } + } + + @Override + public void restore(RestoreRequest request) throws IOException { + if (request.isCheck()) { + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(request.getBackupRootDir()); + String backupId = request.getBackupId(); + TableName[] sTableArray = request.getFromTables(); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, + sTableArray, conn.getConfiguration(), rootPath, backupId); + + // Check and validate the backup image and its dependencies + + if (RestoreServerUtil.validate(backupManifestMap, conn.getConfiguration())) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + + } + // Execute restore request + new RestoreTablesClient(conn, request).execute(); + } + + @Override + public Future restoreAsync(RestoreRequest request) throws IOException { + // TBI + return null; + } + + @Override + public String backupTables(final BackupRequest request) throws IOException { + String setName = request.getBackupSetName(); + BackupType type = request.getBackupType(); + String targetRootDir = request.getTargetRootDir(); + List tableList = request.getTableList(); + + String backupId = + (setName == null || setName.length() == 0 ? 
BackupRestoreConstants.BACKUPID_PREFIX + : setName + "_") + EnvironmentEdgeManager.currentTime(); + if (type == BackupType.INCREMENTAL) { + Set incrTableSet = null; + try (BackupSystemTable table = new BackupSystemTable(conn)) { + incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); + } + + if (incrTableSet.isEmpty()) { + System.err.println("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new IOException("No table covered by incremental backup."); + } + + tableList.removeAll(incrTableSet); + if (!tableList.isEmpty()) { + String extraTables = StringUtils.join(tableList, ","); + System.err.println("Some tables (" + extraTables + ") haven't gone through full backup"); + throw new IOException("Perform full backup on " + extraTables + " first, " + + "then retry the command"); + } + System.out.println("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + if (tableList != null && !tableList.isEmpty()) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = + FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration()); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new IOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + ArrayList nonExistingTableList = null; + try (Admin admin = conn.getAdmin();) { + for (TableName tableName : tableList) { + if (!admin.tableExists(tableName)) { + if (nonExistingTableList == null) { + nonExistingTableList = new ArrayList<>(); + } + nonExistingTableList.add(tableName); + } + } + } + if (nonExistingTableList != null) { + if (type == BackupType.INCREMENTAL) { + System.err.println("Incremental backup table set contains non-exising table: " + + nonExistingTableList); + // Update incremental backup set + tableList = excludeNonExistingTables(tableList, nonExistingTableList); + } else { + // Throw exception only in full mode - we try to backup non-existing table + throw new IOException("Non-existing tables found in the table list: " + + nonExistingTableList); + } + } + } + + // update table list + request.setTableList(tableList); + + if (type == BackupType.FULL) { + new FullTableBackupClient(conn, backupId, request).execute(); + } else { + new IncrementalTableBackupClient(conn, backupId, request).execute(); + } + return backupId; + } + + + private List excludeNonExistingTables(List tableList, + List nonExistingTableList) { + + for (TableName table : nonExistingTableList) { + tableList.remove(table); + } + return tableList; + } + + @Override + public Future backupTablesAsync(final BackupRequest userRequest) throws IOException { + // TBI + return null; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 8f6aeb8..be5fd23 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,6 +34,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; @@ -42,13 +42,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; -import org.apache.hadoop.hbase.procedure.MasterProcedureManager; -import org.apache.hadoop.hbase.procedure.ProcedureUtil; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.DefaultWALProvider; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem; /** * After a full backup was created, the incremental backup will only store the changes made @@ -64,12 +59,10 @@ public class IncrementalBackupManager { // parent manager private final BackupManager backupManager; private final Configuration conf; - private final Connection conn; public IncrementalBackupManager(BackupManager bm) { this.backupManager = bm; this.conf = bm.getConf(); - this.conn = bm.getConnection(); } /** @@ -80,7 +73,7 @@ public class IncrementalBackupManager { * @return The new HashMap of RS log timestamps after the log roll for this incremental backup. 
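The method documented here first forces a log roll (each region server records its roll timestamp in hbase:backup) and then derives the incremental WAL list by comparing those timestamps with the ones saved by the previous backup. The sketch below is only an approximation of that selection rule; getLogFilesForNewBackup itself is not part of this hunk:

  // Rough approximation; the real code also honours the saved start code and handles
  // region servers that appeared or disappeared since the previous backup.
  boolean includeInIncrementalBackup(String server, long walTimestamp,
      Map<String, Long> previousRollTs) {
    Long prev = previousRollTs.get(server);
    return prev == null || walTimestamp > prev;   // newer than the last backed-up roll point
  }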
* @throws IOException exception */ - public HashMap getIncrBackupLogFileList(MasterServices svc,BackupInfo backupContext) + public HashMap getIncrBackupLogFileList(Connection conn,BackupInfo backupContext) throws IOException { List logList; HashMap newTimestamps; @@ -109,19 +102,13 @@ public class IncrementalBackupManager { LOG.info("Execute roll log procedure for incremental backup ..."); HashMap props = new HashMap(); props.put("backupRoot", backupContext.getTargetRootDir()); - MasterProcedureManager mpm = svc.getMasterProcedureManagerHost() - .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); - long waitTime = ProcedureUtil.execProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + + try(Admin admin = conn.getAdmin();) { + + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - ProcedureUtil.waitForProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime, - conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER), - conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE)); + } newTimestamps = backupManager.readRegionServerLastLogRollResult(); logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java new file mode 100644 index 0000000..d9610a2 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -0,0 +1,235 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupCopyService; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; + +@InterfaceAudience.Private +public class IncrementalTableBackupClient { + private static final Log LOG = LogFactory.getLog(IncrementalTableBackupClient.class); + + private Configuration conf; + private Connection conn; + HashMap<String, Long> newTimestamps = null; + + private String backupId; + private BackupManager backupManager; + private BackupInfo backupContext; + + public IncrementalTableBackupClient() { + // Required by the Procedure framework to create the procedure on replay + } + + public IncrementalTableBackupClient(final Connection conn, final String backupId, + BackupRequest request) + throws IOException { + + this.conn = conn; + this.conf = conn.getConfiguration(); + backupManager = new BackupManager(conn, conf); + this.backupId = backupId; + backupContext = + backupManager.createBackupContext(backupId, BackupType.INCREMENTAL, request.getTableList(), + request.getTargetRootDir(), request.getWorkers(), (int) request.getBandwidth()); + } + + private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List<String> list = new ArrayList<String>(); + for (String file : incrBackupFileList) { + if (fs.exists(new Path(file))) { + list.add(file); + } else { + LOG.warn("Can't find file: " + file); + } + } + return list; + } + + private List<String> getMissingFiles(List<String> incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List<String> list = new ArrayList<String>(); + for (String file : incrBackupFileList) { + if (!fs.exists(new Path(file))) { + list.add(file); + } + } + return list; + } + + /** + * Do incremental copy.
+ * @param backupContext backup context + */ + private void incrementalCopy(BackupInfo backupContext) throws Exception { + + LOG.info("Incremental copy is starting."); + // set overall backup phase: incremental_copy + backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); + // get incremental backup file list and prepare params for DistCp + List<String> incrBackupFileList = backupContext.getIncrBackupFileList(); + // filter missing files out (they have been copied by previous backups) + incrBackupFileList = filterMissingFiles(incrBackupFileList); + String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + + BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); + int counter = 0; + final int MAX_ITERATIONS = 2; + while (counter++ < MAX_ITERATIONS) { + // We run DistCp maximum 2 times + // If it fails on a second time, we throw Exception + int res = + copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.INCREMENTAL, + strArr); + + if (res != 0) { + LOG.error("Copy incremental log files failed with return code: " + res + "."); + throw new IOException("Hadoop Distributed Copy failed from " + + StringUtils.join(incrBackupFileList, ",") + " to " + backupContext.getHLogTargetDir()); + } + List<String> missingFiles = getMissingFiles(incrBackupFileList); + + if (missingFiles.isEmpty()) { + break; + } else { + // Repeat DistCp, some files have been moved from WALs to oldWALs during previous run + // update backupContext and strArr + if (counter == MAX_ITERATIONS) { + String msg = + "DistCp could not finish the following files: " + StringUtils.join(missingFiles, ","); + LOG.error(msg); + throw new IOException(msg); + } + List<String> converted = convertFilesFromWALtoOldWAL(missingFiles); + incrBackupFileList.removeAll(missingFiles); + incrBackupFileList.addAll(converted); + backupContext.setIncrBackupFileList(incrBackupFileList); + + // Run DistCp only for missing files (which have been moved from WALs to oldWALs + // during previous run) + strArr = converted.toArray(new String[converted.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + } + } + + LOG.info("Incremental copy from " + StringUtils.join(incrBackupFileList, ",") + " to " + + backupContext.getHLogTargetDir() + " finished."); + } + + private List<String> convertFilesFromWALtoOldWAL(List<String> missingFiles) throws IOException { + List<String> list = new ArrayList<String>(); + for (String path : missingFiles) { + if (path.indexOf(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME) < 0) { + LOG.error("Copy incremental log files failed, file is missing: " + path); + throw new IOException("Hadoop Distributed Copy failed to " + + backupContext.getHLogTargetDir() + ", file is missing " + path); + } + list.add(path.replace(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME, Path.SEPARATOR + + HConstants.HREGION_OLDLOGDIR_NAME)); + } + return list; + } + + public void execute() throws IOException { + + // case PREPARE_INCREMENTAL: + FullTableBackupClient.beginBackup(backupManager, backupContext); + LOG.debug("For incremental backup, current table set is " + + backupManager.getIncrementalBackupTableSet()); + try { + IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager); + + newTimestamps = incrBackupManager.getIncrBackupLogFileList(conn, backupContext); + } catch (Exception e) { + // fail the overall backup and return + FullTableBackupClient.failBackup(conn, backupContext, backupManager, e, +
"Unexpected Exception : ", BackupType.INCREMENTAL, conf); + } + + // case INCREMENTAL_COPY: + try { + // copy out the table and region info files for each table + BackupServerUtil.copyTableRegionInfo(conn, backupContext, conf); + incrementalCopy(backupContext); + // Save list of WAL files copied + backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); + } catch (Exception e) { + String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; + // fail the overall backup and return + FullTableBackupClient.failBackup(conn, backupContext, backupManager, e, msg, + BackupType.INCREMENTAL, conf); + } + // case INCR_BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if a cancel request comes in, the backup will be allowed to finish + try { + backupContext.setState(BackupState.COMPLETE); + // Set the previousTimestampMap which is before this current log roll to the manifest. + HashMap<TableName, HashMap<String, Long>> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupClientUtil.getMinValue(BackupServerUtil + .getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + // backup complete + FullTableBackupClient.completeBackup(conn, backupContext, backupManager, + BackupType.INCREMENTAL, conf); + + } catch (IOException e) { + FullTableBackupClient.failBackup(conn, backupContext, backupManager, e, + "Unexpected Exception : ", BackupType.INCREMENTAL, conf); + } + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java new file mode 100644 index 0000000..91f2d68 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.TreeSet; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; + +@InterfaceAudience.Private +public class RestoreTablesClient { + private static final Log LOG = LogFactory.getLog(RestoreTablesClient.class); + + private Configuration conf; + private Connection conn; + private String backupId; + private TableName[] sTableArray; + private TableName[] tTableArray; + private String targetRootDir; + private boolean isOverwrite; + + public RestoreTablesClient() { + // Required by the Procedure framework to create the procedure on replay + } + + public RestoreTablesClient(Connection conn, RestoreRequest request) + throws IOException { + this.targetRootDir = request.getBackupRootDir(); + this.backupId = request.getBackupId(); + this.sTableArray = request.getFromTables(); + this.tTableArray = request.getToTables(); + if (tTableArray == null || tTableArray.length == 0) { + this.tTableArray = sTableArray; + } + this.isOverwrite = request.isOverwrite(); + this.conn = conn; + this.conf = conn.getConfiguration(); + + } + + /** + * Validate target tables. + * @param tTableArray target tables + * @param isOverwrite overwrite existing table + * @throws IOException exception + */ + private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) throws IOException { + ArrayList<TableName> existTableList = new ArrayList<>(); + ArrayList<TableName> disabledTableList = new ArrayList<>(); + + // check if the tables already exist + try (Admin admin = conn.getAdmin()) { + for (TableName tableName : tTableArray) { + if (admin.tableExists(tableName)) { + existTableList.add(tableName); + if (admin.isTableDisabled(tableName)) { + disabledTableList.add(tableName); + } + } else { + LOG.info("HBase table " + tableName + + " does not exist.
It will be created during restore process"); + } + } + } + + if (existTableList.size() > 0) { + if (!isOverwrite) { + LOG.error("Existing table (" + + existTableList + + ") found in the restore target, please add the " + + "\"-overwrite\" option to the command if you mean to restore to these existing tables"); + throw new IOException("Existing table found in restore target but no \"-overwrite\" " + + "option was specified"); + } else { + if (disabledTableList.size() > 0) { + LOG.error("Found offline table(s) in the restore target, " + + "please enable them before restoring with the \"-overwrite\" option"); + LOG.info("Offline table list in restore target: " + disabledTableList); + throw new IOException( + "Found offline table in the restore target while restoring with the \"-overwrite\" option"); + } + } + } + } + + /** + * Restore each backup image for a table. + * @param images array of BackupImage, full backup image first + * @param sTable table to be restored + * @param tTable table to restore to + * @param truncateIfExists truncate the target table if it exists + * @throws IOException exception + */ + + private void restoreImages(BackupImage[] images, TableName sTable, TableName tTable, + boolean truncateIfExists) throws IOException { + + // First image MUST be image of a FULL backup + BackupImage image = images[0]; + String rootDir = image.getRootDir(); + String backupId = image.getBackupId(); + Path backupRoot = new Path(rootDir); + RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); + Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); + String lastIncrBackupId = images.length == 1 ? null : images[images.length - 1].getBackupId(); + // We need hFS only for full restore (see the code) + BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); + if (manifest.getType() == BackupType.FULL) { + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" + " backup image " + + tableBackupPath.toString()); + restoreTool.fullRestoreTable(conn, tableBackupPath, sTable, tTable, truncateIfExists, + lastIncrBackupId); + } else { // incremental Backup + throw new IOException("Unexpected backup type " + image.getType()); + } + + if (images.length == 1) { + // full backup restore done + return; + } + + List<Path> dirList = new ArrayList<Path>(); + // collect the log backup dirs of the incremental images, oldest first + for (int i = 1; i < images.length; i++) { + BackupImage im = images[i]; + String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); + dirList.add(new Path(logBackupDir)); + } + + String dirs = StringUtils.join(dirList, ","); + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from log dirs: " + dirs); + Path[] paths = new Path[dirList.size()]; + dirList.toArray(paths); + restoreTool.incrementalRestoreTable(conn, tableBackupPath, paths, new TableName[] { sTable }, + new TableName[] { tTable }, lastIncrBackupId); + LOG.info(sTable + " has been successfully restored to " + tTable); + + } + + /** + * Restore operation.
Stage 2: resolve the backup image dependencies + * @param backupManifestMap map of tableName to Manifest + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @throws IOException exception + */ + private void restore(HashMap<TableName, BackupManifest> backupManifestMap, + TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { + TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>(); + boolean truncateIfExists = isOverwrite; + try { + for (int i = 0; i < sTableArray.length; i++) { + TableName table = sTableArray[i]; + BackupManifest manifest = backupManifestMap.get(table); + // Get the image list of this backup for restore in time order from old + // to new. + List<BackupImage> list = new ArrayList<BackupImage>(); + list.add(manifest.getBackupImage()); + TreeSet<BackupImage> set = new TreeSet<BackupImage>(list); + List<BackupImage> depList = manifest.getDependentListByTable(table); + set.addAll(depList); + BackupImage[] arr = new BackupImage[set.size()]; + set.toArray(arr); + restoreImages(arr, table, tTableArray[i], truncateIfExists); + restoreImageSet.addAll(list); + if (restoreImageSet != null && !restoreImageSet.isEmpty()) { + LOG.info("Restore includes the following image(s):"); + for (BackupImage image : restoreImageSet) { + LOG.info("Backup: " + + image.getBackupId() + + " " + + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), + table)); + } + } + } + } catch (Exception e) { + LOG.error("Failed", e); + throw new IOException(e); + } + LOG.debug("restoreStage finished"); + } + + public void execute() throws IOException { + + // case VALIDATION: + // check the target tables + checkTargetTables(tTableArray, isOverwrite); + // case RESTORE_IMAGES: + HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(targetRootDir); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, + backupId); + restore(backupManifestMap, sTableArray, tTableArray, isOverwrite); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java deleted file mode 100644 index 2d41423..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java +++ /dev/null @@ -1,777 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.hbase.backup.master; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupCopyService; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupException; -import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupManifest; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.BackupServerUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; -import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; -import org.apache.hadoop.hbase.procedure.MasterProcedureManager; -import org.apache.hadoop.hbase.procedure.ProcedureUtil; -import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; - -@InterfaceAudience.Private -public class FullTableBackupProcedure - extends StateMachineProcedure - implements TableProcedureInterface { - private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class); - - private static final String SNAPSHOT_BACKUP_MAX_ATTEMPTS_KEY = "hbase.backup.snapshot.attempts.max"; - private static final int DEFAULT_SNAPSHOT_BACKUP_MAX_ATTEMPTS = 10; - - private static final String SNAPSHOT_BACKUP_ATTEMPTS_DELAY_KEY = "hbase.backup.snapshot.attempts.delay"; - private static final int DEFAULT_SNAPSHOT_BACKUP_ATTEMPTS_DELAY = 10000; - - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Configuration conf; - private String backupId; - private List tableList; - private String targetRootDir; - HashMap newTimestamps = null; - - private BackupManager backupManager; - private BackupInfo backupContext; - - public FullTableBackupProcedure() { - // Required by the Procedure 
framework to create the procedure on replay - } - - public FullTableBackupProcedure(final MasterProcedureEnv env, - final String backupId, List tableList, String targetRootDir, final int workers, - final long bandwidth) throws IOException { - backupManager = new BackupManager(env.getMasterServices().getConnection(), - env.getMasterConfiguration()); - this.backupId = backupId; - this.tableList = tableList; - this.targetRootDir = targetRootDir; - backupContext = - backupManager.createBackupContext(backupId, BackupType.FULL, - tableList, targetRootDir, workers, bandwidth); - if (tableList == null || tableList.isEmpty()) { - this.tableList = new ArrayList<>(backupContext.getTables()); - } - this.setOwner(env.getRequestUser().getUGI().getShortUserName()); - } - - @Override - public byte[] getResult() { - return backupId.getBytes(); - } - - /** - * Begin the overall backup. - * @param backupContext backup context - * @throws IOException exception - */ - static void beginBackup(BackupManager backupManager, BackupInfo backupContext) - throws IOException { - backupManager.setBackupContext(backupContext); - // set the start timestamp of the overall backup - long startTs = EnvironmentEdgeManager.currentTime(); - backupContext.setStartTs(startTs); - // set overall backup status: ongoing - backupContext.setState(BackupState.RUNNING); - LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); - - backupManager.updateBackupInfo(backupContext); - if (LOG.isDebugEnabled()) { - LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); - } - } - - private static String getMessage(Exception e) { - String msg = e.getMessage(); - if (msg == null || msg.equals("")) { - msg = e.getClass().getName(); - } - return msg; - } - - /** - * Delete HBase snapshot for backup. - * @param backupCtx backup context - * @throws Exception exception - */ - private static void deleteSnapshot(final MasterProcedureEnv env, - BackupInfo backupCtx, Configuration conf) - throws IOException { - LOG.debug("Trying to delete snapshot for full backup."); - for (String snapshotName : backupCtx.getSnapshotNames()) { - if (snapshotName == null) { - continue; - } - LOG.debug("Trying to delete snapshot: " + snapshotName); - HBaseProtos.SnapshotDescription.Builder builder = - HBaseProtos.SnapshotDescription.newBuilder(); - builder.setName(snapshotName); - try { - env.getMasterServices().getSnapshotManager().deleteSnapshot(builder.build()); - } catch (IOException ioe) { - LOG.debug("when deleting snapshot " + snapshotName, ioe); - } - LOG.debug("Deleting the snapshot " + snapshotName + " for backup " - + backupCtx.getBackupId() + " succeeded."); - } - } - - /** - * Clean up directories with prefix "exportSnapshot-", which are generated when exporting - * snapshots. 
- * @throws IOException exception - */ - private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { - FileSystem fs = FSUtils.getCurrentFileSystem(conf); - Path stagingDir = - new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() - .toString())); - FileStatus[] files = FSUtils.listStatus(fs, stagingDir); - if (files == null) { - return; - } - for (FileStatus file : files) { - if (file.getPath().getName().startsWith("exportSnapshot-")) { - LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); - if (FSUtils.delete(fs, file.getPath(), true) == false) { - LOG.warn("Can not delete " + file.getPath()); - } - } - } - } - - /** - * Clean up the uncompleted data at target directory if the ongoing backup has already entered the - * copy phase. - */ - static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { - try { - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - LOG.debug("Trying to cleanup up target dir. Current backup phase: " - + backupContext.getPhase()); - if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) - || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) - || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { - FileSystem outputFs = - FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); - - // now treat one backup as a transaction, clean up data that has been partially copied at - // table level - for (TableName table : backupContext.getTables()) { - Path targetDirPath = - new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), - backupContext.getBackupId(), table)); - if (outputFs.delete(targetDirPath, true)) { - LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() - + " done."); - } else { - LOG.info("No data has been copied to " + targetDirPath.toString() + "."); - } - - Path tableDir = targetDirPath.getParent(); - FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); - if (backups == null || backups.length == 0) { - outputFs.delete(tableDir, true); - LOG.debug(tableDir.toString() + " is empty, remove it."); - } - } - } - - } catch (IOException e1) { - LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " - + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); - } - } - - /** - * Fail the overall backup. - * @param backupContext backup context - * @param e exception - * @throws Exception exception - */ - static void failBackup(final MasterProcedureEnv env, BackupInfo backupContext, - BackupManager backupManager, Exception e, - String msg, BackupType type, Configuration conf) throws IOException { - LOG.error(msg + getMessage(e)); - // If this is a cancel exception, then we've already cleaned. 
- - // set the failure timestamp of the overall backup - backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); - - // set failure message - backupContext.setFailedMsg(e.getMessage()); - - // set overall backup status: failed - backupContext.setState(BackupState.FAILED); - - // compose the backup failed data - String backupFailedData = - "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() - + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() - + ",failedmessage=" + backupContext.getFailedMsg(); - LOG.error(backupFailedData); - - backupManager.updateBackupInfo(backupContext); - - // if full backup, then delete HBase snapshots if there already are snapshots taken - // and also clean up export snapshot log files if exist - if (type == BackupType.FULL) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } - - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. - cleanupTargetDir(backupContext, conf); - - LOG.info("Backup " + backupContext.getBackupId() + " failed."); - } - - /** - * Do snapshot copy. - * @param backupContext backup context - * @throws Exception exception - */ - private void snapshotCopy(BackupInfo backupContext) throws Exception { - LOG.info("Snapshot copy is starting."); - - // set overall backup phase: snapshot_copy - backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); - - // call ExportSnapshot to copy files based on hbase snapshot for backup - // ExportSnapshot only support single snapshot export, need loop for multiple tables case - BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); - - // number of snapshots matches number of tables - float numOfSnapshots = backupContext.getSnapshotNames().size(); - - LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); - - for (TableName table : backupContext.getTables()) { - // Currently we simply set the sub copy tasks by counting the table snapshot number, we can - // calculate the real files' size for the percentage in the future. - // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); - int res = 0; - String[] args = new String[4]; - args[0] = "-snapshot"; - args[1] = backupContext.getSnapshotName(table); - args[2] = "-copy-to"; - args[3] = backupContext.getBackupStatus(table).getTargetDir(); - - LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); - res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); - // if one snapshot export failed, do not continue for remained snapshots - if (res != 0) { - LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); - - throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] - + " with reason code " + res); - } - LOG.info("Snapshot copy " + args[1] + " finished."); - } - } - - /** - * Add manifest for the current backup. The manifest is stored - * within the table backup directory. 
- * @param backupContext The current backup context - * @throws IOException exception - * @throws BackupException exception - */ - private static void addManifest(BackupInfo backupContext, BackupManager backupManager, - BackupType type, Configuration conf) throws IOException, BackupException { - // set the overall backup phase : store manifest - backupContext.setPhase(BackupPhase.STORE_MANIFEST); - - BackupManifest manifest; - - // Since we have each table's backup in its own directory structure, - // we'll store its manifest with the table directory. - for (TableName table : backupContext.getTables()) { - manifest = new BackupManifest(backupContext, table); - ArrayList ancestors = backupManager.getAncestors(backupContext, table); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - - if (type == BackupType.INCREMENTAL) { - // We'll store the log timestamps for this table only in its manifest. - HashMap> tableTimestampMap = - new HashMap>(); - tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); - manifest.setIncrTimestampMap(tableTimestampMap); - ArrayList ancestorss = backupManager.getAncestors(backupContext); - for (BackupImage image : ancestorss) { - manifest.addDependentImage(image); - } - } - manifest.store(conf); - } - - // For incremental backup, we store a overall manifest in - // /WALs/ - // This is used when created the next incremental backup - if (type == BackupType.INCREMENTAL) { - manifest = new BackupManifest(backupContext); - // set the table region server start and end timestamps for incremental backup - manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); - ArrayList ancestors = backupManager.getAncestors(backupContext); - for (BackupImage image : ancestors) { - manifest.addDependentImage(image); - } - manifest.store(conf); - } - } - - /** - * Get backup request meta data dir as string. - * @param backupContext backup context - * @return meta data dir - */ - private static String obtainBackupMetaDataStr(BackupInfo backupContext) { - StringBuffer sb = new StringBuffer(); - sb.append("type=" + backupContext.getType() + ",tablelist="); - for (TableName table : backupContext.getTables()) { - sb.append(table + ";"); - } - if (sb.lastIndexOf(";") > 0) { - sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); - } - sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); - - return sb.toString(); - } - - /** - * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying - * hlogs. - * @throws IOException exception - */ - private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf) - throws IOException { - Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); - FileSystem fs = FileSystem.get(rootPath.toUri(), conf); - FileStatus[] files = FSUtils.listStatus(fs, rootPath); - if (files == null) { - return; - } - for (FileStatus file : files) { - if (file.getPath().getName().startsWith("_distcp_logs")) { - LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); - FSUtils.delete(fs, file.getPath(), true); - } - } - } - - /** - * Complete the overall backup. 
- * @param backupContext backup context - * @throws Exception exception - */ - static void completeBackup(final MasterProcedureEnv env, BackupInfo backupContext, - BackupManager backupManager, BackupType type, Configuration conf) throws IOException { - // set the complete timestamp of the overall backup - backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); - // set overall backup status: complete - backupContext.setState(BackupState.COMPLETE); - backupContext.setProgress(100); - // add and store the manifest for the backup - addManifest(backupContext, backupManager, type, conf); - - // after major steps done and manifest persisted, do convert if needed for incremental backup - /* in-fly convert code here, provided by future jira */ - LOG.debug("in-fly convert code here, provided by future jira"); - - // compose the backup complete data - String backupCompleteData = - obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() - + ",completets=" + backupContext.getEndTs() + ",bytescopied=" - + backupContext.getTotalBytesCopied(); - if (LOG.isDebugEnabled()) { - LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); - } - backupManager.updateBackupInfo(backupContext); - - // when full backup is done: - // - delete HBase snapshot - // - clean up directories with prefix "exportSnapshot-", which are generated when exporting - // snapshots - if (type == BackupType.FULL) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } else if (type == BackupType.INCREMENTAL) { - cleanupDistCpLog(backupContext, conf); - } - - LOG.info("Backup " + backupContext.getBackupId() + " completed."); - } - - /** - * Wrap a SnapshotDescription for a target table. - * @param table table - * @return a SnapshotDescription especially for backup. - */ - static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { - // Mock a SnapshotDescription from backupContext to call SnapshotManager function, - // Name it in the format "snapshot__
" - HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); - builder.setTable(tableName.getNameAsString()); - builder.setName(snapshotName); - HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); - - LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() - + " from backupContext to request snapshot for backup."); - - return backupSnapshot; - } - - @Override - protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) - throws InterruptedException { - if (conf == null) { - conf = env.getMasterConfiguration(); - } - if (backupManager == null) { - try { - backupManager = new BackupManager(env.getMasterServices().getConnection(), - env.getMasterConfiguration()); - } catch (IOException ioe) { - setFailure("full backup", ioe); - return Flow.NO_MORE_STATE; - } - } - if (LOG.isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); - } - try { - switch (state) { - case PRE_SNAPSHOT_TABLE: - beginBackup(backupManager, backupContext); - String savedStartCode = null; - boolean firstBackup = false; - // do snapshot for full table backup - - try { - savedStartCode = backupManager.readBackupStartCode(); - firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L; - if (firstBackup) { - // This is our first backup. Let's put some marker on ZK so that we can hold the logs - // while we do the backup. - backupManager.writeBackupStartCode(0L); - } - // We roll log here before we do the snapshot. It is possible there is duplicate data - // in the log that is already in the snapshot. But if we do it after the snapshot, we - // could have data loss. - // A better approach is to do the roll log on each RS in the same global procedure as - // the snapshot. 
- LOG.info("Execute roll log procedure for full backup ..."); - MasterProcedureManager mpm = env.getMasterServices().getMasterProcedureManagerHost() - .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); - Map props= new HashMap(); - props.put("backupRoot", backupContext.getTargetRootDir()); - long waitTime = ProcedureUtil.execProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); - ProcedureUtil.waitForProcedure(mpm, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, - LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime, - conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER), - conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE)); - - newTimestamps = backupManager.readRegionServerLastLogRollResult(); - if (firstBackup) { - // Updates registered log files - // We record ALL old WAL files as registered, because - // this is a first full backup in the system and these - // files are not needed for next incremental backup - List logFiles = BackupServerUtil.getWALFilesOlderThan(conf, newTimestamps); - backupManager.recordWALFiles(logFiles); - } - } catch (BackupException e) { - setFailure("Failure in full-backup: pre-snapshot phase", e); - // fail the overall backup and return - failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", - BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - } - setNextState(FullTableBackupState.SNAPSHOT_TABLES); - break; - case SNAPSHOT_TABLES: - for (TableName tableName : tableList) { - String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) - + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); - HBaseProtos.SnapshotDescription backupSnapshot; - - // wrap a SnapshotDescription for offline/online snapshot - backupSnapshot = wrapSnapshotDescription(tableName,snapshotName); - try { - env.getMasterServices().getSnapshotManager().deleteSnapshot(backupSnapshot); - } catch (IOException e) { - LOG.debug("Unable to delete " + snapshotName, e); - } - // Kick off snapshot for backup - snapshotTable(env, backupSnapshot); - backupContext.setSnapshotName(tableName, backupSnapshot.getName()); - } - setNextState(FullTableBackupState.SNAPSHOT_COPY); - break; - case SNAPSHOT_COPY: - // do snapshot copy - LOG.debug("snapshot copy for " + backupId); - try { - this.snapshotCopy(backupContext); - } catch (Exception e) { - setFailure("Failure in full-backup: snapshot copy phase" + backupId, e); - // fail the overall backup and return - failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", - BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - } - // Updates incremental backup table set - backupManager.addIncrementalBackupTableSet(backupContext.getTables()); - setNextState(FullTableBackupState.BACKUP_COMPLETE); - break; - - case BACKUP_COMPLETE: - // set overall backup status: complete. Here we make sure to complete the backup. - // After this checkpoint, even if entering cancel process, will let the backup finished - backupContext.setState(BackupState.COMPLETE); - // The table list in backupContext is good for both full backup and incremental backup. - // For incremental backup, it contains the incremental backup table set. 
- backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); - - HashMap> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); - - Long newStartCode = - BackupClientUtil.getMinValue(BackupServerUtil.getRSLogTimestampMins(newTableSetTimestampMap)); - backupManager.writeBackupStartCode(newStartCode); - - // backup complete - completeBackup(env, backupContext, backupManager, BackupType.FULL, conf); - return Flow.NO_MORE_STATE; - - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (IOException e) { - LOG.error("Backup failed in " + state); - setFailure("snapshot-table", e); - } - return Flow.HAS_MORE_STATE; - } - - private void snapshotTable(final MasterProcedureEnv env, SnapshotDescription backupSnapshot) - throws IOException - { - - int maxAttempts = env.getMasterConfiguration().getInt(SNAPSHOT_BACKUP_MAX_ATTEMPTS_KEY, - DEFAULT_SNAPSHOT_BACKUP_MAX_ATTEMPTS); - int delay = env.getMasterConfiguration().getInt(SNAPSHOT_BACKUP_ATTEMPTS_DELAY_KEY, - DEFAULT_SNAPSHOT_BACKUP_ATTEMPTS_DELAY); - int attempts = 0; - - while (attempts++ < maxAttempts) { - try { - env.getMasterServices().getSnapshotManager().takeSnapshot(backupSnapshot); - long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout( - env.getMasterConfiguration(), - backupSnapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); - BackupServerUtil.waitForSnapshot(backupSnapshot, waitTime, - env.getMasterServices().getSnapshotManager(), env.getMasterConfiguration()); - break; - } catch( NotServingRegionException ee) { - LOG.warn("Snapshot attempt "+attempts +" failed for table "+backupSnapshot.getTable() + - ", sleeping for " + delay+"ms", ee); - if(attempts < maxAttempts) { - try { - Thread.sleep(delay); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - } - } - @Override - protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) - throws IOException { - if (state != FullTableBackupState.PRE_SNAPSHOT_TABLE) { - deleteSnapshot(env, backupContext, conf); - cleanupExportSnapshotLog(conf); - } - - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. 
- if (state == FullTableBackupState.SNAPSHOT_COPY) { - cleanupTargetDir(backupContext, conf); - } - } - - @Override - protected FullTableBackupState getState(final int stateId) { - return FullTableBackupState.valueOf(stateId); - } - - @Override - protected int getStateId(final FullTableBackupState state) { - return state.getNumber(); - } - - @Override - protected FullTableBackupState getInitialState() { - return FullTableBackupState.PRE_SNAPSHOT_TABLE; - } - - @Override - protected void setNextState(final FullTableBackupState state) { - if (aborted.get()) { - setAbortFailure("backup-table", "abort requested"); - } else { - super.setNextState(state); - } - } - - @Override - public boolean abort(final MasterProcedureEnv env) { - aborted.set(true); - return true; - } - - @Override - public void toStringClassDetails(StringBuilder sb) { - sb.append(getClass().getSimpleName()); - sb.append(" (targetRootDir="); - sb.append(targetRootDir); - sb.append("; backupId=").append(backupId); - sb.append("; tables="); - int len = tableList.size(); - for (int i = 0; i < len-1; i++) { - sb.append(tableList.get(i)).append(","); - } - sb.append(tableList.get(len-1)); - sb.append(")"); - } - - BackupProtos.BackupProcContext toBackupContext() { - BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); - ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); - if (newTimestamps != null && !newTimestamps.isEmpty()) { - BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); - for (Entry entry : newTimestamps.entrySet()) { - tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); - ctxBuilder.addServerTimestamp(tsBuilder.build()); - } - } - return ctxBuilder.build(); - } - - @Override - public void serializeStateData(final OutputStream stream) throws IOException { - super.serializeStateData(stream); - - BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); - backupProcCtx.writeDelimitedTo(stream); - } - - @Override - public void deserializeStateData(final InputStream stream) throws IOException { - super.deserializeStateData(stream); - - BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); - backupContext = BackupInfo.fromProto(proto.getCtx()); - backupId = backupContext.getBackupId(); - targetRootDir = backupContext.getTargetRootDir(); - tableList = backupContext.getTableNames(); - List svrTimestamps = proto.getServerTimestampList(); - if (svrTimestamps != null && !svrTimestamps.isEmpty()) { - newTimestamps = new HashMap<>(); - for (ServerTimestamp ts : svrTimestamps) { - newTimestamps.put(ts.getServer(), ts.getTimestamp()); - } - } - } - - @Override - public TableName getTableName() { - return TableName.BACKUP_TABLE_NAME; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.EDIT; - } - - @Override - protected boolean acquireLock(final MasterProcedureEnv env) { - if (env.waitInitialized(this)) { - return false; - } - return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } - - @Override - protected void releaseLock(final MasterProcedureEnv env) { - env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java deleted file mode 100644 index 
e877ebd..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java +++ /dev/null @@ -1,400 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.master; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupCopyService; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.BackupCopyService.Type; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; -import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; -import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.BackupServerUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; -import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; -import org.apache.hadoop.security.UserGroupInformation; - -@InterfaceAudience.Private -public class IncrementalTableBackupProcedure - extends StateMachineProcedure - implements TableProcedureInterface { - private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class); - - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Configuration conf; - private String backupId; - private List tableList; - private String targetRootDir; - HashMap newTimestamps = null; - - private BackupManager backupManager; - private BackupInfo backupContext; - - public IncrementalTableBackupProcedure() { - // Required by the Procedure framework to create the procedure on replay - } - - public 
IncrementalTableBackupProcedure(final MasterProcedureEnv env, - final String backupId, - List tableList, String targetRootDir, final int workers, - final long bandwidth) throws IOException { - backupManager = new BackupManager(env.getMasterServices().getConnection(), - env.getMasterConfiguration()); - this.backupId = backupId; - this.tableList = tableList; - this.targetRootDir = targetRootDir; - backupContext = backupManager.createBackupContext(backupId, - BackupType.INCREMENTAL, tableList, targetRootDir, workers, (int)bandwidth); - this.setOwner(env.getRequestUser().getUGI().getShortUserName()); - } - - @Override - public byte[] getResult() { - return backupId.getBytes(); - } - - private List filterMissingFiles(List incrBackupFileList) throws IOException { - FileSystem fs = FileSystem.get(conf); - List list = new ArrayList(); - for (String file : incrBackupFileList) { - if (fs.exists(new Path(file))) { - list.add(file); - } else { - LOG.warn("Can't find file: " + file); - } - } - return list; - } - - private List getMissingFiles(List incrBackupFileList) throws IOException { - FileSystem fs = FileSystem.get(conf); - List list = new ArrayList(); - for (String file : incrBackupFileList) { - if (!fs.exists(new Path(file))) { - list.add(file); - } - } - return list; - - } - - /** - * Do incremental copy. - * @param backupContext backup context - */ - private void incrementalCopy(BackupInfo backupContext) throws Exception { - - LOG.info("Incremental copy is starting."); - // set overall backup phase: incremental_copy - backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); - // get incremental backup file list and prepare parms for DistCp - List incrBackupFileList = backupContext.getIncrBackupFileList(); - // filter missing files out (they have been copied by previous backups) - incrBackupFileList = filterMissingFiles(incrBackupFileList); - String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); - strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); - - BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); - int counter = 0; - int MAX_ITERAIONS = 2; - while (counter++ < MAX_ITERAIONS) { - // We run DistCp maximum 2 times - // If it fails on a second time, we throw Exception - int res = copyService.copy(backupContext, backupManager, conf, - BackupCopyService.Type.INCREMENTAL, strArr); - - if (res != 0) { - LOG.error("Copy incremental log files failed with return code: " + res + "."); - throw new IOException("Failed of Hadoop Distributed Copy from "+ - StringUtils.join(incrBackupFileList, ",") +" to " - + backupContext.getHLogTargetDir()); - } - List missingFiles = getMissingFiles(incrBackupFileList); - - if(missingFiles.isEmpty()) { - break; - } else { - // Repeat DistCp, some files have been moved from WALs to oldWALs during previous run - // update backupContext and strAttr - if(counter == MAX_ITERAIONS){ - String msg = "DistCp could not finish the following files: " + - StringUtils.join(missingFiles, ","); - LOG.error(msg); - throw new IOException(msg); - } - List converted = convertFilesFromWALtoOldWAL(missingFiles); - incrBackupFileList.removeAll(missingFiles); - incrBackupFileList.addAll(converted); - backupContext.setIncrBackupFileList(incrBackupFileList); - - // Run DistCp only for missing files (which have been moved from WALs to oldWALs - // during previous run) - strArr = converted.toArray(new String[converted.size() + 1]); - strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); - } - } - - - 
LOG.info("Incremental copy from " + StringUtils.join(incrBackupFileList, ",") + " to " - + backupContext.getHLogTargetDir() + " finished."); - } - - - private List convertFilesFromWALtoOldWAL(List missingFiles) throws IOException { - List list = new ArrayList(); - for(String path: missingFiles){ - if(path.indexOf(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME) < 0) { - LOG.error("Copy incremental log files failed, file is missing : " + path); - throw new IOException("Failed of Hadoop Distributed Copy to " - + backupContext.getHLogTargetDir()+", file is missing "+ path); - } - list.add(path.replace(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME, - Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME)); - } - return list; - } - - @Override - protected Flow executeFromState(final MasterProcedureEnv env, - final IncrementalTableBackupState state) - throws InterruptedException { - if (conf == null) { - conf = env.getMasterConfiguration(); - } - if (backupManager == null) { - try { - backupManager = new BackupManager(env.getMasterServices().getConnection(), - env.getMasterConfiguration()); - } catch (IOException ioe) { - setFailure("incremental backup", ioe); - } - } - if (LOG.isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); - } - try { - switch (state) { - case PREPARE_INCREMENTAL: - FullTableBackupProcedure.beginBackup(backupManager, backupContext); - LOG.debug("For incremental backup, current table set is " - + backupManager.getIncrementalBackupTableSet()); - try { - IncrementalBackupManager incrBackupManager =new IncrementalBackupManager(backupManager); - - newTimestamps = incrBackupManager.getIncrBackupLogFileList(env.getMasterServices(), - backupContext); - } catch (Exception e) { - setFailure("Failure in incremental-backup: preparation phase " + backupId, e); - // fail the overall backup and return - FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, - "Unexpected Exception : ", BackupType.INCREMENTAL, conf); - } - - setNextState(IncrementalTableBackupState.INCREMENTAL_COPY); - break; - case INCREMENTAL_COPY: - try { - // copy out the table and region info files for each table - BackupServerUtil.copyTableRegionInfo(env.getMasterServices(), backupContext, conf); - incrementalCopy(backupContext); - // Save list of WAL files copied - backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); - } catch (Exception e) { - String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; - setFailure(msg, e); - // fail the overall backup and return - FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, - msg, BackupType.INCREMENTAL, conf); - } - setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE); - break; - case INCR_BACKUP_COMPLETE: - // set overall backup status: complete. Here we make sure to complete the backup. - // After this checkpoint, even if entering cancel process, will let the backup finished - backupContext.setState(BackupState.COMPLETE); - // Set the previousTimestampMap which is before this current log roll to the manifest. - HashMap> previousTimestampMap = - backupManager.readLogTimestampMap(); - backupContext.setIncrTimestampMap(previousTimestampMap); - - // The table list in backupContext is good for both full backup and incremental backup. - // For incremental backup, it contains the incremental backup table set. 
- backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); - - HashMap> newTableSetTimestampMap = - backupManager.readLogTimestampMap(); - - Long newStartCode = BackupClientUtil - .getMinValue(BackupServerUtil.getRSLogTimestampMins(newTableSetTimestampMap)); - backupManager.writeBackupStartCode(newStartCode); - // backup complete - FullTableBackupProcedure.completeBackup(env, backupContext, backupManager, - BackupType.INCREMENTAL, conf); - return Flow.NO_MORE_STATE; - - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (IOException e) { - setFailure("snapshot-table", e); - } - return Flow.HAS_MORE_STATE; - } - - @Override - protected void rollbackState(final MasterProcedureEnv env, - final IncrementalTableBackupState state) throws IOException { - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. - FullTableBackupProcedure.cleanupTargetDir(backupContext, conf); - } - - @Override - protected IncrementalTableBackupState getState(final int stateId) { - return IncrementalTableBackupState.valueOf(stateId); - } - - @Override - protected int getStateId(final IncrementalTableBackupState state) { - return state.getNumber(); - } - - @Override - protected IncrementalTableBackupState getInitialState() { - return IncrementalTableBackupState.PREPARE_INCREMENTAL; - } - - @Override - protected void setNextState(final IncrementalTableBackupState state) { - if (aborted.get()) { - setAbortFailure("snapshot-table", "abort requested"); - } else { - super.setNextState(state); - } - } - - @Override - public boolean abort(final MasterProcedureEnv env) { - aborted.set(true); - return true; - } - - @Override - public void toStringClassDetails(StringBuilder sb) { - sb.append(getClass().getSimpleName()); - sb.append(" (targetRootDir="); - sb.append(targetRootDir); - sb.append("; backupId=").append(backupId); - sb.append("; tables="); - int len = tableList.size(); - for (int i = 0; i < len-1; i++) { - sb.append(tableList.get(i)).append(","); - } - sb.append(tableList.get(len-1)); - sb.append(")"); - } - - BackupProtos.BackupProcContext toBackupContext() { - BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); - ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); - if (newTimestamps != null && !newTimestamps.isEmpty()) { - BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); - for (Entry entry : newTimestamps.entrySet()) { - tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); - ctxBuilder.addServerTimestamp(tsBuilder.build()); - } - } - return ctxBuilder.build(); - } - - @Override - public void serializeStateData(final OutputStream stream) throws IOException { - super.serializeStateData(stream); - - BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); - backupProcCtx.writeDelimitedTo(stream); - } - - @Override - public void deserializeStateData(final InputStream stream) throws IOException { - super.deserializeStateData(stream); - - BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); - backupContext = BackupInfo.fromProto(proto.getCtx()); - backupId = backupContext.getBackupId(); - targetRootDir = backupContext.getTargetRootDir(); - tableList = backupContext.getTableNames(); - List svrTimestamps = proto.getServerTimestampList(); - if (svrTimestamps != null 
&& !svrTimestamps.isEmpty()) { - newTimestamps = new HashMap<>(); - for (ServerTimestamp ts : svrTimestamps) { - newTimestamps.put(ts.getServer(), ts.getTimestamp()); - } - } - } - - @Override - public TableName getTableName() { - return TableName.BACKUP_TABLE_NAME; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.EDIT; - } - - @Override - protected boolean acquireLock(final MasterProcedureEnv env) { - if (env.waitInitialized(this)) { - return false; - } - return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } - - @Override - protected void releaseLock(final MasterProcedureEnv env) { - env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/RestoreTablesProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/RestoreTablesProcedure.java deleted file mode 100644 index 8fd7621..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/RestoreTablesProcedure.java +++ /dev/null @@ -1,387 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.backup.master; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.impl.BackupManifest; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.TableStateManager; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; -import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesState; - -@InterfaceAudience.Private -public class RestoreTablesProcedure - extends StateMachineProcedure - implements TableProcedureInterface { - private static final Log LOG = LogFactory.getLog(RestoreTablesProcedure.class); - - private final AtomicBoolean aborted = new AtomicBoolean(false); - private Configuration conf; - private String backupId; - private List sTableList; - private List tTableList; - private String targetRootDir; - private boolean isOverwrite; - - public RestoreTablesProcedure() { - // Required by the Procedure framework to create the procedure on replay - } - - public RestoreTablesProcedure(final MasterProcedureEnv env, - final String targetRootDir, String backupId, List sTableList, - List tTableList, boolean isOverwrite) throws IOException { - this.targetRootDir = targetRootDir; - this.backupId = backupId; - this.sTableList = sTableList; - this.tTableList = tTableList; - if (tTableList == null || tTableList.isEmpty()) { - this.tTableList = sTableList; - } - this.isOverwrite = isOverwrite; - this.setOwner(env.getRequestUser().getUGI().getShortUserName()); - } - - @Override - public byte[] getResult() { - return null; - } - - /** - * Validate target Tables - * @param conn connection - * @param mgr table state manager - * @param tTableArray: target tables - * @param isOverwrite overwrite existing table - * @throws IOException exception - */ - private void checkTargetTables(Connection conn, TableStateManager mgr, TableName[] tTableArray, - boolean isOverwrite) - throws IOException { - ArrayList existTableList = new ArrayList<>(); - ArrayList disabledTableList = new ArrayList<>(); - - // check if the tables already exist - for (TableName tableName : tTableArray) { - if (MetaTableAccessor.tableExists(conn, tableName)) { - existTableList.add(tableName); - if (mgr.isTableState(tableName, 
TableState.State.DISABLED, TableState.State.DISABLING)) { - disabledTableList.add(tableName); - } - } else { - LOG.info("HBase table " + tableName - + " does not exist. It will be created during restore process"); - } - } - - if (existTableList.size() > 0) { - if (!isOverwrite) { - LOG.error("Existing table (" + existTableList + ") found in the restore target, please add " - + "\"-overwrite\" option in the command if you mean to restore to these existing tables"); - throw new IOException("Existing table found in target while no \"-overwrite\" " - + "option found"); - } else { - if (disabledTableList.size() > 0) { - LOG.error("Found offline table in the restore target, " - + "please enable them before restore with \"-overwrite\" option"); - LOG.info("Offline table list in restore target: " + disabledTableList); - throw new IOException( - "Found offline table in the target when restore with \"-overwrite\" option"); - } - } - } - } - - /** - * Restore operation handle each backupImage in array - * @param svc: master services - * @param images: array BackupImage - * @param sTable: table to be restored - * @param tTable: table to be restored to - * @param truncateIfExists: truncate table - * @throws IOException exception - */ - - private void restoreImages(MasterServices svc, BackupImage[] images, TableName sTable, TableName tTable, - boolean truncateIfExists) throws IOException { - - // First image MUST be image of a FULL backup - BackupImage image = images[0]; - String rootDir = image.getRootDir(); - String backupId = image.getBackupId(); - Path backupRoot = new Path(rootDir); - RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); - Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); - String lastIncrBackupId = images.length == 1 ? null : images[images.length - 1].getBackupId(); - // We need hFS only for full restore (see the code) - BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); - if (manifest.getType() == BackupType.FULL) { - LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from full" - + " backup image " + tableBackupPath.toString()); - restoreTool.fullRestoreTable(svc, tableBackupPath, sTable, tTable, truncateIfExists, - lastIncrBackupId); - } else { // incremental Backup - throw new IOException("Unexpected backup type " + image.getType()); - } - - if (images.length == 1) { - // full backup restore done - return; - } - - List dirList = new ArrayList(); - // add full backup path - // full backup path comes first - for (int i = 1; i < images.length; i++) { - BackupImage im = images[i]; - String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); - dirList.add(new Path(logBackupDir)); - } - - String dirs = StringUtils.join(dirList, ","); - LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from log dirs: " + dirs); - Path[] paths = new Path[dirList.size()]; - dirList.toArray(paths); - restoreTool.incrementalRestoreTable(svc, tableBackupPath, paths, new TableName[] { sTable }, - new TableName[] { tTable }, lastIncrBackupId); - LOG.info(sTable + " has been successfully restored to " + tTable); - - } - - /** - * Restore operation. 
Stage 2: resolved Backup Image dependency - * @param svc: master services - * @param backupManifestMap : tableName, Manifest - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @return set of BackupImages restored - * @throws IOException exception - */ - private void restore(MasterServices svc, HashMap backupManifestMap, - TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { - TreeSet restoreImageSet = new TreeSet(); - boolean truncateIfExists = isOverwrite; - try { - for (int i = 0; i < sTableArray.length; i++) { - TableName table = sTableArray[i]; - BackupManifest manifest = backupManifestMap.get(table); - // Get the image list of this backup for restore in time order from old - // to new. - List list = new ArrayList(); - list.add(manifest.getBackupImage()); - TreeSet set = new TreeSet(list); - List depList = manifest.getDependentListByTable(table); - set.addAll(depList); - BackupImage[] arr = new BackupImage[set.size()]; - set.toArray(arr); - restoreImages(svc, arr, table, tTableArray[i], truncateIfExists); - restoreImageSet.addAll(list); - if (restoreImageSet != null && !restoreImageSet.isEmpty()) { - LOG.info("Restore includes the following image(s):"); - for (BackupImage image : restoreImageSet) { - LOG.info("Backup: " - + image.getBackupId() - + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); - } - } - } - } catch (Exception e) { - LOG.error("Failed", e); - throw new IOException(e); - } - LOG.debug("restoreStage finished"); - } - - @Override - protected Flow executeFromState(final MasterProcedureEnv env, final RestoreTablesState state) - throws InterruptedException { - if (conf == null) { - conf = env.getMasterConfiguration(); - } - if (LOG.isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); - } - TableName[] tTableArray = tTableList.toArray(new TableName[tTableList.size()]); - try { - switch (state) { - case VALIDATION: - - // check the target tables - checkTargetTables(env.getMasterServices().getConnection(), - env.getMasterServices().getTableStateManager(), tTableArray, isOverwrite); - - setNextState(RestoreTablesState.RESTORE_IMAGES); - break; - case RESTORE_IMAGES: - TableName[] sTableArray = sTableList.toArray(new TableName[sTableList.size()]); - HashMap backupManifestMap = new HashMap<>(); - // check and load backup image manifest for the tables - Path rootPath = new Path(targetRootDir); - HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, - backupId); - restore(env.getMasterServices(), backupManifestMap, sTableArray, tTableArray, isOverwrite); - return Flow.NO_MORE_STATE; - default: - throw new UnsupportedOperationException("unhandled state=" + state); - } - } catch (IOException e) { - setFailure("restore-table", e); - } - return Flow.HAS_MORE_STATE; - } - - @Override - protected void rollbackState(final MasterProcedureEnv env, final RestoreTablesState state) - throws IOException { - } - - @Override - protected RestoreTablesState getState(final int stateId) { - return RestoreTablesState.valueOf(stateId); - } - - @Override - protected int getStateId(final RestoreTablesState state) { - return state.getNumber(); - } - - @Override - protected RestoreTablesState getInitialState() { - return RestoreTablesState.VALIDATION; - } - - @Override - protected void setNextState(final RestoreTablesState state) { - if (aborted.get()) { - setAbortFailure("snapshot-table", "abort 
requested"); - } else { - super.setNextState(state); - } - } - - @Override - public boolean abort(final MasterProcedureEnv env) { - aborted.set(true); - return true; - } - - @Override - public void toStringClassDetails(StringBuilder sb) { - sb.append(getClass().getSimpleName()); - sb.append(" (targetRootDir="); - sb.append(targetRootDir); - sb.append(" isOverwrite= "); - sb.append(isOverwrite); - sb.append(" backupId= "); - sb.append(backupId); - sb.append(")"); - } - - MasterProtos.RestoreTablesRequest toRestoreTables() { - MasterProtos.RestoreTablesRequest.Builder bldr = MasterProtos.RestoreTablesRequest.newBuilder(); - bldr.setOverwrite(isOverwrite).setBackupId(backupId); - bldr.setBackupRootDir(targetRootDir); - for (TableName table : sTableList) { - bldr.addTables(ProtobufUtil.toProtoTableName(table)); - } - for (TableName table : tTableList) { - bldr.addTargetTables(ProtobufUtil.toProtoTableName(table)); - } - return bldr.build(); - } - - @Override - public void serializeStateData(final OutputStream stream) throws IOException { - super.serializeStateData(stream); - - MasterProtos.RestoreTablesRequest restoreTables = toRestoreTables(); - restoreTables.writeDelimitedTo(stream); - } - - @Override - public void deserializeStateData(final InputStream stream) throws IOException { - super.deserializeStateData(stream); - - MasterProtos.RestoreTablesRequest proto = - MasterProtos.RestoreTablesRequest.parseDelimitedFrom(stream); - backupId = proto.getBackupId(); - targetRootDir = proto.getBackupRootDir(); - isOverwrite = proto.getOverwrite(); - sTableList = new ArrayList<>(proto.getTablesList().size()); - for (HBaseProtos.TableName table : proto.getTablesList()) { - sTableList.add(ProtobufUtil.toTableName(table)); - } - tTableList = new ArrayList<>(proto.getTargetTablesList().size()); - for (HBaseProtos.TableName table : proto.getTargetTablesList()) { - tTableList.add(ProtobufUtil.toTableName(table)); - } - } - - @Override - public TableName getTableName() { - return TableName.BACKUP_TABLE_NAME; - } - - @Override - public TableOperationType getTableOperationType() { - return TableOperationType.EDIT; - } - - @Override - protected boolean acquireLock(final MasterProcedureEnv env) { - if (env.waitInitialized(this)) { - return false; - } - return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } - - @Override - protected void releaseLock(final MasterProcedureEnv env) { - env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java new file mode 100644 index 0000000..c22f51b --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java @@ -0,0 +1,437 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.util;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A collection of methods used by multiple classes to backup HBase tables.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupClientUtil {
+  protected static final Log LOG = LogFactory.getLog(BackupClientUtil.class);
+  public static final String LOGNAME_SEPARATOR = ".";
+
+  private BackupClientUtil() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+
+  /**
+   * Check whether the backup path exists.
+   * @param backupStr backup path
+   * @param conf configuration
+   * @return true if the path exists, false otherwise
+   * @throws IOException exception
+   */
+  public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
+    boolean isExist = false;
+    Path backupPath = new Path(backupStr);
+    FileSystem fileSys = backupPath.getFileSystem(conf);
+    String targetFsScheme = fileSys.getUri().getScheme();
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
+    }
+    if (fileSys.exists(backupPath)) {
+      isExist = true;
+    }
+    return isExist;
+  }
+
+  // Check the target path first; it will be created later if it does not exist.
+  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
+    boolean targetExists = false;
+    try {
+      targetExists = checkPathExist(backupRootPath, conf);
+    } catch (IOException e) {
+      String expMsg = e.getMessage();
+      String newMsg = null;
+      if (expMsg.contains("No FileSystem for scheme")) {
+        newMsg =
+            "Unsupported filesystem scheme found in the backup target url. Error Message: "
+                + expMsg;
+        LOG.error(newMsg);
+        throw new IOException(newMsg);
+      } else {
+        throw e;
+      }
+    }
+
+    if (targetExists) {
+      LOG.info("Using existing backup root dir: " + backupRootPath);
+    } else {
+      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
+    }
+  }
+
+  /**
+   * Get the min value of all the values in a map.
+ * @param map map + * @return the min value + */ + public static Long getMinValue(HashMap map) { + Long minTimestamp = null; + if (map != null) { + ArrayList timestampList = new ArrayList(map.values()); + Collections.sort(timestampList); + // The min among all the RS log timestamps will be kept in hbase:backup table. + minTimestamp = timestampList.get(0); + } + return minTimestamp; + } + + /** + * Parses host name:port from archived WAL path + * @param p path + * @return host name + * @throws IOException exception + */ + public static String parseHostFromOldLog(Path p) { + try { + String n = p.getName(); + int idx = n.lastIndexOf(LOGNAME_SEPARATOR); + String s = URLDecoder.decode(n.substring(0, idx), "UTF8"); + return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s); + } catch (Exception e) { + LOG.warn("Skip log file (can't parse): " + p); + return null; + } + } + + /** + * Given the log file, parse the timestamp from the file name. The timestamp is the last number. + * @param p a path to the log file + * @return the timestamp + * @throws IOException exception + */ + public static Long getCreationTime(Path p) throws IOException { + int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR); + if (idx < 0) { + throw new IOException("Cannot parse timestamp from path " + p); + } + String ts = p.getName().substring(idx + 1); + return Long.parseLong(ts); + } + + public static List getFiles(FileSystem fs, Path rootDir, List files, + PathFilter filter) throws FileNotFoundException, IOException { + RemoteIterator it = fs.listFiles(rootDir, true); + + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (lfs.isDirectory()) { + continue; + } + // apply filter + if (filter.accept(lfs.getPath())) { + files.add(lfs.getPath().toString()); + } + } + return files; + } + + public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException { + cleanupHLogDir(context, conf); + cleanupTargetDir(context, conf); + } + + /** + * Clean up directories which are generated when DistCp copying hlogs. 
+ * @throws IOException + */ + private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf) + throws IOException { + + String logDir = backupContext.getHLogTargetDir(); + if (logDir == null) { + LOG.warn("No log directory specified for " + backupContext.getBackupId()); + return; + } + + Path rootPath = new Path(logDir).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = listStatus(fs, rootPath, null); + if (files == null) { + return; + } + for (FileStatus file : files) { + LOG.debug("Delete log files: " + file.getPath().getName()); + fs.delete(file.getPath(), true); + } + } + + /** + * Clean up the data at target directory + */ + private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { + try { + // clean up the data at target directory + LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId()); + String targetDir = backupInfo.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); + return; + } + + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); + + for (TableName table : backupInfo.getTables()) { + Path targetDirPath = + new Path(getTableBackupDir(backupInfo.getTargetRootDir(), backupInfo.getBackupId(), + table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = listStatus(outputFs, tableDir, null); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true); + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at " + + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" + * @param backupRootDir backup root directory + * @param backupId backup id + * @param table table name + * @return backupPath String for the particular table + */ + public static String + getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { + return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + + Path.SEPARATOR; + } + + public static TableName[] parseTableNames(String tables) { + if (tables == null) { + return null; + } + String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + + TableName[] ret = new TableName[tableArray.length]; + for (int i = 0; i < tableArray.length; i++) { + ret[i] = TableName.valueOf(tableArray[i]); + } + return ret; + } + + /** + * Sort history list by start time in descending order. 
+ * @param historyList history list + * @return sorted list of BackupCompleteData + */ + public static ArrayList sortHistoryListDesc(ArrayList historyList) { + ArrayList list = new ArrayList(); + TreeMap map = new TreeMap(); + for (BackupInfo h : historyList) { + map.put(Long.toString(h.getStartTs()), h); + } + Iterator i = map.descendingKeySet().iterator(); + while (i.hasNext()) { + list.add(map.get(i.next())); + } + return list; + } + + /** + * Returns WAL file name + * @param walFileName WAL file name + * @return WAL file name + * @throws IOException exception + * @throws IllegalArgumentException exception + */ + public static String getUniqueWALFileNamePart(String walFileName) throws IOException { + return getUniqueWALFileNamePart(new Path(walFileName)); + } + + /** + * Returns WAL file name + * @param p - WAL file path + * @return WAL file name + * @throws IOException exception + */ + public static String getUniqueWALFileNamePart(Path p) throws IOException { + return p.getName(); + } + + /** + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates + * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and + * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. + * @param fs file system + * @param dir directory + * @param filter path filter + * @return null if dir is empty or doesn't exist, otherwise FileStatus array + */ + public static FileStatus[] + listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException { + FileStatus[] status = null; + try { + status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); + } catch (FileNotFoundException fnfe) { + // if directory doesn't exist, return null + if (LOG.isTraceEnabled()) { + LOG.trace(dir + " doesn't exist"); + } + } + if (status == null || status.length < 1) return null; + return status; + } + + /** + * Return the 'path' component of a Path. In Hadoop, Path is an URI. This method returns the + * 'path' component of a Path's URI: e.g. If a Path is + * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, this method returns + * /hbase_trunk/TestTable/compaction.dir. This method is useful if you want to print + * out a Path without qualifying Filesystem instance. + * @param p Filesystem Path whose 'path' component we are to return. + * @return Path portion of the Filesystem + */ + public static String getPath(Path p) { + return p.toUri().getPath(); + } + + /** + * Given the backup root dir and the backup id, return the log file location for an incremental + * backup. 
+ * @param backupRootDir backup root directory + * @param backupId backup id + * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" + */ + public static String getLogBackupDir(String backupRootDir, String backupId) { + return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + + HConstants.HREGION_LOGDIR_NAME; + } + + private static List getHistory(Configuration conf, Path backupRootPath) + throws IOException { + // Get all (n) history from backup root destination + FileSystem fs = FileSystem.get(conf); + RemoteIterator it = fs.listLocatedStatus(backupRootPath); + + List infos = new ArrayList(); + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (!lfs.isDirectory()) continue; + String backupId = lfs.getPath().getName(); + try { + BackupInfo info = loadBackupInfo(backupRootPath, backupId, fs); + infos.add(info); + } catch(IOException e) { + LOG.error("Can not load backup info from: "+ lfs.getPath(), e); + } + } + // Sort + Collections.sort(infos, new Comparator() { + + @Override + public int compare(BackupInfo o1, BackupInfo o2) { + long ts1 = getTimestamp(o1.getBackupId()); + long ts2 = getTimestamp(o2.getBackupId()); + if (ts1 == ts2) return 0; + return ts1 < ts2 ? 1 : -1; + } + + private long getTimestamp(String backupId) { + String[] split = backupId.split("_"); + return Long.parseLong(split[1]); + } + }); + return infos; + } + + public static List getHistory(Configuration conf, int n, Path backupRootPath, + BackupInfo.Filter... filters) throws IOException { + List infos = getHistory(conf, backupRootPath); + List ret = new ArrayList(); + for (BackupInfo info : infos) { + if (ret.size() == n) { + break; + } + boolean passed = true; + for (int i = 0; i < filters.length; i++) { + if (!filters[i].apply(info)) { + passed = false; + break; + } + } + if (passed) { + ret.add(info); + } + } + return ret; + } + + public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs) + throws IOException { + Path backupPath = new Path(backupRootPath, backupId); + + RemoteIterator it = fs.listFiles(backupPath, true); + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) { + // Load BackupManifest + BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent()); + BackupInfo info = manifest.toBackupInfo(); + return info; + } + } + return null; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java index 486fd2b..d9bf749 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java @@ -158,14 +158,13 @@ public final class BackupServerUtil { * @throws IOException exception * @throws InterruptedException exception */ - public static void copyTableRegionInfo(MasterServices svc, BackupInfo backupContext, + public static void copyTableRegionInfo(Connection conn, BackupInfo backupContext, Configuration conf) throws IOException, InterruptedException { Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); // for each table in the table set, copy out the table info and region // info files in the correct directory structure - Connection conn = svc.getConnection(); for (TableName table : backupContext.getTables()) { if(!MetaTableAccessor.tableExists(conn, table)) { 
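Reviewer note (not part of the patch): the new BackupClientUtil above exposes getHistory(Configuration, int, Path, BackupInfo.Filter...) for listing backups under a backup root. A minimal, hedged usage sketch follows; the backup root path and the filter predicate are illustrative, and the getState() accessor on BackupInfo is assumed from the setState() call seen elsewhere in this patch rather than confirmed here.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.util.BackupClientUtil;

public class BackupHistorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative backup root; substitute the real target root directory.
    Path backupRoot = new Path("hdfs://namenode:9000/user/hbase/backup1");

    // Keep only completed backups; getHistory() applies every filter to each BackupInfo.
    BackupInfo.Filter completedOnly = new BackupInfo.Filter() {
      @Override
      public boolean apply(BackupInfo info) {
        // Assumes a getState() getter matching the BackupState enum used in this patch.
        return info.getState() == BackupState.COMPLETE;
      }
    };

    // Returns at most 10 entries, newest first (sorted by the timestamp embedded in the backup id).
    List<BackupInfo> recent = BackupClientUtil.getHistory(conf, 10, backupRoot, completedOnly);
    for (BackupInfo info : recent) {
      System.out.println(info.getBackupId() + " started at " + info.getStartTs());
    }
  }
}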
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java new file mode 100644 index 0000000..76402c7 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.util; +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +/** + * Backup set is a named group of HBase tables, + * which are managed together by Backup/Restore + * framework. Instead of using list of tables in backup or restore + * operation, one can use set's name instead. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class BackupSet { + private final String name; + private final List tables; + + public BackupSet(String name, List tables) { + this.name = name; + this.tables = tables; + } + + public String getName() { + return name; + } + + public List getTables() { + return tables; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(name).append("={"); + for (int i = 0; i < tables.size(); i++) { + sb.append(tables.get(i)); + if (i < tables.size() - 1) { + sb.append(","); + } + } + sb.append("}"); + return sb.toString(); + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java index 3da7860..cc2ecdf 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.backup.util; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -39,26 +38,21 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.RestoreService; import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.RestoreService; import org.apache.hadoop.hbase.backup.impl.BackupManifest; import 
org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -68,7 +62,6 @@
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * A collection for methods used by multiple classes to restore HBase tables.
@@ -172,33 +165,21 @@
     return regionDirList;
   }
 
-  static void modifyTableSync(MasterServices svc, HTableDescriptor desc) throws IOException {
-    svc.modifyTable(desc.getTableName(), desc, HConstants.NO_NONCE, HConstants.NO_NONCE);
-    @SuppressWarnings("serial")
-    Pair status = new Pair() {{
-      setFirst(0);
-      setSecond(0);
-    }};
-    int i = 0;
-    do {
-      status = svc.getAssignmentManager().getReopenStatus(desc.getTableName());
-      if (status.getSecond() != 0) {
-        LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond()
-            + " regions updated.");
-        try {
-          Thread.sleep(1 * 1000l);
-        } catch (InterruptedException ie) {
-          InterruptedIOException iie = new InterruptedIOException();
-          iie.initCause(ie);
-          throw iie;
+  static void modifyTableSync(Connection conn, HTableDescriptor desc) throws IOException {
+
+    try (Admin admin = conn.getAdmin()) {
+      admin.modifyTable(desc.getTableName(), desc);
+      int attempt = 0;
+      int maxAttempts = 600;
+      while (!admin.isTableAvailable(desc.getTableName())) {
+        Thread.sleep(100);
+        attempt++;
+        if (attempt > maxAttempts) {
+          throw new IOException("Timeout expired " + (maxAttempts * 100) + " ms");
         }
-      } else {
-        LOG.debug("All regions updated.");
-        break;
       }
-    } while (status.getFirst() != 0 && i++ < 500);
-    if (status.getFirst() != 0) {
-      throw new IOException("Failed to update all regions even after 500 seconds.");
+    } catch (Exception e) {
+      throw new IOException(e);
     }
   }
@@ -206,7 +187,7 @@
    * During incremental backup operation. 
Call WalPlayer to replay WAL in backup image Currently * tableNames and newTablesNames only contain single table, will be expanded to multiple tables in * the future - * @param svc MasterServices + * @param conn HBase connection * @param tableBackupPath backup path * @param logDirs : incremental backup folders, which contains WAL * @param tableNames : source tableNames(table names were backuped) @@ -214,9 +195,10 @@ public class RestoreServerUtil { * @param incrBackupId incremental backup Id * @throws IOException exception */ - public void incrementalRestoreTable(MasterServices svc, Path tableBackupPath, Path[] logDirs, + public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs, TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException { + try (Admin admin = conn.getAdmin();) { if (tableNames.length != newTableNames.length) { throw new IOException("Number of source tables and target tables does not match!"); } @@ -225,7 +207,7 @@ public class RestoreServerUtil { // for incremental backup image, expect the table already created either by user or previous // full backup. Here, check that all new tables exists for (TableName tableName : newTableNames) { - if (!MetaTableAccessor.tableExists(svc.getConnection(), tableName)) { + if (!admin.tableExists(tableName)) { throw new IOException("HBase table " + tableName + " does not exist. Create the table first, e.g. by restoring a full backup."); } @@ -237,7 +219,7 @@ public class RestoreServerUtil { LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId); TableName newTableName = newTableNames[i]; - HTableDescriptor newTableDescriptor = svc.getTableDescriptors().get(newTableName); + HTableDescriptor newTableDescriptor = admin.getTableDescriptor(newTableName); List families = Arrays.asList(tableDescriptor.getColumnFamilies()); List existingFamilies = Arrays.asList(newTableDescriptor.getColumnFamilies()); @@ -255,7 +237,7 @@ public class RestoreServerUtil { } } if (schemaChangeNeeded) { - RestoreServerUtil.modifyTableSync(svc, newTableDescriptor); + RestoreServerUtil.modifyTableSync(conn, newTableDescriptor); LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor); } } @@ -263,12 +245,13 @@ public class RestoreServerUtil { BackupRestoreServerFactory.getRestoreService(conf); restoreService.run(logDirs, tableNames, newTableNames, false); + } } - public void fullRestoreTable(MasterServices svc, Path tableBackupPath, TableName tableName, + public void fullRestoreTable(Connection conn, Path tableBackupPath, TableName tableName, TableName newTableName, boolean truncateIfExists, String lastIncrBackupId) throws IOException { - restoreTableAndCreate(svc, tableName, newTableName, tableBackupPath, truncateIfExists, + restoreTableAndCreate(conn, tableName, newTableName, tableBackupPath, truncateIfExists, lastIncrBackupId); } @@ -386,7 +369,7 @@ public class RestoreServerUtil { return null; } - private void restoreTableAndCreate(MasterServices svc, TableName tableName, + private void restoreTableAndCreate(Connection conn, TableName tableName, TableName newTableName, Path tableBackupPath, boolean truncateIfExists, String lastIncrBackupId) throws IOException { if (newTableName == null || newTableName.equals("")) { @@ -434,7 +417,7 @@ public class RestoreServerUtil { + ", will only create table"); } tableDescriptor.setName(newTableName); - checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, null, tableDescriptor, + 
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor, truncateIfExists); return; } else { @@ -461,7 +444,7 @@ public class RestoreServerUtil { // should only try to create the table with all region informations, so we could pre-split // the regions in fine grain - checkAndCreateTable(svc, tableBackupPath, tableName, newTableName, regionPathList, + checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, regionPathList, tableDescriptor, truncateIfExists); // Now get region splits from full backup @@ -702,18 +685,18 @@ public class RestoreServerUtil { * @param htd table descriptor * @throws IOException exception */ - private void checkAndCreateTable(MasterServices svc, Path tableBackupPath, TableName tableName, + private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName, TableName targetTableName, ArrayList regionDirList, HTableDescriptor htd, boolean truncateIfExists) throws IOException { - try { + try (Admin admin = conn.getAdmin();){ boolean createNew = false; - if (MetaTableAccessor.tableExists(svc.getConnection(), targetTableName)) { + if (admin.tableExists(targetTableName)) { if(truncateIfExists) { LOG.info("Truncating exising target table '" + targetTableName + "', preserving region splits"); - svc.disableTable(targetTableName, HConstants.NO_NONCE, HConstants.NO_NONCE); - svc.truncateTable(targetTableName, true, HConstants.NO_NONCE, HConstants.NO_NONCE); + admin.disableTable(targetTableName); + admin.truncateTable(targetTableName, true); } else{ LOG.info("Using exising target table '" + targetTableName + "'"); } @@ -724,14 +707,14 @@ public class RestoreServerUtil { LOG.info("Creating target table '" + targetTableName + "'"); byte[][] keys = null; if (regionDirList == null || regionDirList.size() == 0) { - svc.createTable(htd, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + admin.createTable(htd, null); } else { keys = generateBoundaryKeys(regionDirList); // create table using table descriptor and region boundaries - svc.createTable(htd, keys, HConstants.NO_NONCE, HConstants.NO_NONCE); + admin.createTable(htd, keys); } long startTime = EnvironmentEdgeManager.currentTime(); - while (!((ClusterConnection)svc.getConnection()).isTableAvailable(targetTableName, keys)) { + while (!admin.isTableAvailable(targetTableName, keys)) { Thread.sleep(100); if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) { throw new IOException("Time out "+TABLE_AVAILABILITY_WAIT_TIME+ diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a02f011..97f08e0 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -83,9 +83,9 @@ import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; -import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; -import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; -import org.apache.hadoop.hbase.backup.master.RestoreTablesProcedure; +import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; +import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; +import org.apache.hadoop.hbase.backup.impl.RestoreTablesClient; import 
org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; @@ -2617,120 +2617,7 @@ public class HMaster extends HRegionServer implements MasterServices { return procInfoList; } - @Override - public Pair backupTables(final BackupType type, - List tableList, final String targetRootDir, final int workers, - final long bandwidth, final String setName, - final long nonceGroup, final long nonce) throws IOException { - long procId; - String backupId = (setName == null || setName.length() == 0? - BackupRestoreConstants.BACKUPID_PREFIX: setName + "_") + - EnvironmentEdgeManager.currentTime(); - if (type == BackupType.INCREMENTAL) { - Set incrTableSet = null; - try (BackupSystemTable table = new BackupSystemTable(getConnection())) { - incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); - } - - if (incrTableSet.isEmpty()) { - LOG.warn("Incremental backup table set contains no table.\n" - + "Use 'backup create full' or 'backup stop' to \n " - + "change the tables covered by incremental backup."); - throw new DoNotRetryIOException("No table covered by incremental backup."); - } - - tableList.removeAll(incrTableSet); - if (!tableList.isEmpty()) { - String extraTables = StringUtils.join(",", tableList); - LOG.error("Some tables (" + extraTables + ") haven't gone through full backup"); - throw new DoNotRetryIOException("Perform full backup on " + extraTables + " first, " - + "then retry the command"); - } - LOG.info("Incremental backup for the following table set: " + incrTableSet); - tableList = Lists.newArrayList(incrTableSet); - } - if (tableList != null && !tableList.isEmpty()) { - for (TableName table : tableList) { - String targetTableBackupDir = - HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); - Path targetTableBackupDirPath = new Path(targetTableBackupDir); - FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf); - if (outputFs.exists(targetTableBackupDirPath)) { - throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir - + " exists already."); - } - } - ArrayList nonExistingTableList = null; - for (TableName tableName : tableList) { - if (!MetaTableAccessor.tableExists(getConnection(), tableName)) { - if (nonExistingTableList == null) { - nonExistingTableList = new ArrayList<>(); - } - nonExistingTableList.add(tableName); - } - } - if (nonExistingTableList != null) { - if (type == BackupType.INCREMENTAL ) { - LOG.warn("Incremental backup table set contains non-exising table: " - + nonExistingTableList); - // Update incremental backup set - tableList = excludeNonExistingTables(tableList, nonExistingTableList); - } else { - // Throw exception only in full mode - we try to backup non-existing table - throw new DoNotRetryIOException("Non-existing tables found in the table list: " - + nonExistingTableList); - } - } - } - if (type == BackupType.FULL) { - procId = this.procedureExecutor.submitProcedure( - new FullTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, - tableList, targetRootDir, workers, bandwidth), nonceGroup, nonce); - } else { - procId = this.procedureExecutor.submitProcedure( - new IncrementalTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, - tableList, targetRootDir, workers, bandwidth), nonceGroup, nonce); - } - return new Pair<>(procId, backupId); - } - private List excludeNonExistingTables(List tableList, - List nonExistingTableList) { - - for(TableName table: 
nonExistingTableList) {
-      tableList.remove(table);
-    }
-    return tableList;
-  }
-
-  @Override
-  public long restoreTables(String backupRootDir, String backupId, boolean check,
-      List<TableName> sTableList, List<TableName> tTableList, boolean isOverwrite,
-      final long nonceGroup, final long nonce) throws IOException {
-    if (check) {
-      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
-      // check and load backup image manifest for the tables
-      Path rootPath = new Path(backupRootDir);
-      HBackupFileSystem.checkImageManifestExist(backupManifestMap,
-        sTableList.toArray(new TableName[sTableList.size()]),
-        conf, rootPath, backupId);
-
-      // Check and validate the backup image and its dependencies
-      if (check) {
-        if (RestoreServerUtil.validate(backupManifestMap, conf)) {
-          LOG.info("Checking backup images: ok");
-        } else {
-          String errMsg = "Some dependencies are missing for restore";
-          LOG.error(errMsg);
-          throw new IOException(errMsg);
-        }
-      }
-    }
-    long procId = this.procedureExecutor.submitProcedure(
-      new RestoreTablesProcedure(procedureExecutor.getEnvironment(), backupRootDir, backupId,
-        sTableList, tTableList, isOverwrite), nonceGroup, nonce);
-    return procId;
-  }
 
   /**
    * Returns the list of table descriptors that match the specified request
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8025a67..2c577c3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1053,48 +1053,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public MasterProtos.BackupTablesResponse backupTables(
-      RpcController controller,
-      MasterProtos.BackupTablesRequest request) throws ServiceException {
-    try {
-      BackupTablesResponse.Builder response = BackupTablesResponse.newBuilder();
-      List<TableName> tablesList = new ArrayList<>(request.getTablesList().size());
-      for (HBaseProtos.TableName table : request.getTablesList()) {
-        tablesList.add(ProtobufUtil.toTableName(table));
-      }
-      Pair<Long, String> pair = master.backupTables(
-        BackupType.valueOf(request.getType().name()), tablesList, request.getTargetRootDir(),
-        (int)request.getWorkers(), request.getBandwidth(), request.getBackupSetName(),
-        request.getNonceGroup(), request.getNonce());
-      return response.setProcId(pair.getFirst()).setBackupId(pair.getSecond()).build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public MasterProtos.RestoreTablesResponse restoreTables(
-      RpcController controller,
-      MasterProtos.RestoreTablesRequest request) throws ServiceException {
-    try {
-      RestoreTablesResponse.Builder response = RestoreTablesResponse.newBuilder();
-      List<TableName> tablesList = new ArrayList<>(request.getTablesList().size());
-      for (HBaseProtos.TableName table : request.getTablesList()) {
-        tablesList.add(ProtobufUtil.toTableName(table));
-      }
-      List<TableName> targetTablesList = new ArrayList<>(request.getTargetTablesList().size());
-      for (HBaseProtos.TableName table : request.getTargetTablesList()) {
-        targetTablesList.add(ProtobufUtil.toTableName(table));
-      }
-      long procId = master.restoreTables(request.getBackupRootDir(), request.getBackupId(),
-        request.getDependencyCheckOnly(), tablesList, targetTablesList, request.getOverwrite(),
-        request.getNonceGroup(), request.getNonce());
-      return response.setProcId(procId).build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
   public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
       ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
     try {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index d147ce2..1d7ef4e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -181,36 +181,6 @@ public interface MasterServices extends Server {
       final long nonce) throws IOException;
 
-  /**
-   * Full backup given list of tables
-   * @param type whether the backup is full or incremental
-   * @param tableList list of tables to backup
-   * @param targetRootDir root dir for saving the backup
-   * @param workers number of paralle workers. -1 - system defined
-   * @param bandwidth bandwidth per worker in MB per sec. -1 - unlimited
-   * @param setName - backup set name
-   * @param nonceGroup nonce group
-   * @param nonce nonce
-   * @return pair of procedure Id and backupId
-   * @throws IOException
-   */
-  public Pair<Long, String> backupTables(
-      final BackupType type,
-      List<TableName> tableList,
-      final String targetRootDir,
-      final int workers,
-      final long bandwidth,
-      final String setName,
-      final long nonceGroup,
-      final long nonce) throws IOException;
-
-  /*
-   * Restore table set
-   */
-  public long restoreTables(String backupRootDir,
-      String backupId, boolean check, List<TableName> sTableList,
-      List<TableName> tTableList, boolean isOverwrite, long nonceGroup, long nonce)
-      throws IOException;
 
   /**
    * Enable an existing table
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index ec53a64..abdfc0c 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
@@ -157,23 +157,18 @@ public class TestBackupBase {
   protected String backupTables(BackupType type, List<TableName> tables, String path)
       throws IOException {
     Connection conn = null;
-    HBaseAdmin admin = null;
     BackupAdmin badmin = null;
     String backupId;
     try {
       conn = ConnectionFactory.createConnection(conf1);
-      admin = (HBaseAdmin) conn.getAdmin();
-      BackupRequest request = new BackupRequest();
+      badmin = new HBaseBackupAdmin(conn);
+      BackupRequest request = new BackupRequest();
       request.setBackupType(type).setTableList(tables).setTargetRootDir(path);
-      badmin = admin.getBackupAdmin();
       backupId = badmin.backupTables(request);
     } finally {
       if(badmin != null){
         badmin.close();
       }
-      if (admin != null) {
-        admin.close();
-      }
       if (conn != null) {
         conn.close();
       }
@@ -264,7 +259,7 @@ public class TestBackupBase {
   }
 
   protected BackupAdmin getBackupAdmin() throws IOException {
-    return TEST_UTIL.getAdmin().getBackupAdmin();
+    return new HBaseBackupAdmin(TEST_UTIL.getConnection());
   }
 
   /**
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
index 62c47d6..280314b 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hbase.backup;
 
+import java.io.IOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Test;
@@ -63,7 +63,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on a single table that does not exist.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupSingleDNE() throws Exception {
 
     LOG.info("test full backup fails on a single table that does not exist");
@@ -75,7 +75,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on multiple tables that do not exist.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupMultipleDNE() throws Exception {
 
     LOG.info("test full backup fails on multiple tables that do not exist");
@@ -87,7 +87,7 @@ public class TestBackupBoundaryTests extends TestBackupBase {
    * Verify that full backup fails on tableset containing real and fake tables.
    * @throws Exception
    */
-  @Test(expected = DoNotRetryIOException.class)
+  @Test(expected = IOException.class)
   public void testFullBackupMixExistAndDNE() throws Exception {
 
     LOG.info("create full backup fails on tableset containing real and fake table");
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index abdf3c7..09c3833 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -20,7 +20,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Table;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
index c39241e..1caba22 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -59,9 +60,10 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
+    BackupAdmin client = new HBaseBackupAdmin(conn);
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdFull));
     // #2 - insert some data to table table1
     HTable t1 = (HTable) conn.getTable(table1);
@@ -78,7 +80,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc1 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc1 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc1));
     // #4 - insert some data to table table2
     HTable t2 = (HTable) conn.getTable(table2);
@@ -93,7 +95,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc2 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc2));
     // #6 - insert some data to table table1
     t1 = (HTable) conn.getTable(table1);
@@ -107,7 +109,7 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc3 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc3 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc3));
     // #8 - insert some data to table table2
     t2 = (HTable) conn.getTable(table2);
@@ -121,25 +123,25 @@ public class TestBackupMultipleDeletes extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc4 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc4 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc4));
     // #10 full backup for table3
     tables = Lists.newArrayList(table3);
     request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull2 = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdFull2));
     // #11 - incremental backup for table3
     tables = Lists.newArrayList(table3);
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdInc5 = admin.getBackupAdmin().backupTables(request);
+    String backupIdInc5 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdInc5));
     LOG.error("Delete backupIdInc2");
-    admin.getBackupAdmin().deleteBackups(new String[] { backupIdInc2 });
+    client.deleteBackups(new String[] { backupIdInc2 });
     LOG.error("Delete backupIdInc2 done");
-    List<BackupInfo> list = admin.getBackupAdmin().getHistory(100);
+    List<BackupInfo> list = client.getHistory(100);
     // First check number of backup images before and after
     assertEquals(4, list.size());
     // then verify that no backupIdInc2,3,4
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 639aea4..a01801d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -21,7 +21,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.util.ToolRunner;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index fe00ac5..7dabfb1 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -84,10 +84,11 @@ public class TestIncrementalBackup extends TestBackupBase {
     HBaseAdmin admin = null;
     admin = (HBaseAdmin) conn.getAdmin();
+    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -133,7 +134,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // add column family f2 to table1
@@ -151,7 +152,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = admin.getBackupAdmin().backupTables(request);
+    String backupIdIncMultiple2 = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple2));
 
     // #4 - restore full backup for all tables, without overwrite
@@ -160,8 +161,7 @@ public class TestIncrementalBackup extends TestBackupBase {
     TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-
-    BackupAdmin client = getBackupAdmin();
+    LOG.debug("Restoring full " + backupIdFull);
     client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
       tablesRestoreFull,
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index a7c0713..0a73888 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -25,8 +25,8 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -63,10 +63,11 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     HBaseAdmin admin = null;
     Connection conn = ConnectionFactory.createConnection(conf1);
     admin = (HBaseAdmin) conn.getAdmin();
+    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
 
     BackupRequest request = new BackupRequest();
     request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+    String backupIdFull = client.backupTables(request);
 
     assertTrue(checkSucceeded(backupIdFull));
 
@@ -91,7 +92,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     request = new BackupRequest();
     request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
         .setTargetRootDir(BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    String backupIdIncMultiple = client.backupTables(request);
     assertTrue(checkSucceeded(backupIdIncMultiple));
 
     // #4 - restore full backup for all tables, without overwrite
@@ -101,7 +102,6 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
     TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
 
-    BackupAdmin client = getBackupAdmin();
     client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
       tablesRestoreFull, tablesMapFull, false));
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
index 4f4f7ad..42f0ee7 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
-import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 2a8e3c9..c88f60d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -431,23 +431,6 @@ public class TestCatalogJanitor {
       return null;  //To change body of implemented methods use File | Settings | File Templates.
     }
 
-    @Override
-    public Pair<Long, String> backupTables(
-        final BackupType type,
-        final List<TableName> tableList,
-        final String targetRootDir, final int workers,
-        final long bandwidth, final String setName,
-        final long nonceGroup, final long nonce) throws IOException {
-      return null;
-    }
-
-    @Override
-    public long restoreTables(String backupRootDir,
-        String backupId, boolean check, List<TableName> sTableList,
-        List<TableName> tTableList, boolean isOverwrite, long nonceGroup, long nonce)
-        throws IOException {
-      return -1;
-    }
 
     @Override
     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
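
Taken together, the hunks above drop the backup/restore entry points from the master side (HMaster, MasterRpcServices, MasterServices and the MockMasterServices stub in TestCatalogJanitor) and move the tests onto the client-side HBaseBackupAdmin. The sketch below is not part of the patch; it only condenses the usage pattern the updated tests follow. HBaseBackupAdmin, BackupRequest, BackupType and RestoreServerUtil.createRestoreRequest are taken from the diff, while the wrapper class, method name, parameters and the BackupRequest import location are illustrative assumptions.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.HBaseBackupAdmin;
import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper, not part of this patch.
public class BackupRestoreClientSketch {
  // Runs a full backup of the given tables, then restores the image into new table names.
  public static void fullBackupThenRestore(Configuration conf, List<TableName> tables,
      String backupRootDir, TableName[] fromTables, TableName[] toTables) throws IOException {
    Connection conn = ConnectionFactory.createConnection(conf);
    // The backup admin is now built directly on a Connection instead of HBaseAdmin.getBackupAdmin().
    HBaseBackupAdmin client = new HBaseBackupAdmin(conn);
    try {
      BackupRequest request = new BackupRequest();
      request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(backupRootDir);
      // Returns the backup id later used for restore, delete and history calls.
      String backupId = client.backupTables(request);

      // check=false, overwrite=false, mirroring the calls in the updated tests.
      client.restore(RestoreServerUtil.createRestoreRequest(backupRootDir, backupId, false,
          fromTables, toTables, false));
    } finally {
      client.close();
      conn.close();
    }
  }
}

This is the same entry point TestBackupBase#getBackupAdmin() now returns, so the tests and any external caller share one client-side path instead of the removed master RPCs.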