.../java/org/apache/hadoop/hbase/client/Admin.java | 50 +++------- .../apache/hadoop/hbase/client/CompactType.java | 35 +++++++ .../hadoop/hbase/client/CompactionState.java | 29 ++++++ .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 102 +++++++++++++++------ .../hadoop/hbase/client/MasterSwitchType.java | 29 ++++++ .../hadoop/hbase/client/SnapshotDescription.java | 84 +++++++++++++++++ .../apache/hadoop/hbase/client/SnapshotType.java | 29 ++++++ .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 71 ++++++++++++++ .../hadoop/hbase/protobuf/RequestConverter.java | 10 +- .../hadoop/hbase/client/TestSnapshotFromAdmin.java | 25 +++-- .../hbase/chaos/actions/CompactMobAction.java | 5 +- .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 6 +- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 2 +- .../coprocessor/BaseMasterAndRegionObserver.java | 6 +- .../hbase/coprocessor/BaseMasterObserver.java | 6 +- .../hadoop/hbase/coprocessor/MasterObserver.java | 6 +- .../hadoop/hbase/master/AssignmentManager.java | 6 +- .../org/apache/hadoop/hbase/master/HMaster.java | 6 +- .../hadoop/hbase/master/MasterCoprocessorHost.java | 6 +- .../hadoop/hbase/master/MasterRpcServices.java | 12 +-- .../master/normalizer/SimpleRegionNormalizer.java | 2 +- .../hbase/security/access/AccessController.java | 6 +- .../security/visibility/VisibilityController.java | 6 +- .../hadoop/hbase/snapshot/CreateSnapshot.java | 8 +- .../apache/hadoop/hbase/snapshot/SnapshotInfo.java | 38 +++++--- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 3 +- .../hbase/zookeeper/SplitOrMergeTracker.java | 10 +- .../resources/hbase-webapps/master/snapshot.jsp | 2 +- .../main/resources/hbase-webapps/master/table.jsp | 2 +- .../hadoop/hbase/client/TestFromClientSide.java | 3 +- .../hbase/client/TestSnapshotFromClient.java | 33 +++---- .../hbase/client/TestSplitOrMergeStatus.java | 32 +++---- .../hbase/coprocessor/TestMasterObserver.java | 5 +- .../hbase/master/TestDistributedLogSplitting.java | 5 +- 
.../hadoop/hbase/master/TestWarmupRegion.java | 5 +- .../procedure/TestCloneSnapshotProcedure.java | 10 +- .../procedure/TestRestoreSnapshotProcedure.java | 8 +- .../hbase/mob/compactions/TestMobCompactor.java | 13 +-- .../hbase/regionserver/TestCompactionState.java | 4 +- .../apache/hadoop/hbase/regionserver/TestTags.java | 2 +- .../security/access/TestAccessController.java | 3 +- .../hbase/snapshot/SnapshotTestingUtils.java | 43 +++++---- .../snapshot/TestFlushSnapshotFromClient.java | 68 ++++++++------ .../TestRestoreFlushSnapshotFromClient.java | 8 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 16 ++-- 45 files changed, 599 insertions(+), 261 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 97356a2..cd92638 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -41,9 +41,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.quotas.QuotaFilter; import org.apache.hadoop.hbase.quotas.QuotaRetriever; import org.apache.hadoop.hbase.quotas.QuotaSettings; @@ -1153,7 +1150,7 @@ public interface Admin extends Abortable, Closeable { * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName) + CompactionState getCompactionState(final TableName tableName) throws IOException; /** @@ 
-1164,7 +1161,7 @@ public interface Admin extends Abortable, Closeable { * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - AdminProtos.GetRegionInfoResponse.CompactionState getCompactionStateForRegion( + CompactionState getCompactionStateForRegion( final byte[] regionName) throws IOException; /** @@ -1244,7 +1241,7 @@ public interface Admin extends Abortable, Closeable { */ void snapshot(final String snapshotName, final TableName tableName, - HBaseProtos.SnapshotDescription.Type type) throws IOException, SnapshotCreationException, + SnapshotType type) throws IOException, SnapshotCreationException, IllegalArgumentException; /** @@ -1265,7 +1262,7 @@ public interface Admin extends Abortable, Closeable { * @throws SnapshotCreationException if snapshot failed to be taken * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - void snapshot(HBaseProtos.SnapshotDescription snapshot) + void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException, IllegalArgumentException; /** @@ -1278,7 +1275,7 @@ public interface Admin extends Abortable, Closeable { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - MasterProtos.SnapshotResponse takeSnapshotAsync(HBaseProtos.SnapshotDescription snapshot) + void takeSnapshotAsync(SnapshotDescription snapshot) throws IOException, SnapshotCreationException; /** @@ -1297,7 +1294,7 @@ public interface Admin extends Abortable, Closeable { * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is * unknown */ - boolean isSnapshotFinished(final HBaseProtos.SnapshotDescription snapshot) + boolean isSnapshotFinished(final SnapshotDescription snapshot) throws IOException, HBaseSnapshotException, UnknownSnapshotException; /** @@ -1470,7 +1467,7 @@ public interface Admin extends Abortable, 
Closeable { * @return a list of snapshot descriptors for completed snapshots * @throws IOException if a network error occurs */ - List listSnapshots() throws IOException; + List listSnapshots() throws IOException; /** * List all the completed snapshots matching the given regular expression. @@ -1479,7 +1476,7 @@ public interface Admin extends Abortable, Closeable { * @return - returns a List of SnapshotDescription * @throws IOException if a remote or network exception occurs */ - List listSnapshots(String regex) throws IOException; + List listSnapshots(String regex) throws IOException; /** * List all the completed snapshots matching the given pattern. @@ -1488,7 +1485,7 @@ public interface Admin extends Abortable, Closeable { * @return - returns a List of SnapshotDescription * @throws IOException if a remote or network exception occurs */ - List listSnapshots(Pattern pattern) throws IOException; + List listSnapshots(Pattern pattern) throws IOException; /** * List all the completed snapshots matching the given table name regular expression and snapshot @@ -1498,7 +1495,7 @@ public interface Admin extends Abortable, Closeable { * @return - returns a List of completed SnapshotDescription * @throws IOException if a remote or network exception occurs */ - List listTableSnapshots(String tableNameRegex, + List listTableSnapshots(String tableNameRegex, String snapshotNameRegex) throws IOException; /** @@ -1509,7 +1506,7 @@ public interface Admin extends Abortable, Closeable { * @return - returns a List of completed SnapshotDescription * @throws IOException if a remote or network exception occurs */ - List listTableSnapshots(Pattern tableNamePattern, + List listTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) throws IOException; /** @@ -1701,7 +1698,7 @@ public interface Admin extends Abortable, Closeable { * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - 
AdminProtos.GetRegionInfoResponse.CompactionState getCompactionState(final TableName tableName, + CompactionState getCompactionState(final TableName tableName, CompactType compactType) throws IOException; /** @@ -1741,27 +1738,4 @@ public interface Admin extends Abortable, Closeable { * and rollback the switch state to be original state before you change switch * */ void releaseSplitOrMergeLockAndRollback() throws IOException; - - /** - * Currently, there are only two compact types: - * {@code NORMAL} means do store files compaction; - * {@code MOB} means do mob files compaction. - * */ - @InterfaceAudience.Public - @InterfaceStability.Unstable - public enum CompactType { - - NORMAL (0), - MOB (1); - - CompactType(int value) {} - } - - @InterfaceAudience.Public - @InterfaceStability.Evolving - public enum MasterSwitchType { - SPLIT, - MERGE - } - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java new file mode 100644 index 0000000..17fec2b --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Currently, there are only two compact types: + * {@code NORMAL} means do store files compaction; + * {@code MOB} means do mob files compaction. + * */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public enum CompactType { + + NORMAL (0), + MOB (1); + + CompactType(int value) {} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java new file mode 100644 index 0000000..b4824ef --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO representing the compaction state + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum CompactionState { + NONE, MINOR, MAJOR, MAJOR_AND_MINOR; +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 83f8bc1..cffaf74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; @@ -80,7 +81,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; @@ -89,7 +89,6 @@ 
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest; @@ -2262,7 +2261,10 @@ public class HBaseAdmin implements Admin { PayloadCarryingRpcController controller = rpcControllerFactory.newController(); // TODO: this does not do retries, it should. Set priority and timeout in controller GetRegionInfoResponse response = admin.getRegionInfo(controller, request); - return response.getCompactionState(); + if (response.getCompactionState() != null) { + return ProtobufUtil.createCompactionState(response.getCompactionState()); + } + return null; } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -2272,33 +2274,30 @@ public class HBaseAdmin implements Admin { public void snapshot(final String snapshotName, final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { - snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH); + snapshot(snapshotName, tableName, SnapshotType.FLUSH); } @Override public void snapshot(final byte[] snapshotName, final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { - snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH); + snapshot(Bytes.toString(snapshotName), tableName, SnapshotType.FLUSH); } @Override public void snapshot(final String snapshotName, final TableName tableName, - SnapshotDescription.Type type) + SnapshotType type) 
throws IOException, SnapshotCreationException, IllegalArgumentException { - SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); - builder.setTable(tableName.getNameAsString()); - builder.setName(snapshotName); - builder.setType(type); - snapshot(builder.build()); + snapshot(new SnapshotDescription(snapshotName, tableName.getNameAsString(), type)); } @Override - public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException, + public void snapshot(SnapshotDescription snapshotDesc) throws IOException, SnapshotCreationException, IllegalArgumentException { // actually take the snapshot - SnapshotResponse response = takeSnapshotAsync(snapshot); - final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot) - .build(); + HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc); + SnapshotResponse response = asyncSnapshot(snapshot); + final IsSnapshotDoneRequest request = + IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build(); IsSnapshotDoneResponse done = null; long start = EnvironmentEdgeManager.currentTime(); long max = response.getExpectedTimeout(); @@ -2336,8 +2335,37 @@ public class HBaseAdmin implements Admin { } @Override - public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException, + public void takeSnapshotAsync(SnapshotDescription snapshotDesc) throws IOException, SnapshotCreationException { + HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc); + asyncSnapshot(snapshot); + } + + private HBaseProtos.SnapshotDescription + createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + if (snapshotDesc.getTable() != null) { + builder.setTable(snapshotDesc.getTable()); + } + if (snapshotDesc.getName() != null) { + builder.setName(snapshotDesc.getName()); + } + if 
(snapshotDesc.getOwner() != null) { + builder.setOwner(snapshotDesc.getOwner()); + } + if (snapshotDesc.getCreationTime() != -1) { + builder.setCreationTime(snapshotDesc.getCreationTime()); + } + if (snapshotDesc.getVersion() != -1) { + builder.setVersion(snapshotDesc.getVersion()); + } + builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType())); + HBaseProtos.SnapshotDescription snapshot = builder.build(); + return snapshot; + } + + private SnapshotResponse asyncSnapshot(HBaseProtos.SnapshotDescription snapshot) + throws IOException { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot) .build(); @@ -2353,9 +2381,9 @@ public class HBaseAdmin implements Admin { } @Override - public boolean isSnapshotFinished(final SnapshotDescription snapshot) + public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc) throws IOException, HBaseSnapshotException, UnknownSnapshotException { - + final HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc); return executeCallable(new MasterCallable(getConnection()) { @Override public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException { @@ -2640,7 +2668,7 @@ public class HBaseAdmin implements Admin { private Future internalRestoreSnapshotAsync( final String snapshotName, final TableName tableName) throws IOException, RestoreSnapshotException { - final SnapshotDescription snapshot = SnapshotDescription.newBuilder() + final HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder() .setName(snapshotName).setTable(tableName.getNameAsString()).build(); // actually restore the snapshot @@ -2668,7 +2696,7 @@ public class HBaseAdmin implements Admin { private static class RestoreSnapshotFuture extends TableFuture { public RestoreSnapshotFuture( final HBaseAdmin admin, - final SnapshotDescription snapshot, + final 
HBaseProtos.SnapshotDescription snapshot, final TableName tableName, final RestoreSnapshotResponse response) { super(admin, tableName, @@ -2699,8 +2727,16 @@ public class HBaseAdmin implements Admin { public List call(int callTimeout) throws ServiceException { PayloadCarryingRpcController controller = rpcControllerFactory.newController(); controller.setCallTimeout(callTimeout); - return master.getCompletedSnapshots(controller, - GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList(); + List snapshotsList = master + .getCompletedSnapshots(controller, GetCompletedSnapshotsRequest.newBuilder().build()) + .getSnapshotsList(); + List result = new ArrayList(snapshotsList.size()); + for (HBaseProtos.SnapshotDescription snapshot : snapshotsList) { + result.add(new SnapshotDescription(snapshot.getName(), snapshot.getTable(), + ProtobufUtil.createSnapshotType(snapshot.getType()), snapshot.getOwner(), + snapshot.getCreationTime(), snapshot.getVersion())); + } + return result; } }); } @@ -2762,7 +2798,9 @@ public class HBaseAdmin implements Admin { controller.setCallTimeout(callTimeout); master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder(). 
- setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build() + setSnapshot( + HBaseProtos.SnapshotDescription.newBuilder().setName(snapshotName).build()) + .build() ); return null; } @@ -2795,7 +2833,7 @@ public class HBaseAdmin implements Admin { PayloadCarryingRpcController controller = rpcControllerFactory.newController(); controller.setCallTimeout(callTimeout); this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder() - .setSnapshot(snapshot).build()); + .setSnapshot(createHBaseProtosSnapshotDesc(snapshot)).build()); return null; } }); @@ -2998,7 +3036,8 @@ public class HBaseAdmin implements Admin { @Override public CompactionState getCompactionState(TableName tableName, CompactType compactType) throws IOException { - CompactionState state = CompactionState.NONE; + AdminProtos.GetRegionInfoResponse.CompactionState state = + AdminProtos.GetRegionInfoResponse.CompactionState.NONE; checkTableExists(tableName); PayloadCarryingRpcController controller = rpcControllerFactory.newController(); switch (compactType) { @@ -3040,16 +3079,16 @@ public class HBaseAdmin implements Admin { case MAJOR_AND_MINOR: return CompactionState.MAJOR_AND_MINOR; case MAJOR: - if (state == CompactionState.MINOR) { + if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MINOR) { return CompactionState.MAJOR_AND_MINOR; } - state = CompactionState.MAJOR; + state = AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR; break; case MINOR: - if (state == CompactionState.MAJOR) { + if (state == AdminProtos.GetRegionInfoResponse.CompactionState.MAJOR) { return CompactionState.MAJOR_AND_MINOR; } - state = CompactionState.MINOR; + state = AdminProtos.GetRegionInfoResponse.CompactionState.MINOR; break; case NONE: default: // nothing, continue @@ -3080,7 +3119,10 @@ public class HBaseAdmin implements Admin { } break; } - return state; + if(state != null) { + return ProtobufUtil.createCompactionState(state); + } + return null; } /** diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java new file mode 100644 index 0000000..7e31b25 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +/** + * Represents the master switch type + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum MasterSwitchType { + SPLIT, + MERGE +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java new file mode 100644 index 0000000..a455937 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * The POJO equivalent of HBaseProtos.SnapshotDescription + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class SnapshotDescription { + private String name; + private String table; + private SnapshotType snapShotType = SnapshotType.DISABLED; + private String owner; + private long creationTime = -1L; + private int version = -1; + + public SnapshotDescription(String name) { + this(name, null); + } + + public SnapshotDescription(String name, String table) { + this(name, table, SnapshotType.DISABLED, null); + } + + public SnapshotDescription(String name, String table, SnapshotType type) { + this(name, table, type, null); + } + + public SnapshotDescription(String name, String table, SnapshotType type, String owner) { + this(name, table, type, owner, -1, -1); + } + + public SnapshotDescription(String name, String table, SnapshotType type, String owner, + long creationTime, int version) { + this.name = name; + this.table = table; + this.snapShotType = type; + this.owner = owner; + this.creationTime = creationTime; + this.version = version; + } + + public String getName() { + 
return this.name; + } + + public String getTable() { + return this.table; + } + + public SnapshotType getType() { + return this.snapShotType; + } + + public String getOwner() { + return this.owner; + } + + public long getCreationTime() { + return this.creationTime; + } + + public int getVersion() { + return this.version; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java new file mode 100644 index 0000000..e3e12bd --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO representing the snapshot type + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum SnapshotType { + DISABLED, FLUSH, SKIPFLUSH; +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 90516ec..27aa1dc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; @@ -67,6 +68,8 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -3397,4 +3400,72 @@ public final class ProtobufUtil { } return htd; } + + /** + * Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState}'s state + * @param state the protobuf CompactionState + * @return CompactionState + */ + public static CompactionState 
createCompactionState(GetRegionInfoResponse.CompactionState state) { + return CompactionState.valueOf(state.toString()); + } + + /** + * Creates {@link HBaseProtos.SnapshotDescription.Type} from {@link SnapshotType} + * @param type the POJO SnapshotType + * @return the protobuf SnapshotDescription type + */ + public static HBaseProtos.SnapshotDescription.Type + createProtosSnapShotDescType(SnapshotType type) { + return HBaseProtos.SnapshotDescription.Type.valueOf(type.name()); + } + + /** + * Creates {@link HBaseProtos.SnapshotDescription.Type} from the type of SnapshotDescription + * string + * @param snapshotDesc string representing the snapshot description type + * @return the protobuf SnapshotDescription type + */ + public static HBaseProtos.SnapshotDescription.Type + createProtosSnapShotDescType(String snapshotDesc) { + return HBaseProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase()); + } + + /** + * Creates {@link SnapshotType} from the type of + * {@link HBaseProtos.SnapshotDescription} + * @param type the protobuf SnapshotDescription type + * @return the POJO SnapshotType + */ + public static SnapshotType createSnapshotType(HBaseProtos.SnapshotDescription.Type type) { + return SnapshotType.valueOf(type.toString()); + } + + /** + * Convert from {@link SnapshotDescription} to {@link HBaseProtos.SnapshotDescription} + * @param snapshotDesc the POJO SnapshotDescription + * @return the protobuf SnapshotDescription + */ + public static HBaseProtos.SnapshotDescription + createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + if (snapshotDesc.getTable() != null) { + builder.setTable(snapshotDesc.getTable()); + } + if (snapshotDesc.getName() != null) { + builder.setName(snapshotDesc.getName()); + } + if (snapshotDesc.getOwner() != null) { + builder.setOwner(snapshotDesc.getOwner()); + } + if
(snapshotDesc.getCreationTime() != -1L) { + builder.setCreationTime(snapshotDesc.getCreationTime()); + } + if (snapshotDesc.getVersion() != -1) { + builder.setVersion(snapshotDesc.getVersion()); + } + builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType())); + HBaseProtos.SnapshotDescription snapshot = builder.build(); + return snapshot; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 7502323..88c421f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -31,12 +31,12 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Action; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec; @@ -1705,7 +1705,7 @@ public final class RequestConverter { * @return a IsSplitOrMergeEnabledRequest */ public static IsSplitOrMergeEnabledRequest buildIsSplitOrMergeEnabledRequest( - Admin.MasterSwitchType switchType) { + MasterSwitchType switchType) { IsSplitOrMergeEnabledRequest.Builder builder = IsSplitOrMergeEnabledRequest.newBuilder(); builder.setSwitchType(convert(switchType)); return builder.build(); @@ -1728,18 +1728,18 @@ public final class RequestConverter { * @return a SetSplitOrMergeEnabledRequest */ public 
static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled, - boolean synchronous, boolean skipLock, Admin.MasterSwitchType... switchTypes) { + boolean synchronous, boolean skipLock, MasterSwitchType... switchTypes) { SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder(); builder.setEnabled(enabled); builder.setSynchronous(synchronous); builder.setSkipLock(skipLock); - for (Admin.MasterSwitchType switchType : switchTypes) { + for (MasterSwitchType switchType : switchTypes) { builder.addSwitchTypes(convert(switchType)); } return builder.build(); } - private static MasterProtos.MasterSwitchType convert(Admin.MasterSwitchType switchType) { + private static MasterProtos.MasterSwitchType convert(MasterSwitchType switchType) { switch (switchType) { case SPLIT: return MasterProtos.MasterSwitchType.SPLIT; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index 4d55c33..b41c859 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest; @@ -141,18 +140,17 @@ public class TestSnapshotFromAdmin { Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); 
Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory); Admin admin = new HBaseAdmin(mockConnection); - SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); // check that invalid snapshot names fail - failSnapshotStart(admin, builder.setName(HConstants.SNAPSHOT_DIR_NAME).build()); - failSnapshotStart(admin, builder.setName("-snapshot").build()); - failSnapshotStart(admin, builder.setName("snapshot fails").build()); - failSnapshotStart(admin, builder.setName("snap$hot").build()); - failSnapshotStart(admin, builder.setName("snap:hot").build()); + failSnapshotStart(admin, new SnapshotDescription(HConstants.SNAPSHOT_DIR_NAME)); + failSnapshotStart(admin, new SnapshotDescription("-snapshot")); + failSnapshotStart(admin, new SnapshotDescription("snapshot fails")); + failSnapshotStart(admin, new SnapshotDescription("snap$hot")); + failSnapshotStart(admin, new SnapshotDescription("snap:hot")); // check the table name also get verified - failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build()); - failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build()); - failSnapshotStart(admin, builder.setName("snapshot").setTable("table fails").build()); - failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build()); + failSnapshotStart(admin, new SnapshotDescription("snapshot", ".table")); + failSnapshotStart(admin, new SnapshotDescription("snapshot", "-table")); + failSnapshotStart(admin, new SnapshotDescription("snapshot", "table fails")); + failSnapshotStart(admin, new SnapshotDescription("snapshot", "tab%le")); // mock the master connection MasterKeepAliveConnection master = Mockito.mock(MasterKeepAliveConnection.class); @@ -167,10 +165,11 @@ public class TestSnapshotFromAdmin { Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse); // make sure that we can use valid names - admin.snapshot(builder.setName("snapshot").setTable("table").build()); + 
admin.snapshot(new SnapshotDescription("snapshot", "table")); } - private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) throws IOException { + private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) + throws IOException { try { admin.snapshot(snapshot); fail("Snapshot should not have succeed with name:" + snapshot.getName()); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java index 2fff77a..b75bbbd 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/CompactMobAction.java @@ -22,6 +22,7 @@ import org.apache.commons.lang.math.RandomUtils; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.CompactType; /** * Action that queues a table compaction. 
@@ -56,9 +57,9 @@ public class CompactMobAction extends Action { LOG.info("Performing action: Compact mob of table " + tableName + ", major=" + major); try { if (major) { - admin.majorCompact(tableName, Admin.CompactType.MOB); + admin.majorCompact(tableName, CompactType.MOB); } else { - admin.compact(tableName, Admin.CompactType.MOB); + admin.compact(tableName, CompactType.MOB); } } catch (Exception ex) { LOG.warn("Mob Compaction failed, might be caused by other chaos: " + ex.getMessage()); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 22bad72..6bc7508 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -827,13 +827,13 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService @Override public boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { return false; } @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } 
@Override diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 951a95e..4b1c192 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -39,13 +39,13 @@ org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.TableName; org.apache.hadoop.hbase.client.Admin; +org.apache.hadoop.hbase.client.SnapshotDescription; org.apache.hadoop.hbase.master.AssignmentManager; org.apache.hadoop.hbase.master.DeadServer; org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.hbase.master.ServerManager; org.apache.hadoop.hbase.protobuf.ProtobufUtil; -org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; org.apache.hadoop.hbase.quotas.QuotaUtil; org.apache.hadoop.hbase.security.access.AccessControlLists; org.apache.hadoop.hbase.security.visibility.VisibilityConstants; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index 65398c2..74d9fe1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.master.RegionPlan; import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -450,7 +450,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver @Override public boolean preSetSplitOrMergeEnabled(ObserverContext ctx, boolean newValue, - Admin.MasterSwitchType switchType) + MasterSwitchType switchType) throws IOException { return false; } @@ -458,7 +458,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver @Override public void postSetSplitOrMergeEnabled(ObserverContext ctx, boolean newValue, - Admin.MasterSwitchType switchType) + MasterSwitchType switchType) throws IOException { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 18c6a0a..3574bbd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -442,13 +442,13 @@ public class BaseMasterObserver implements MasterObserver { @Override public boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { return false; } @Override public void postSetSplitOrMergeEnabled(final 
ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 81f97aa..4b43cfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; @@ -807,7 +807,7 @@ public interface MasterObserver extends Coprocessor { * @param switchType type of switch */ boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException; + final boolean newValue, final MasterSwitchType switchType) throws IOException; /** * Called after setting split / merge switch @@ -816,7 +816,7 @@ public interface MasterObserver extends Coprocessor { * @param switchType type of switch */ void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException; + final boolean newValue, final MasterSwitchType switchType) throws IOException; /** * Called prior to modifying the flag used to enable/disable region balancing. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 2b7713b..b81dcf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; @@ -2364,7 +2364,7 @@ public class AssignmentManager { } if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - Admin.MasterSwitchType.SPLIT)) { + MasterSwitchType.SPLIT)) { return "split switch is off!"; } @@ -2527,7 +2527,7 @@ public class AssignmentManager { } if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - Admin.MasterSwitchType.MERGE)) { + MasterSwitchType.MERGE)) { return "merge switch is off!"; } // Just return in case of retrying diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3b5af42..2f1cd3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -83,7 +83,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; +import 
org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; @@ -2821,10 +2821,10 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Queries the state of the {@link SplitOrMergeTracker}. If it is not initialized, * false is returned. If switchType is illegal, false will return. - * @param switchType see {@link org.apache.hadoop.hbase.client.Admin.MasterSwitchType} + * @param switchType see {@link org.apache.hadoop.hbase.client.MasterSwitchType} * @return The state of the switch */ - public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) { + public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { if (null == splitOrMergeTracker) { return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 313b1ad..14e8c20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -779,7 +779,7 @@ public class MasterCoprocessorHost } public boolean preSetSplitOrMergeEnabled(final boolean newValue, - final Admin.MasterSwitchType switchType) throws IOException { + final MasterSwitchType switchType) throws 
IOException { return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { @Override public void call(MasterObserver oserver, ObserverContext ctx) @@ -790,7 +790,7 @@ public class MasterCoprocessorHost } public void postSetSplitOrMergeEnabled(final boolean newValue, - final Admin.MasterSwitchType switchType) throws IOException { + final MasterSwitchType switchType) throws IOException { execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { @Override public void call(MasterObserver oserver, ObserverContext ctx) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index edfb3ce..18ee4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -1500,8 +1500,8 @@ public class MasterRpcServices extends RSRpcServices if (!master.getSplitOrMergeTracker().lock(skipLock)) { throw new DoNotRetryIOException("can't set splitOrMerge switch due to lock"); } - for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { - Admin.MasterSwitchType switchType = convert(masterSwitchType); + for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { + MasterSwitchType switchType = convert(masterSwitchType); boolean oldValue = 
master.isSplitOrMergeEnabled(switchType); response.addPrevValue(oldValue); boolean bypass = false; @@ -1619,12 +1619,12 @@ public class MasterRpcServices extends RSRpcServices return response.build(); } - private Admin.MasterSwitchType convert(MasterSwitchType switchType) { + private MasterSwitchType convert(MasterProtos.MasterSwitchType switchType) { switch (switchType) { case SPLIT: - return Admin.MasterSwitchType.SPLIT; + return MasterSwitchType.SPLIT; case MERGE: - return Admin.MasterSwitchType.MERGE; + return MasterSwitchType.MERGE; default: break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 13b5fab..583f873 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin.MasterSwitchType; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index fb19a96..e866f29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -60,12 +60,12 @@ import org.apache.hadoop.hbase.Tag; 
import org.apache.hadoop.hbase.TagRewriteCell; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Query; @@ -1262,14 +1262,14 @@ public class AccessController extends BaseMasterAndRegionObserver @Override public boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { requirePermission("setSplitOrMergeEnabled", Action.ADMIN); return false; } @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 1b18f42..3fa3fa3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -51,11 +51,11 @@ import org.apache.hadoop.hbase.TagRewriteCell; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; 
import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -310,13 +310,13 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { return false; } @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java index 06b6017..09284e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import java.util.Arrays; @@ -66,10 +68,10 @@ public class CreateSnapshot extends AbstractHBaseTool { admin = connection.getAdmin(); HBaseProtos.SnapshotDescription.Type type 
= HBaseProtos.SnapshotDescription.Type.FLUSH; if (snapshotType != null) { - type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotName.toUpperCase()); + type = ProtobufUtil.createProtosSnapShotDescType(snapshotName); } - - admin.snapshot(snapshotName, TableName.valueOf(tableName), type); + admin.snapshot(new SnapshotDescription(snapshotName, tableName, + ProtobufUtil.createSnapshotType(type))); } catch (Exception e) { return -1; } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 0a359f8..c944fc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; @@ -47,7 +48,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.WALLink; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.util.FSUtils; @@ -126,14 +128,15 @@ public final class SnapshotInfo extends Configured implements Tool { private AtomicLong hfilesMobSize = new AtomicLong(); private AtomicLong logSize = new AtomicLong(); - private final 
SnapshotDescription snapshot; + private final HBaseProtos.SnapshotDescription snapshot; private final TableName snapshotTable; private final Configuration conf; private final FileSystem fs; - SnapshotStats(final Configuration conf, final FileSystem fs, final SnapshotDescription snapshot) + SnapshotStats(final Configuration conf, final FileSystem fs, + final SnapshotDescription snapshot) { - this.snapshot = snapshot; + this.snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); this.snapshotTable = TableName.valueOf(snapshot.getTable()); this.conf = conf; this.fs = fs; @@ -141,7 +144,9 @@ public final class SnapshotInfo extends Configured implements Tool { /** @return the snapshot descriptor */ public SnapshotDescription getSnapshotDescription() { - return this.snapshot; + return new SnapshotDescription(this.snapshot.getName(), this.snapshot.getTable(), + ProtobufUtil.createSnapshotType(this.snapshot.getType()), this.snapshot.getOwner(), + this.snapshot.getCreationTime(), this.snapshot.getVersion()); } /** @return true if the snapshot is corrupted */ @@ -371,7 +376,8 @@ public final class SnapshotInfo extends Configured implements Tool { return false; } - SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + HBaseProtos.SnapshotDescription snapshotDesc = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc); return true; } @@ -380,7 +386,7 @@ public final class SnapshotInfo extends Configured implements Tool { * Dump the {@link SnapshotDescription} */ private void printInfo() { - SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"); System.out.println("Snapshot Info"); System.out.println("----------------------------------------"); @@ -413,9 
+419,12 @@ public final class SnapshotInfo extends Configured implements Tool { } // Collect information about hfiles and logs in the snapshot - final SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); + final HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); final String table = snapshotDesc.getTable(); - final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, snapshotDesc); + SnapshotDescription desc = new SnapshotDescription(snapshotDesc.getName(), + snapshotDesc.getTable(), ProtobufUtil.createSnapshotType(snapshotDesc.getType()), + snapshotDesc.getOwner(), snapshotDesc.getCreationTime(), snapshotDesc.getVersion()); + final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, new SnapshotReferenceUtil.SnapshotVisitor() { @Override @@ -492,10 +501,11 @@ public final class SnapshotInfo extends Configured implements Tool { */ public static SnapshotStats getSnapshotStats(final Configuration conf, final SnapshotDescription snapshot) throws IOException { + HBaseProtos.SnapshotDescription snapshotDesc = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); - Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); - SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot); + Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotDesc, rootDir); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); final SnapshotStats stats = new SnapshotStats(conf, fs, snapshot); SnapshotReferenceUtil.concurrentVisitReferencedFiles(conf, fs, manifest, new SnapshotReferenceUtil.SnapshotVisitor() { @@ -525,7 +535,11 @@ public final class SnapshotInfo extends Configured implements Tool { List 
snapshotLists = new ArrayList(snapshots.length); for (FileStatus snapshotDirStat: snapshots) { - snapshotLists.add(SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath())); + HBaseProtos.SnapshotDescription snapshotDesc = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); + snapshotLists.add(new SnapshotDescription(snapshotDesc.getName(), + snapshotDesc.getTable(), ProtobufUtil.createSnapshotType(snapshotDesc.getType()), + snapshotDesc.getOwner(), snapshotDesc.getCreationTime(), snapshotDesc.getVersion())); } return snapshotLists; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 5af0634..31ae925 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; @@ -691,7 +692,7 @@ public class HBaseFsck extends Configured implements Closeable { if (shouldDisableSplitAndMerge()) { admin.releaseSplitOrMergeLockAndRollback(); oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); } try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java index e548245..b975c43 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java @@ -24,7 +24,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; @@ -80,7 +80,7 @@ public class SplitOrMergeTracker { mergeStateTracker.start(); } - public boolean isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) { + public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { switch (switchType) { case SPLIT: return splitStateTracker.isSwitchEnabled(); @@ -92,7 +92,7 @@ public class SplitOrMergeTracker { return false; } - public void setSplitOrMergeEnabled(boolean enabled, Admin.MasterSwitchType switchType) + public void setSplitOrMergeEnabled(boolean enabled, MasterSwitchType switchType) throws KeeperException { switch (switchType) { case SPLIT: @@ -164,8 +164,8 @@ public class SplitOrMergeTracker { } private void saveOriginalState() throws KeeperException { - boolean splitEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); - boolean mergeEnabled = isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + boolean splitEnabled = isSplitOrMergeEnabled(MasterSwitchType.SPLIT); + boolean mergeEnabled = isSplitOrMergeEnabled(MasterSwitchType.MERGE); String splitOrMergeStates = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), SplitOrMergeTracker.STATE); ZooKeeperProtos.SplitAndMergeState.Builder builder diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index 
5900383..6bb1aa3 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -21,9 +21,9 @@ import="java.util.Date" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.Admin" + import="org.apache.hadoop.hbase.client.SnapshotDescription" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.snapshot.SnapshotInfo" - import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription" import="org.apache.hadoop.util.StringUtils" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.HBaseConfiguration" %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 4a151e7..693a663 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -28,6 +28,7 @@ import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.HTable" import="org.apache.hadoop.hbase.client.Admin" + import="org.apache.hadoop.hbase.client.CompactionState" import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.HRegionLocation" @@ -39,7 +40,6 @@ import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator" import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.FSUtils" - import="org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.HColumnDescriptor" import="org.apache.hadoop.hbase.client.RegionReplicaUtil" diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 3549791..1d13a68 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -85,7 +85,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService; @@ -278,7 +277,7 @@ public class TestFromClientSide { @Override public boolean evaluate() throws IOException { return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) == - AdminProtos.GetRegionInfoResponse.CompactionState.NONE; + CompactionState.NONE; } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 1b61147..8317376 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; @@ -212,18 
+212,13 @@ public class TestSnapshotFromClient { final String SNAPSHOT_NAME = "offlineTableSnapshot"; byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME); - SnapshotDescription desc = SnapshotDescription.newBuilder() - .setType(SnapshotDescription.Type.DISABLED) - .setTable(STRING_TABLE_NAME) - .setName(SNAPSHOT_NAME) - .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION) - .build(); - admin.snapshot(desc); + admin.snapshot(new SnapshotDescription(SNAPSHOT_NAME, STRING_TABLE_NAME, + SnapshotType.DISABLED, null, -1, SnapshotManifestV1.DESCRIPTOR_VERSION)); LOG.debug("Snapshot completed."); // make sure we have the snapshot - List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshot, TABLE_NAME); + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); @@ -231,9 +226,9 @@ public class TestSnapshotFromClient { LOG.debug("FS state after snapshot:"); FSUtils.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()), LOG); - - SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir, - admin, fs); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM, + rootDir, admin, fs); admin.deleteSnapshot(snapshot); snapshots = admin.listSnapshots(); @@ -292,8 +287,8 @@ public class TestSnapshotFromClient { LOG.debug("Snapshot completed."); // make sure we have the snapshot - List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshot, TABLE_NAME); + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); @@ -304,8 +299,9 @@ public class TestSnapshotFromClient { List 
emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region List nonEmptyCfs = Lists.newArrayList(); - SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs, - rootDir, admin, fs); + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, nonEmptyCfs, + emptyCfs, rootDir, admin, fs); admin.deleteSnapshot(snapshot); snapshots = admin.listSnapshots(); @@ -375,7 +371,8 @@ public class TestSnapshotFromClient { admin.snapshot(Bytes.toBytes(table2Snapshot1), TABLE_NAME); LOG.debug(table2Snapshot1 + " completed."); - List listTableSnapshots = admin.listTableSnapshots("test.*", "Table1.*"); + List listTableSnapshots = + admin.listTableSnapshots("test.*", "Table1.*"); List listTableSnapshotNames = new ArrayList(); assertEquals(2, listTableSnapshots.size()); for (SnapshotDescription s : listTableSnapshots) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java index e1ce63b..c5ca0b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java @@ -78,14 +78,14 @@ public class TestSplitOrMergeStatus { Admin admin = TEST_UTIL.getAdmin(); initSwitchStatus(admin); boolean[] results = admin.setSplitOrMergeEnabled(false, false, - true, Admin.MasterSwitchType.SPLIT); + true, MasterSwitchType.SPLIT); assertEquals(results.length, 1); assertTrue(results[0]); admin.split(t.getName()); int count = waitOnSplitOrMerge(t).size(); assertTrue(orignalCount == count); - results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT); + results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT); assertEquals(results.length, 1); assertFalse(results[0]); admin.split(t.getName()); @@ 
-111,7 +111,7 @@ public class TestSplitOrMergeStatus { waitForMergable(admin, name); int orignalCount = locator.getAllRegionLocations().size(); boolean[] results = admin.setSplitOrMergeEnabled(false, false, - true, Admin.MasterSwitchType.MERGE); + true, MasterSwitchType.MERGE); assertEquals(results.length, 1); assertTrue(results[0]); List regions = admin.getTableRegions(t.getName()); @@ -122,7 +122,7 @@ public class TestSplitOrMergeStatus { assertTrue(orignalCount == count); waitForMergable(admin, name); - results = admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE); + results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE); assertEquals(results.length, 1); assertFalse(results[0]); admin.mergeRegions(regions.get(0).getEncodedNameAsBytes(), @@ -136,12 +136,12 @@ public class TestSplitOrMergeStatus { public void testMultiSwitches() throws IOException { Admin admin = TEST_UTIL.getAdmin(); boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); for (boolean s : switches){ assertTrue(s); } - assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)); - assertFalse(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)); + assertFalse(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)); + assertFalse(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)); admin.close(); } @@ -149,10 +149,10 @@ public class TestSplitOrMergeStatus { public void testSwitchLock() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.setSplitOrMergeEnabled(false, false, false, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); try { admin.setSplitOrMergeEnabled(false, false, true, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); fail(); } catch (IOException e) { 
LOG.info("", e); @@ -160,7 +160,7 @@ public class TestSplitOrMergeStatus { admin.releaseSplitOrMergeLockAndRollback(); try { admin.setSplitOrMergeEnabled(true, false, true, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); } catch (IOException e) { fail(); } @@ -168,14 +168,14 @@ public class TestSplitOrMergeStatus { } private void initSwitchStatus(Admin admin) throws IOException { - if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) { - admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.SPLIT); + if (!admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) { + admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT); } - if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) { - admin.setSplitOrMergeEnabled(true, false, true, Admin.MasterSwitchType.MERGE); + if (!admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)) { + admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE); } - assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)); - assertTrue(admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)); + assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)); + assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)); } private void waitForMergable(Admin admin, TableName t) throws InterruptedException, IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 65fc0c3..5257cee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import 
org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -348,13 +349,13 @@ public class TestMasterObserver { @Override public boolean preSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { return false; } @Override public void postSetSplitOrMergeEnabled(final ObserverContext ctx, - final boolean newValue, final Admin.MasterSwitchType switchType) throws IOException { + final boolean newValue, final MasterSwitchType switchType) throws IOException { } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index cff8db0..4f2385f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -87,7 +88,6 @@ import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; @@ -1421,7 +1421,8 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return (TEST_UTIL.getHBaseAdmin().getCompactionState(tableName) == CompactionState.NONE); + return (TEST_UTIL.getHBaseAdmin() + .getCompactionState(tableName) == CompactionState.NONE); } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index 53ee92b..cb7337e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -31,7 +31,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -106,7 +107,7 @@ public class TestWarmupRegion { @Override public boolean evaluate() throws IOException { return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) == - AdminProtos.GetRegionInfoResponse.CompactionState.NONE; + CompactionState.NONE; } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java index aeafbf8..96f78f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java @@ -33,7 +33,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -59,7 +61,7 @@ public class TestCloneSnapshotProcedure { private static long nonceGroup = HConstants.NO_NONCE; private static long nonce = HConstants.NO_NONCE; - private static SnapshotDescription snapshot = null; + private static HBaseProtos.SnapshotDescription snapshot = null; private static void setupConf(Configuration conf) { conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); @@ -99,7 +101,7 @@ public class TestCloneSnapshotProcedure { assertTrue("expected executor to be running", procExec.isRunning()); } - private SnapshotDescription getSnapshot() throws Exception { + private HBaseProtos.SnapshotDescription getSnapshot() throws Exception { if (snapshot == null) { final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot"); long tid = System.currentTimeMillis(); @@ -116,7 +118,7 @@ public class TestCloneSnapshotProcedure { admin.enableTable(snapshotTableName); List snapshotList = 
admin.listSnapshots(); - snapshot = snapshotList.get(0); + snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0)); } return snapshot; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java index 44d6988..733dcb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java @@ -35,7 +35,9 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.client.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -73,7 +75,7 @@ public class TestRestoreSnapshotProcedure { private static long nonceGroup = HConstants.NO_NONCE; private static long nonce = HConstants.NO_NONCE; - private SnapshotDescription snapshot = null; + private HBaseProtos.SnapshotDescription snapshot = null; private HTableDescriptor snapshotHTD = null; private static void setupConf(Configuration conf) { @@ -141,7 +143,7 @@ public class TestRestoreSnapshotProcedure { admin.snapshot(snapshotName, snapshotTableName); List snapshotList = admin.listSnapshots(); - snapshot = snapshotList.get(0); + snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotList.get(0)); // modify the 
table HColumnDescriptor columnFamilyDescriptor3 = new HColumnDescriptor(CF3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index 9922aff..c0ad2dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -51,6 +51,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.CompactType; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; @@ -68,7 +70,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -356,7 +357,7 @@ public class TestMobCompactor { countFiles(tableName, false, family2)); // do the major mob compaction, it will force all files to compaction - admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB); + admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB); waitUntilMobCompactionFinished(tableName); assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), @@ -399,7 +400,7 @@ public class TestMobCompactor { Cell cell = result.getColumnLatestCell(hcd1.getName(), 
Bytes.toBytes(qf1)); assertEquals("Before compaction: mob value of k0", newValue0, Bytes.toString(CellUtil.cloneValue(cell))); - admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB); + admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB); waitUntilMobCompactionFinished(tableName); // read the latest cell of key0, the cell seqId in bulk loaded file is not reset in the // scanner. The cell that has "new" value is still visible. @@ -449,7 +450,7 @@ public class TestMobCompactor { loadData(admin, bufMut, tableName, new Put[] { put1 }); // now two mob files admin.majorCompact(tableName); waitUntilCompactionFinished(tableName); - admin.majorCompact(tableName, hcd1.getName(), Admin.CompactType.MOB); + admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB); waitUntilMobCompactionFinished(tableName); // read the latest cell of key1. Get get = new Get(key1); @@ -475,12 +476,12 @@ public class TestMobCompactor { private void waitUntilMobCompactionFinished(TableName tableName) throws IOException, InterruptedException { long finished = EnvironmentEdgeManager.currentTime() + 60000; - CompactionState state = admin.getCompactionState(tableName, Admin.CompactType.MOB); + CompactionState state = admin.getCompactionState(tableName, CompactType.MOB); while (EnvironmentEdgeManager.currentTime() < finished) { if (state == CompactionState.NONE) { break; } - state = admin.getCompactionState(tableName, Admin.CompactType.MOB); + state = admin.getCompactionState(tableName, CompactType.MOB); Thread.sleep(10); } assertEquals(CompactionState.NONE, state); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 4715d53..d6302b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java 
@@ -31,9 +31,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -164,7 +164,7 @@ public class TestCompactionState { // otherwise, the compaction should have already been done if (expectedState != state) { for (Region region: regions) { - state = region.getCompactionState(); + state = CompactionState.valueOf(region.getCompactionState().toString()); assertEquals(CompactionState.NONE, state); } } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java index 3a9ace2..06dbc37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTags.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Mutation; @@ -53,7 +54,6 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 37c42a0..92d7806 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -710,7 +711,7 @@ public class TestAccessController extends SecureTestUtil { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetSplitOrMergeEnabled(ObserverContext.createAndPrepare(CP_ENV, null), - true, Admin.MasterSwitchType.MERGE); + true, MasterSwitchType.MERGE); return null; } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index 666eea3..6615a8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.TableDescriptor; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -59,7 +60,8 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; @@ -120,7 +122,7 @@ public class SnapshotTestingUtils { * Make sure that there is only one snapshot returned from the master */ public static void assertOneSnapshotThatMatches(Admin admin, - SnapshotDescription snapshot) throws IOException { + HBaseProtos.SnapshotDescription snapshot) throws IOException { assertOneSnapshotThatMatches(admin, snapshot.getName(), TableName.valueOf(snapshot.getTable())); } @@ -153,7 +155,7 @@ public class SnapshotTestingUtils { } public static void confirmSnapshotValid(HBaseTestingUtility testUtil, - SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family) + HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family) throws IOException { MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem(); confirmSnapshotValid(snapshotDescriptor, tableName, family, @@ -165,7 +167,7 @@ public class SnapshotTestingUtils { * be in the snapshot. 
*/ public static void confirmSnapshotValid( - SnapshotDescription snapshotDescriptor, TableName tableName, + HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, Path rootDir, Admin admin, FileSystem fs) throws IOException { ArrayList nonEmptyTestFamilies = new ArrayList(1); @@ -178,7 +180,7 @@ public class SnapshotTestingUtils { * Confirm that the snapshot has no references files but only metadata. */ public static void confirmEmptySnapshotValid( - SnapshotDescription snapshotDescriptor, TableName tableName, + HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, Path rootDir, Admin admin, FileSystem fs) throws IOException { ArrayList emptyTestFamilies = new ArrayList(1); @@ -194,7 +196,7 @@ public class SnapshotTestingUtils { * by the MasterSnapshotVerifier, at the end of the snapshot operation. */ public static void confirmSnapshotValid( - SnapshotDescription snapshotDescriptor, TableName tableName, + HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, List nonEmptyTestFamilies, List emptyTestFamilies, Path rootDir, Admin admin, FileSystem fs) throws IOException { final Configuration conf = admin.getConfiguration(); @@ -204,7 +206,7 @@ public class SnapshotTestingUtils { snapshotDescriptor, rootDir); assertTrue(fs.exists(snapshotDir)); - SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); // Extract regions and families with store files final Set snapshotFamilies = new TreeSet(Bytes.BYTES_COMPARATOR); @@ -265,7 +267,7 @@ public class SnapshotTestingUtils { * @throws ServiceException if the snapshot fails */ public static void waitForSnapshotToComplete(HMaster master, - SnapshotDescription snapshot, long sleep) throws ServiceException { + HBaseProtos.SnapshotDescription snapshot, long sleep) throws ServiceException { final 
IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder() .setSnapshot(snapshot).build(); IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder() @@ -286,12 +288,13 @@ public class SnapshotTestingUtils { */ public static void snapshot(Admin admin, final String snapshotName, final String tableName, - SnapshotDescription.Type type, int numTries) throws IOException { + HBaseProtos.SnapshotDescription.Type type, int numTries) throws IOException { int tries = 0; CorruptedSnapshotException lastEx = null; while (tries++ < numTries) { try { - admin.snapshot(snapshotName, TableName.valueOf(tableName), type); + admin.snapshot(new SnapshotDescription(snapshotName, tableName, + SnapshotType.valueOf(type.toString()))); return; } catch (CorruptedSnapshotException cse) { LOG.warn("Got CorruptedSnapshotException", cse); @@ -393,13 +396,14 @@ public class SnapshotTestingUtils { } admin.snapshot(snapshotNameString, tableName); - List snapshots = SnapshotTestingUtils.assertExistsMatchingSnapshot(admin, - snapshotNameString, tableName); + List snapshots = + SnapshotTestingUtils.assertExistsMatchingSnapshot(admin, snapshotNameString, tableName); if (snapshots == null || snapshots.size() != 1) { Assert.fail("Incorrect number of snapshots for table " + tableName); } - SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, nonEmptyFamilyNames, + SnapshotTestingUtils.confirmSnapshotValid( + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), tableName, nonEmptyFamilyNames, emptyFamilyNames, rootDir, admin, fs); } @@ -418,7 +422,8 @@ public class SnapshotTestingUtils { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, mfs.getRootDir()); - SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); + HBaseProtos.SnapshotDescription snapshotDesc = + SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); final TableName table = TableName.valueOf(snapshotDesc.getTable()); final 
ArrayList corruptedFiles = new ArrayList(); @@ -467,7 +472,7 @@ public class SnapshotTestingUtils { public static class SnapshotBuilder { private final RegionData[] tableRegions; - private final SnapshotDescription desc; + private final HBaseProtos.SnapshotDescription desc; private final HTableDescriptor htd; private final Configuration conf; private final FileSystem fs; @@ -477,7 +482,7 @@ public class SnapshotTestingUtils { public SnapshotBuilder(final Configuration conf, final FileSystem fs, final Path rootDir, final HTableDescriptor htd, - final SnapshotDescription desc, final RegionData[] tableRegions) + final HBaseProtos.SnapshotDescription desc, final RegionData[] tableRegions) throws IOException { this.fs = fs; this.conf = conf; @@ -495,7 +500,7 @@ public class SnapshotTestingUtils { return this.htd; } - public SnapshotDescription getSnapshotDescription() { + public HBaseProtos.SnapshotDescription getSnapshotDescription() { return this.desc; } @@ -519,7 +524,7 @@ public class SnapshotTestingUtils { .build()); } - private Path[] addRegion(final SnapshotDescription desc) throws IOException { + private Path[] addRegion(final HBaseProtos.SnapshotDescription desc) throws IOException { if (this.snapshotted == tableRegions.length) { throw new UnsupportedOperationException("No more regions in the table"); } @@ -648,7 +653,7 @@ public class SnapshotTestingUtils { HTableDescriptor htd = createHtd(tableName); RegionData[] regions = createTable(htd, numRegions); - SnapshotDescription desc = SnapshotDescription.newBuilder() + HBaseProtos.SnapshotDescription desc = HBaseProtos.SnapshotDescription.newBuilder() .setTable(htd.getNameAsString()) .setName(snapshotName) .setVersion(version) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index 4b988a6..0a933c6 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -44,7 +44,9 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -147,18 +149,20 @@ public class TestFlushSnapshotFromClient { // take a snapshot of the enabled table String snapshotString = "offlineTableSnapshot"; byte[] snapshot = Bytes.toBytes(snapshotString); - admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH); + admin.snapshot(snapshotString, TABLE_NAME, + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)); LOG.debug("Snapshot completed."); // make sure we have the snapshot - List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshot, TABLE_NAME); + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot LOG.debug("FS state after snapshot:"); UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG); - SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM); + SnapshotTestingUtils.confirmSnapshotValid(UTIL, + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM); } /** @@ -181,18 +185,20 @@ public class 
TestFlushSnapshotFromClient { // take a snapshot of the enabled table String snapshotString = "skipFlushTableSnapshot"; byte[] snapshot = Bytes.toBytes(snapshotString); - admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH); + admin.snapshot(snapshotString, TABLE_NAME, + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.SKIPFLUSH)); LOG.debug("Snapshot completed."); // make sure we have the snapshot - List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshot, TABLE_NAME); + List snapshots = + SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME); // make sure its a valid snapshot LOG.debug("FS state after snapshot:"); UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG); - SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM); + SnapshotTestingUtils.confirmSnapshotValid(UTIL, + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM); admin.deleteSnapshot(snapshot); snapshots = admin.listSnapshots(); @@ -234,7 +240,8 @@ public class TestFlushSnapshotFromClient { LOG.debug("FS state after snapshot:"); UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG); - SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM); + SnapshotTestingUtils.confirmSnapshotValid(UTIL, + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM); } @Test @@ -258,7 +265,8 @@ public class TestFlushSnapshotFromClient { // snapshot the non-existant table try { - admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH); + admin.snapshot("fail", tableName, + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)); fail("Snapshot succeeded even though there is not table."); } catch (SnapshotCreationException e) { LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage()); @@ 
-267,13 +275,14 @@ public class TestFlushSnapshotFromClient { @Test public void testAsyncFlushSnapshot() throws Exception { - SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot") - .setTable(TABLE_NAME.getNameAsString()) - .setType(SnapshotDescription.Type.FLUSH) - .build(); + HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder() + .setName("asyncSnapshot").setTable(TABLE_NAME.getNameAsString()) + .setType(HBaseProtos.SnapshotDescription.Type.FLUSH).build(); // take the snapshot async - admin.takeSnapshotAsync(snapshot); + admin.takeSnapshotAsync( + new SnapshotDescription("asyncSnapshot", TABLE_NAME.getNameAsString(), + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH))); // constantly loop, looking for the snapshot to complete HMaster master = UTIL.getMiniHBaseCluster().getMaster(); @@ -295,7 +304,8 @@ public class TestFlushSnapshotFromClient { // Take a snapshot String snapshotBeforeMergeName = "snapshotBeforeMerge"; - admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH); + admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)); // Clone the table TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge"); @@ -364,7 +374,7 @@ public class TestFlushSnapshotFromClient { // Take a snapshot String snapshotName = "snapshotAfterMerge"; SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(), - SnapshotDescription.Type.FLUSH, 3); + HBaseProtos.SnapshotDescription.Type.FLUSH, 3); // Clone the table TableName cloneName = TableName.valueOf("cloneMerge"); @@ -425,14 +435,16 @@ public class TestFlushSnapshotFromClient { @Override public void run() { try { - LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss)); + LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils + 
.toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss))); admin.takeSnapshotAsync(ss); } catch (Exception e) { LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString( - ss) + ProtobufUtil.createHBaseProtosSnapshotDesc(ss)) + ". This is ok, we expect some", e); } - LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss)); + LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils + .toString(ProtobufUtil.createHBaseProtosSnapshotDesc(ss))); toBeSubmitted.countDown(); } }; @@ -440,11 +452,15 @@ public class TestFlushSnapshotFromClient { // build descriptions SnapshotDescription[] descs = new SnapshotDescription[ssNum]; for (int i = 0; i < ssNum; i++) { - SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); - builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString()); - builder.setName("ss"+i); - builder.setType(SnapshotDescription.Type.FLUSH); - descs[i] = builder.build(); + HBaseProtos.SnapshotDescription.Builder builder = + HBaseProtos.SnapshotDescription.newBuilder(); + if(i %2 ==0) { + descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME.getNameAsString(), + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)); + } else { + descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME.getNameAsString(), + ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)); + } } // kick each off its own thread diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java index 877ee21..04fce5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java @@ -28,6 +28,7 @@ import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -116,7 +117,7 @@ public class TestRestoreFlushSnapshotFromClient { // take a snapshot admin.snapshot(Bytes.toString(snapshotName0), tableName, - SnapshotDescription.Type.FLUSH); + ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH)); LOG.info("=== after snapshot with 500 rows"); logFSTree(); @@ -129,7 +130,7 @@ public class TestRestoreFlushSnapshotFromClient { // take a snapshot of the updated table admin.snapshot(Bytes.toString(snapshotName1), tableName, - SnapshotDescription.Type.FLUSH); + ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH)); LOG.info("=== after snapshot with 1000 rows"); logFSTree(); table.close(); @@ -194,7 +195,8 @@ public class TestRestoreFlushSnapshotFromClient { TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(UTIL, clonedTableName, snapshot0Rows); - admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH); + admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, + ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH)); UTIL.deleteTable(clonedTableName); admin.cloneSnapshot(snapshotName2, clonedTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index dbb23a5..d5869ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java 
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -33,12 +33,12 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -1856,9 +1856,9 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { @Test public void testSplitOrMergeStatWhenHBCKAbort() throws Exception { admin.setSplitOrMergeEnabled(true, false, true, - Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE); - boolean oldSplit = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); - boolean oldMerge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + MasterSwitchType.SPLIT, MasterSwitchType.MERGE); + boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT); + boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE); assertTrue(oldSplit); assertTrue(oldMerge); @@ -1880,8 +1880,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { spiedHbck.onlineHbck(); spiedHbck.close(); - boolean split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); - boolean merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT); + boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE); assertFalse(split); assertFalse(merge); @@ -1892,8 +1892,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { 
hbck.onlineHbck(); hbck.close(); - split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT); - merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE); + split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT); + merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE); assertTrue(split); assertTrue(merge);