From 1cdd1a078d8c778b8935bceddf43cb745dc98bba Mon Sep 17 00:00:00 2001 From: Guangxu Cheng Date: Wed, 28 Mar 2018 15:28:38 +0800 Subject: [PATCH] HBASE-19764 Fix Checkstyle errors in hbase-endpoint --- hbase-endpoint/pom.xml | 16 ++ .../client/coprocessor/AggregationClient.java | 196 ++++++++++++--------- .../client/coprocessor/AggregationHelper.java | 13 +- .../client/coprocessor/AsyncAggregationClient.java | 26 +-- .../hbase/coprocessor/AggregateImplementation.java | 22 +-- .../apache/hadoop/hbase/coprocessor/Export.java | 71 +++++--- .../security/access/SecureBulkLoadEndpoint.java | 11 +- .../coprocessor/ColumnAggregationEndpoint.java | 10 +- .../ColumnAggregationEndpointNullResponse.java | 12 +- .../ColumnAggregationEndpointWithErrors.java | 8 +- .../coprocessor/ProtobufCoprocessorService.java | 7 +- .../coprocessor/TestAsyncCoprocessorEndpoint.java | 14 +- .../hadoop/hbase/coprocessor/TestClassLoading.java | 8 +- .../hbase/coprocessor/TestCoprocessorEndpoint.java | 76 ++++---- ...estCoprocessorServiceBackwardCompatibility.java | 4 +- .../coprocessor/TestCoprocessorTableEndpoint.java | 36 ++-- .../coprocessor/TestRowProcessorEndpoint.java | 6 +- .../hadoop/hbase/coprocessor/TestSecureExport.java | 55 +++--- .../regionserver/SecureBulkLoadEndpointClient.java | 4 +- ...HRegionServerBulkLoadWithOldSecureEndpoint.java | 26 +-- .../regionserver/TestServerCustomProtocol.java | 36 ++-- 21 files changed, 381 insertions(+), 276 deletions(-) diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index e9a8cf7132..55be46b7f6 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -69,6 +69,22 @@ net.revelc.code warbucks-maven-plugin + + org.apache.maven.plugins + maven-checkstyle-plugin + + + checkstyle + validate + + check + + + true + + + + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 7b071f412c..13f94938bc 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -42,9 +42,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -58,6 +55,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRespo import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This client class is for invoking the aggregate functions deployed on the @@ -135,7 +135,7 @@ public class AggregationClient implements Closeable { /** * Constructor with Conf object - * @param cfg + * @param cfg Configuration to use */ public AggregationClient(Configuration cfg) { try { @@ -157,9 +157,9 @@ public class AggregationClient implements Closeable { * It gives the maximum value of a column for a given column family for the * given range. 
In case qualifier is null, a max of all values for the given * family is returned. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter columnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return max val <R> * @throws Throwable * The caller is supposed to handle the exception as they are thrown @@ -177,16 +177,16 @@ public class AggregationClient implements Closeable { * It gives the maximum value of a column for a given column family for the * given range. In case qualifier is null, a max of all values for the given * family is returned. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return max val <> * @throws Throwable * The caller is supposed to handle the exception as they are thrown * & propagated to it. */ public - R max(final Table table, final ColumnInterpreter ci, + R max(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MaxCallBack implements Batch.Callback { @@ -231,11 +231,13 @@ public class AggregationClient implements Closeable { * It gives the minimum value of a column for a given column family for the * given range. In case qualifier is null, a min of all values for the given * family is returned. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public R min( final TableName tableName, final ColumnInterpreter ci, final Scan scan) @@ -249,14 +251,16 @@ public class AggregationClient implements Closeable { * It gives the minimum value of a column for a given column family for the * given range. In case qualifier is null, a min of all values for the given * family is returned. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return min val <R> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - R min(final Table table, final ColumnInterpreter ci, + R min(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MinCallBack implements Batch.Callback { @@ -305,17 +309,19 @@ public class AggregationClient implements Closeable { * filter as it may set the flag to skip to next row, but the value read is * not of the given filter: in this case, this particular row will not be * counted ==> an error. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. 
*/ public long rowCount( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + throws Throwable { try (Table table = connection.getTable(tableName)) { - return rowCount(table, ci, scan); + return rowCount(table, ci, scan); } } @@ -326,14 +332,16 @@ public class AggregationClient implements Closeable { * filter as it may set the flag to skip to next row, but the value read is * not of the given filter: in this case, this particular row will not be * counted ==> an error. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - long rowCount(final Table table, + long rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true); class RowNumCallback implements Batch.Callback { @@ -373,31 +381,35 @@ public class AggregationClient implements Closeable { /** * It sums up the value returned from various regions. In case qualifier is * null, summation of all the column qualifiers in the given family is done. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public S sum( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { - return sum(table, ci, scan); + return sum(table, ci, scan); } } /** * It sums up the value returned from various regions. In case qualifier is * null, summation of all the column qualifiers in the given family is done. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return sum <S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - S sum(final Table table, final ColumnInterpreter ci, + S sum(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); @@ -443,15 +455,17 @@ public class AggregationClient implements Closeable { * It computes average while fetching sum and row count from all the * corresponding regions. Approach is to compute a global sum of region level * sum and rowcount and then compute the average. - * @param tableName - * @param scan + * @param tableName the name of the table to scan + * @param scan the HBase scan object to use to read data from HBase * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { - return getAvgArgs(table, ci, scan); + return getAvgArgs(table, ci, scan); } } @@ -459,17 +473,19 @@ public class AggregationClient implements Closeable { * It computes average while fetching sum and row count from all the * corresponding regions. 
Approach is to compute a global sum of region level * sum and rowcount and then compute the average. - * @param table - * @param scan + * @param table table to scan. + * @param scan the HBase scan object to use to read data from HBase * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ private - Pair getAvgArgs(final Table table, + Pair getAvgArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class AvgCallBack implements Batch.Callback> { S sum = null; - Long rowCount = 0l; + Long rowCount = 0L; public synchronized Pair getAvgArgs() { return new Pair<>(sum, rowCount); @@ -518,14 +534,16 @@ public class AggregationClient implements Closeable { * its return type should be a decimal value, irrespective of what * columninterpreter says. So, this methods collects the necessary parameters * to compute the average and returs the double value. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - double avg(final TableName tableName, + double avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); @@ -537,11 +555,13 @@ public class AggregationClient implements Closeable { * its return type should be a decimal value, irrespective of what * columninterpreter says. So, this methods collects the necessary parameters * to compute the average and returs the double value. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public double avg( final Table table, final ColumnInterpreter ci, Scan scan) throws Throwable { @@ -555,17 +575,19 @@ public class AggregationClient implements Closeable { * average*average). From individual regions, it obtains sum, square sum and * number of rows. With these, the above values are computed to get the global * std. - * @param table - * @param scan + * @param table table to scan. + * @param scan the HBase scan object to use to read data from HBase * @return standard deviations * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ private - Pair, Long> getStdArgs(final Table table, + Pair, Long> getStdArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class StdCallback implements Batch.Callback, Long>> { - long rowCountVal = 0l; + long rowCountVal = 0L; S sumVal = null, sumSqVal = null; public synchronized Pair, Long> getStdParams() { @@ -626,17 +648,19 @@ public class AggregationClient implements Closeable { * return type should be a decimal value, irrespective of what * columninterpreter says. So, this methods collects the necessary parameters * to compute the std and returns the double value. 
- * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - double std(final TableName tableName, ColumnInterpreter ci, + double std(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { - return std(table, ci, scan); + return std(table, ci, scan); } } @@ -646,11 +670,13 @@ public class AggregationClient implements Closeable { * return type should be a decimal value, irrespective of what * columninterpreter says. So, this methods collects the necessary parameters * to compute the std and returns the double value. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return <R, S> * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public double std( final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { @@ -667,17 +693,19 @@ public class AggregationClient implements Closeable { * It helps locate the region with median for a given column whose weight * is specified in an optional column. * From individual regions, it obtains sum of values and sum of weights. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen + * and (sum of values, sum of weights) for the region, the second element is + * (sum of values, sum of weights) for all the regions chosen * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ private - Pair>, List> - getMedianArgs(final Table table, + Pair>, List> + getMedianArgs(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -731,17 +759,19 @@ public class AggregationClient implements Closeable { * This is the client side interface/handler for calling the median method for a * given cf-cq combination. This method collects the necessary parameters * to compute the median and returns the median. - * @param tableName - * @param ci - * @param scan + * @param tableName the name of the table to scan + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return R the median * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. 
*/ public - R median(final TableName tableName, ColumnInterpreter ci, + R median(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { - return median(table, ci, scan); + return median(table, ci, scan); } } @@ -749,14 +779,16 @@ public class AggregationClient implements Closeable { * This is the client side interface/handler for calling the median method for a * given cf-cq combination. This method collects the necessary parameters * to compute the median and returns the median. - * @param table - * @param ci - * @param scan + * @param table table to scan. + * @param ci ColumnInterpreter + * @param scan the HBase scan object to use to read data from HBase * @return R the median * @throws Throwable + * The caller is supposed to handle the exception as they are thrown + * & propagated to it. */ public - R median(final Table table, ColumnInterpreter ci, + R median(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { Pair>, List> p = getMedianArgs(table, ci, scan); byte[] startRow = null; @@ -776,14 +808,18 @@ public class AggregationClient implements Closeable { for (Map.Entry> entry : map.entrySet()) { S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0); double newSumVal = movingSumVal + ci.divideForAvg(s, 1L); - if (newSumVal > halfSumVal) break; // we found the region with the median + if (newSumVal > halfSumVal) { + break; + } // we found the region with the median movingSumVal = newSumVal; startRow = entry.getKey(); } // scan the region with median and find it Scan scan2 = new Scan(scan); // inherit stop row from method parameter - if (startRow != null) scan2.setStartRow(startRow); + if (startRow != null) { + scan2.setStartRow(startRow); + } ResultScanner scanner = null; try { int cacheSize = scan2.getCaching(); @@ -815,8 +851,8 @@ public class AggregationClient implements Closeable { movingSumVal = newSumVal; kv = r.getColumnLatestCell(colFamily, qualifier); value = ci.getValue(colFamily, qualifier, kv); - } } + } } while (results != null && results.length > 0); } finally { if (scanner != null) { diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java index 010451a34b..da31bac856 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java @@ -27,21 +27,22 @@ import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Helper class for constructing aggregation request and response. 
*/ @InterfaceAudience.Private -public class AggregationHelper { +public final class AggregationHelper { + private AggregationHelper() {} /** - * @param scan + * @param scan the HBase scan object to use to read data from HBase * @param canFamilyBeAbsent whether column family can be absent in familyMap of scan */ private static void validateParameters(Scan scan, boolean canFamilyBeAbsent) throws IOException { @@ -64,8 +65,8 @@ public class AggregationHelper { validateParameters(scan, canFamilyBeAbsent); final AggregateRequest.Builder requestBuilder = AggregateRequest.newBuilder(); requestBuilder.setInterpreterClassName(ci.getClass().getCanonicalName()); - P columnInterpreterSpecificData = null; - if ((columnInterpreterSpecificData = ci.getRequestData()) != null) { + P columnInterpreterSpecificData = ci.getRequestData(); + if (columnInterpreterSpecificData != null) { requestBuilder.setInterpreterSpecificBytes(columnInterpreterSpecificData.toByteString()); } requestBuilder.setScan(ProtobufUtil.toScan(scan)); @@ -80,7 +81,7 @@ public class AggregationHelper { * @param position the position of the argument in the class declaration * @param b the ByteString which should be parsed to get the instance created * @return the instance - * @throws IOException + * @throws IOException Either we couldn't instantiate the method object, or "parseFrom" failed. */ @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java index 371e865f1e..45d2a40812 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java @@ -52,7 +52,9 @@ import org.apache.yetus.audience.InterfaceAudience; * summing/processing the individual results obtained from the AggregateService for each region. */ @InterfaceAudience.Public -public class AsyncAggregationClient { +public final class AsyncAggregationClient { + + private AsyncAggregationClient() {} private static abstract class AbstractAggregationCallback implements CoprocessorCallback { @@ -365,20 +367,20 @@ public class AsyncAggregationClient { AbstractAggregationCallback> callback = new AbstractAggregationCallback>(future) { - private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); - @Override - protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { - if (resp.getFirstPartCount() > 0) { - map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); - } + @Override + protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { + if (resp.getFirstPartCount() > 0) { + map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); } + } - @Override - protected NavigableMap getFinalResult() { - return map; - } - }; + @Override + protected NavigableMap getFinalResult() { + return map; + } + }; table . 
coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index c8badd3692..7290c030e5 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -36,9 +36,6 @@ import java.util.NavigableSet; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; @@ -47,6 +44,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateReque import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse; import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateService; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A concrete AggregateProtocol implementation. Its system level coprocessor @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; */ @InterfaceAudience.Private public class AggregateImplementation -extends AggregateService implements RegionCoprocessor { + extends AggregateService implements RegionCoprocessor { protected static final Logger log = LoggerFactory.getLogger(AggregateImplementation.class); private RegionCoprocessorEnvironment env; @@ -186,7 +186,7 @@ extends AggregateService implements RegionCoprocessor { RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; - long sum = 0l; + long sum = 0L; try { ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); S sumVal = null; @@ -206,8 +206,9 @@ extends AggregateService implements RegionCoprocessor { int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); - if (temp != null) + if (temp != null) { sumVal = ci.add(sumVal, ci.castToReturnType(temp)); + } } results.clear(); } while (hasMoreRows); @@ -237,7 +238,7 @@ extends AggregateService implements RegionCoprocessor { public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback done) { AggregateResponse response = null; - long counter = 0l; + long counter = 0L; List results = new ArrayList<>(); InternalScanner scanner = null; try { @@ -250,8 +251,9 @@ extends AggregateService implements RegionCoprocessor { if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } - if (scan.getFilter() == null && qualifier == null) + if (scan.getFilter() == null && qualifier == null) { scan.setFilter(new FirstKeyOnlyFilter()); + } scanner = env.getRegion().getScanner(scan); boolean hasMoreRows = false; do { @@ -300,7 +302,7 @@ extends AggregateService implements RegionCoprocessor { try { ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); S sumVal = null; - Long rowCountVal = 0l; + Long rowCountVal = 0L; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = 
env.getRegion().getScanner(scan); byte[] colFamily = scan.getFamilies()[0]; @@ -360,7 +362,7 @@ extends AggregateService implements RegionCoprocessor { try { ColumnInterpreter ci = constructColumnInterpreterFromRequest(request); S sumVal = null, sumSqVal = null, tempVal = null; - long rowCountVal = 0l; + long rowCountVal = 0L; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = env.getRegion().getScanner(scan); byte[] colFamily = scan.getFamilies()[0]; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 12b8b8e58c..ea6772c357 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.Closeable; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -77,10 +81,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * Export an HBase table. Writes content to sequence files up in HDFS. Use * {@link Import} to read it back in again. It is implemented by the endpoint @@ -94,7 +94,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static final Logger LOG = LoggerFactory.getLogger(Export.class); private static final Class DEFAULT_CODEC = DefaultCodec.class; - private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; + private static final SequenceFile.CompressionType DEFAULT_TYPE = + SequenceFile.CompressionType.RECORD; private RegionCoprocessorEnvironment env = null; private UserProvider userProvider; @@ -110,11 +111,13 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.getLength(otherArgs)); return null; } - Triple arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); + Triple arguments = + ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); } - public static Map run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { + public static Map run(final Configuration conf, TableName tableName, + Scan scan, Path dir) throws Throwable { FileSystem fs = dir.getFileSystem(conf); UserProvider userProvider = UserProvider.instantiate(conf); checkDir(fs, dir); @@ -158,7 +161,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static SequenceFile.CompressionType getCompressionType(final ExportProtos.ExportRequest request) { + private static SequenceFile.CompressionType getCompressionType( + final ExportProtos.ExportRequest request) { if (request.hasCompressType()) { return SequenceFile.CompressionType.valueOf(request.getCompressType()); } else { @@ -166,11 +170,13 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static CompressionCodec getCompressionCodec(final Configuration conf, final ExportProtos.ExportRequest 
request) { + private static CompressionCodec getCompressionCodec(final Configuration conf, + final ExportProtos.ExportRequest request) { try { Class codecClass; if (request.hasCompressCodec()) { - codecClass = conf.getClassByName(request.getCompressCodec()).asSubclass(CompressionCodec.class); + codecClass = conf.getClassByName(request.getCompressCodec()) + .asSubclass(CompressionCodec.class); } else { codecClass = DEFAULT_CODEC; } @@ -198,16 +204,17 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces rval.add(SequenceFile.Writer.valueClass(Result.class)); rval.add(getOutputPath(conf, info, request)); if (getCompression(request)) { - rval.add(SequenceFile.Writer.compression(getCompressionType(request), getCompressionCodec(conf, request))); + rval.add(SequenceFile.Writer.compression(getCompressionType(request), + getCompressionCodec(conf, request))); } else { rval.add(SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } return rval; } - private static ExportProtos.ExportResponse processData(final Region region, final Configuration conf, - final UserProvider userProvider, final Scan scan, final Token userToken, - final List opts) throws IOException { + private static ExportProtos.ExportResponse processData(final Region region, + final Configuration conf, final UserProvider userProvider, final Scan scan, + final Token userToken, final List opts) throws IOException { ScanCoprocessor cp = new ScanCoprocessor(region); RegionScanner scanner = null; try (RegionOp regionOp = new RegionOp(region); @@ -230,11 +237,15 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } Cell firstCell = cells.get(0); for (Cell cell : cells) { - if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength(), - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) != 0) { - throw new IOException("Why the RegionScanner#nextRaw returns the data of different rows??" - + " first row=" + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), firstCell.getRowLength()) - + ", current row=" + Bytes.toHex(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), + firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()) != 0) { + throw new IOException("Why the RegionScanner#nextRaw returns the data of different" + + " rows?? 
first row=" + + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), + firstCell.getRowLength()) + + ", current row=" + + Bytes.toHex(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } } results.add(Result.create(cells)); @@ -323,7 +334,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces RpcCallback done) { Region region = env.getRegion(); Configuration conf = HBaseConfiguration.create(env.getConfiguration()); - conf.setStrings("io.serializations", conf.get("io.serializations"), ResultSerialization.class.getName()); + conf.setStrings("io.serializations", conf.get("io.serializations"), + ResultSerialization.class.getName()); try { Scan scan = validateKey(region.getRegionInfo(), request); Token userToken = null; @@ -344,7 +356,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request) throws IOException { + private Scan validateKey(final RegionInfo region, final ExportProtos.ExportRequest request) + throws IOException { Scan scan = ProtobufUtil.toScan(request.getScan()); byte[] regionStartKey = region.getStartKey(); byte[] originStartKey = scan.getStartRow(); @@ -439,17 +452,20 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static class SecureWriter implements Closeable { private final PrivilegedWriter privilegedWriter; - SecureWriter(final Configuration conf, final UserProvider userProvider, final Token userToken, - final List opts) throws IOException { + SecureWriter(final Configuration conf, final UserProvider userProvider, + final Token userToken, final List opts) + throws IOException { privilegedWriter = new PrivilegedWriter(getActiveUser(userProvider, userToken), - SequenceFile.createWriter(conf, opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); + SequenceFile.createWriter(conf, + opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); } void append(final Object key, final Object value) throws IOException { privilegedWriter.append(key, value); } - private static User getActiveUser(final UserProvider userProvider, final Token userToken) throws IOException { + private static User getActiveUser(final UserProvider userProvider, final Token userToken) + throws IOException { User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent()); if (user == null && userToken != null) { LOG.warn("No found of user credentials, but a token was got from user request"); @@ -465,7 +481,8 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - private static class PrivilegedWriter implements PrivilegedExceptionAction, Closeable { + private static class PrivilegedWriter implements PrivilegedExceptionAction, + Closeable { private final User user; private final SequenceFile.Writer out; private Object key; @@ -502,7 +519,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } } - public static class Response { + public final static class Response { private final long rowCount; private final long cellCount; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index a603ef14f2..4fe289a08a 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -18,6 +18,10 @@ package org.apache.hadoop.hbase.security.access; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.Collections; import java.util.List; @@ -45,9 +49,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -125,7 +126,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest - convert(CleanupBulkLoadRequest request) + convert(CleanupBulkLoadRequest request) throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { byte [] bytes = request.toByteArray(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder @@ -154,7 +155,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg } org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest - convert(BulkLoadHFileRequest request) + convert(BulkLoadHFileRequest request) throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { byte [] bytes = request.toByteArray(); org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index fd570e7854..eb5a1d169a 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -36,15 +40,11 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * The aggregation implementation at a region. 
*/ public class ColumnAggregationEndpoint extends ColumnAggregationService -implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java index 5effbe9fa4..6f5b2671e1 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -27,20 +31,16 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.ColumnAggregationServiceNullResponse; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.ColumnAggregationNullResponseSumRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.ColumnAggregationNullResponseSumResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.ColumnAggregationServiceNullResponse; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * Test coprocessor endpoint that always returns {@code null} for requests to the last region * in the table. 
This allows tests to provide assurance of correct {@code null} handling for diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 39e3b12cd2..f77b018485 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.coprocessor; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -38,10 +42,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; - /** * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on * the last region in the table. This allows tests to ensure correct error handling of diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 6fc4eb9886..365c528bd7 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -21,6 +21,10 @@ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; + +import java.io.IOException; +import java.util.Collections; + import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -32,9 +36,6 @@ import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.PauseRequestPro import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.util.Threads; -import java.io.IOException; -import java.util.Collections; - /** * Test implementation of a coprocessor endpoint exposing the * {@link TestRpcServiceProtos.TestProtobufRpcProto} service methods. For internal use by unit tests diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 9d4b07df5b..dd2507382e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -80,8 +80,8 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); TestProtos.EchoResponseProto response = admin - . coprocessorService( - TestRpcServiceProtos.TestProtobufRpcProto::newStub, + . 
+ coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, (s, c, done) -> s.echo(c, request, done)).get(); assertEquals("hello", response.getMessage()); } @@ -91,8 +91,8 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { TestProtos.EmptyRequestProto emptyRequest = TestProtos.EmptyRequestProto.getDefaultInstance(); try { admin - . coprocessorService( - TestRpcServiceProtos.TestProtobufRpcProto::newStub, + . + coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, (s, c, done) -> s.error(c, emptyRequest, done)).get(); fail("Should have thrown an exception"); } catch (Exception e) { @@ -106,7 +106,8 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); DummyRegionServerEndpointProtos.DummyResponse response = admin - . coprocessorService( + . coprocessorService( DummyRegionServerEndpointProtos.DummyService::newStub, (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); assertEquals(DUMMY_VALUE, response.getValue()); @@ -119,7 +120,8 @@ public class TestAsyncCoprocessorEndpoint extends TestAsyncAdminBase { DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); try { admin - . coprocessorService( + . coprocessorService( DummyRegionServerEndpointProtos.DummyService::newStub, (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); fail("Should have thrown an exception"); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 1bf36f5e86..63634ddd27 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -98,9 +98,7 @@ public class TestClassLoading { private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = - new String[]{ - regionServerCoprocessor.getSimpleName() - }; + new String[]{ regionServerCoprocessor.getSimpleName() }; private static final String[] masterRegionServerSystemCoprocessors = new String[] { regionCoprocessor1.getSimpleName(), MultiRowMutationEndpoint.class.getSimpleName(), @@ -211,7 +209,9 @@ public class TestClassLoading { found2_k2 = found2_k2 && (conf.get("k2") != null); found2_k3 = found2_k3 && (conf.get("k3") != null); } else { - found2_k1 = found2_k2 = found2_k3 = false; + found2_k1 = false; + found2_k2 = false; + found2_k3 = false; } regionsActiveClassLoaders .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index 87409a7813..f50d8f4a06 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -191,26 +191,26 @@ public class TestCoprocessorEndpoint { // scan: for all regions final RpcController controller = new ServerRpcController(); table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], - new Batch.Call() { - public TestProtos.EchoResponseProto call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws 
IOException { - LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); - CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.echo(controller, request, callback); - TestProtos.EchoResponseProto response = callback.get(); - LOG.debug("Batch.Call returning result " + response); - return response; - } - }, - new Batch.Callback() { - public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { - assertNotNull(result); - assertEquals("hello", result.getMessage()); - results.put(region, result.getMessage()); - } + ROWS[0], ROWS[ROWS.length - 1], + new Batch.Call() { + public TestProtos.EchoResponseProto call( + TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); + CoprocessorRpcUtils.BlockingRpcCallback callback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.echo(controller, request, callback); + TestProtos.EchoResponseProto response = callback.get(); + LOG.debug("Batch.Call returning result " + response); + return response; } + }, + new Batch.Callback() { + public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { + assertNotNull(result); + assertEquals("hello", result.getMessage()); + results.put(region, result.getMessage()); + } + } ); for (Map.Entry e : results.entrySet()) { LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); @@ -224,26 +224,26 @@ public class TestCoprocessorEndpoint { // scan: for region 2 and region 3 table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[rowSeperator1], ROWS[ROWS.length - 1], - new Batch.Call() { - public TestProtos.EchoResponseProto call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws IOException { - LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); - CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.echo(controller, request, callback); - TestProtos.EchoResponseProto response = callback.get(); - LOG.debug("Batch.Call returning result " + response); - return response; - } - }, - new Batch.Callback() { - public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { - assertNotNull(result); - assertEquals("hello", result.getMessage()); - results.put(region, result.getMessage()); - } + ROWS[rowSeperator1], ROWS[ROWS.length - 1], + new Batch.Call() { + public TestProtos.EchoResponseProto call( + TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); + CoprocessorRpcUtils.BlockingRpcCallback callback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.echo(controller, request, callback); + TestProtos.EchoResponseProto response = callback.get(); + LOG.debug("Batch.Call returning result " + response); + return response; } + }, + new Batch.Callback() { + public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { + assertNotNull(result); + assertEquals("hello", result.getMessage()); + results.put(region, result.getMessage()); + } + } ); for (Map.Entry e : results.entrySet()) { LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); diff --git 
a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java index e7181bb054..a5cf367a66 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java @@ -26,7 +26,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.*; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyResponse; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyService; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index fbcbb54f68..ea05509e34 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -81,7 +81,7 @@ public class TestCoprocessorTableEndpoint { HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); - desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName()); + desc.addCoprocessor(ColumnAggregationEndpoint.class.getName()); createTable(desc); verifyTable(tableName); @@ -96,7 +96,7 @@ public class TestCoprocessorTableEndpoint { createTable(desc); - desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName()); + desc.addCoprocessor(ColumnAggregationEndpoint.class.getName()); updateTable(desc); verifyTable(tableName); @@ -113,24 +113,24 @@ public class TestCoprocessorTableEndpoint { private static Map sum(final Table table, final byte [] family, final byte [] qualifier, final byte [] start, final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, end, - new Batch.Call() { - @Override - public Long call(ColumnAggregationProtos.ColumnAggregationService instance) - throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - ColumnAggregationProtos.SumRequest.Builder builder = - ColumnAggregationProtos.SumRequest.newBuilder(); - builder.setFamily(ByteString.copyFrom(family)); - if (qualifier != null && qualifier.length > 0) { - builder.setQualifier(ByteString.copyFrom(qualifier)); + new Batch.Call() { + @Override + public Long call(ColumnAggregationProtos.ColumnAggregationService instance) + throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new 
CoprocessorRpcUtils.BlockingRpcCallback<>(); + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); + builder.setFamily(ByteString.copyFrom(family)); + if (qualifier != null && qualifier.length > 0) { + builder.setQualifier(ByteString.copyFrom(qualifier)); + } + instance.sum(null, builder.build(), rpcCallback); + return rpcCallback.get().getSum(); } - instance.sum(null, builder.build(), rpcCallback); - return rpcCallback.get().getSum(); - } - }); + }); } private static final void createTable(HTableDescriptor desc) throws Exception { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index 01e5b59a11..b1eb5afb91 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -313,7 +313,7 @@ public class TestRowProcessorEndpoint { * So they can be loaded with the endpoint on the coprocessor. */ public static class RowProcessorEndpoint - extends BaseRowProcessorEndpoint { + extends BaseRowProcessorEndpoint { public static class IncrementCounterProcessor extends BaseRowProcessor { @@ -652,7 +652,9 @@ public class TestRowProcessorEndpoint { result.clear(); scanner.next(result); } finally { - if (scanner != null) scanner.close(); + if (scanner != null) { + scanner.close(); + } } } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 38c3081bb0..785cde68fc 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -147,7 +147,8 @@ public class TestSecureExport { USER_NONE + "/" + LOCALHOST); } private static User getUserByLogin(final String user) throws IOException { - return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); + return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI( + getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); } private static String getPrinciple(final String user) { return user + "/" + LOCALHOST + "@" + KDC.getRealm(); @@ -160,30 +161,41 @@ public class TestSecureExport { // the following key should be changed. 
// 1) DFS_NAMENODE_USER_NAME_KEY -> DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY // 2) DFS_DATANODE_USER_NAME_KEY -> DFS_DATANODE_KERBEROS_PRINCIPAL_KEY - UTIL.getConfiguration().set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, SERVER_PRINCIPAL + "@" + KDC.getRealm()); - UTIL.getConfiguration().set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, SERVER_PRINCIPAL + "@" + KDC.getRealm()); - UTIL.getConfiguration().set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB_FILE.getAbsolutePath()); - UTIL.getConfiguration().set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB_FILE.getAbsolutePath()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, + SERVER_PRINCIPAL + "@" + KDC.getRealm()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, + SERVER_PRINCIPAL + "@" + KDC.getRealm()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, + KEYTAB_FILE.getAbsolutePath()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, + KEYTAB_FILE.getAbsolutePath()); // set yarn principal - UTIL.getConfiguration().set(YarnConfiguration.RM_PRINCIPAL, SERVER_PRINCIPAL + "@" + KDC.getRealm()); - UTIL.getConfiguration().set(YarnConfiguration.NM_PRINCIPAL, SERVER_PRINCIPAL + "@" + KDC.getRealm()); - UTIL.getConfiguration().set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, HTTP_PRINCIPAL + "@" + KDC.getRealm()); + UTIL.getConfiguration().set(YarnConfiguration.RM_PRINCIPAL, + SERVER_PRINCIPAL + "@" + KDC.getRealm()); + UTIL.getConfiguration().set(YarnConfiguration.NM_PRINCIPAL, + SERVER_PRINCIPAL + "@" + KDC.getRealm()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + HTTP_PRINCIPAL + "@" + KDC.getRealm()); UTIL.getConfiguration().setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); - UTIL.getConfiguration().set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); + UTIL.getConfiguration().set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, + HttpConfig.Policy.HTTPS_ONLY.name()); UTIL.getConfiguration().set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST + ":0"); UTIL.getConfiguration().set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, LOCALHOST + ":0"); File keystoresDir = new File(UTIL.getDataTestDir("keystore").toUri().getPath()); keystoresDir.mkdirs(); String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSecureExport.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, UTIL.getConfiguration(), false); + KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, + UTIL.getConfiguration(), false); UTIL.getConfiguration().setBoolean("ignore.secure.ports.for.testing", true); UserGroupInformation.setConfiguration(UTIL.getConfiguration()); - UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, UTIL.getConfiguration().get( - CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); + UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + UTIL.getConfiguration().get( + CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); } - private static void addLabels(final Configuration conf, final List users, final List labels) throws Exception { + private static void addLabels(final Configuration conf, final List users, + final List labels) throws Exception { PrivilegedExceptionAction action = () -> { try (Connection conn = ConnectionFactory.createConnection(conf)) { @@ -215,11 +227,11 @@ public class TestSecureExport { } /** * Sets the 
security firstly for getting the correct default realm. - * @throws Exception */ @BeforeClass public static void beforeClass() throws Exception { - UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), HadoopSecurityEnabledUserProviderForTesting.class); + UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), + HadoopSecurityEnabledUserProviderForTesting.class); setUpKdcServer(); SecureTestUtil.enableSecurity(UTIL.getConfiguration()); UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); @@ -252,11 +264,9 @@ public class TestSecureExport { /** * Test the ExportEndpoint's access levels. The {@link Export} test is ignored * since the access exceptions cannot be collected from the mappers. - * - * @throws java.io.IOException */ @Test - public void testAccessCase() throws IOException, Throwable { + public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); TableDescriptor exportHtd = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName())) @@ -343,7 +353,8 @@ public class TestSecureExport { public void testVisibilityLabels() throws IOException, Throwable { final String exportTable = name.getMethodName() + "_export"; final String importTable = name.getMethodName() + "_import"; - final TableDescriptor exportHtd = TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + final TableDescriptor exportHtd = TableDescriptorBuilder + .newBuilder(TableName.valueOf(exportTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) .setOwnerString(USER_OWNER) .build(); @@ -400,7 +411,8 @@ public class TestSecureExport { } }; SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_OWNER)); - final TableDescriptor importHtd = TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + final TableDescriptor importHtd = TableDescriptorBuilder + .newBuilder(TableName.valueOf(importTable)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) .setOwnerString(USER_OWNER) .build(); @@ -411,7 +423,8 @@ public class TestSecureExport { importTable, output.toString() }; - assertEquals(0, ToolRunner.run(new Configuration(UTIL.getConfiguration()), new Import(), args)); + assertEquals(0, ToolRunner.run( + new Configuration(UTIL.getConfiguration()), new Import(), args)); return null; }; SecureTestUtil.verifyAllowed(importAction, getUserByLogin(USER_OWNER)); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java index d32e6ea098..1efc014c44 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java @@ -22,10 +22,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; @@ -41,12 +39,14 @@ import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.token.Token; +import 
org.apache.yetus.audience.InterfaceAudience; /** * Client proxy for SecureBulkLoadProtocol used in conjunction with SecureBulkLoadEndpoint * @deprecated Use for backward compatibility testing only. Will be removed when * SecureBulkLoadEndpoint is not supported. */ +@Deprecated @InterfaceAudience.Private public class SecureBulkLoadEndpointClient { private Table table; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index 7196851b92..d12830e0da 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -87,7 +87,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS private TableName tableName; public AtomicHFileLoader(TableName tableName, TestContext ctx, - byte targetFamilies[][]) throws IOException { + byte[][] targetFamilies) throws IOException { super(ctx); this.tableName = tableName; } @@ -114,19 +114,19 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS final String bulkToken = new SecureBulkLoadEndpointClient(table).prepareBulkLoad(tableName); RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); ClientServiceCallable callable = - new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("Going to connect to server " + getLocation() + " for row " + - Bytes.toStringBinary(getRow())); - try (Table table = conn.getTable(getTableName())) { - boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, - null, bulkToken, getLocation().getRegionInfo().getStartKey()); - } - return null; + new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), + rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + @Override + protected Void rpcCall() throws Exception { + LOG.debug("Going to connect to server " + getLocation() + " for row " + + Bytes.toStringBinary(getRow())); + try (Table table = conn.getTable(getTableName())) { + boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, + null, bulkToken, getLocation().getRegionInfo().getStartKey()); } - }; + return null; + } + }; RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); RpcRetryingCaller caller = factory. 
newCaller(); caller.callWithRetries(callable, Integer.MAX_VALUE); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index b306b76344..f8795338dc 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -84,7 +84,9 @@ public class TestServerCustomProtocol { @Override public void start(CoprocessorEnvironment env) throws IOException { - if (env instanceof RegionCoprocessorEnvironment) return; + if (env instanceof RegionCoprocessorEnvironment) { + return; + } throw new CoprocessorException("Must be loaded on a table region!"); } @@ -116,9 +118,15 @@ public class TestServerCustomProtocol { @Override public void hello(RpcController controller, HelloRequest request, RpcCallback done) { - if (!request.hasName()) done.run(HelloResponse.newBuilder().setResponse(WHOAREYOU).build()); - else if (request.getName().equals(NOBODY)) done.run(HelloResponse.newBuilder().build()); - else done.run(HelloResponse.newBuilder().setResponse(HELLO + request.getName()).build()); + if (!request.hasName()) { + done.run(HelloResponse.newBuilder().setResponse(WHOAREYOU).build()); + } + else if (request.getName().equals(NOBODY)) { + done.run(HelloResponse.newBuilder().build()); + } + else { + done.run(HelloResponse.newBuilder().setResponse(HELLO + request.getName()).build()); + } } @Override @@ -153,19 +161,19 @@ public class TestServerCustomProtocol { } @Before - public void before() throws Exception { + public void before() throws Exception { final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C }; Table table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS); - Put puta = new Put( ROW_A ); + Put puta = new Put(ROW_A); puta.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1)); table.put(puta); - Put putb = new Put( ROW_B ); + Put putb = new Put(ROW_B); putb.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1)); table.put(putb); - Put putc = new Put( ROW_C ); + Put putc = new Put(ROW_C); putc.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1)); table.put(putc); } @@ -258,7 +266,9 @@ public class TestServerCustomProtocol { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); - if (send != null) builder.setName(send); + if (send != null) { + builder.setName(send); + } instance.hello(null, builder.build(), rpcCallback); PingProtos.HelloResponse r = rpcCallback.get(); return r != null && r.hasResponse()? 
r.getResponse(): null; @@ -410,8 +420,8 @@ public class TestServerCustomProtocol { private static String doPing(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); - return rpcCallback.get().getPong(); + instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); + return rpcCallback.get().getPong(); } @Test @@ -469,8 +479,8 @@ public class TestServerCustomProtocol { throws Exception { for (Map.Entry e: results.entrySet()) { LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + - ", result key=" + Bytes.toString(e.getKey()) + - ", value=" + e.getValue()); + ", result key=" + Bytes.toString(e.getKey()) + + ", value=" + e.getValue()); } HRegionLocation loc = regionLocator.getRegionLocation(row, true); byte[] region = loc.getRegionInfo().getRegionName(); -- 2.13
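Illustrative usage (not part of the patch): the reformatted TestCoprocessorTableEndpoint#sum helper shown above wraps a Table.coprocessorService(...) fan-out call to the ColumnAggregationService endpoint. The sketch below is a minimal, self-contained version of that same pattern outside the test harness. It is an assumption-laden example, not code from this change: the table name "mytable", the family "cf", and the class name are placeholders, and it presumes the hbase-endpoint ColumnAggregationEndpoint coprocessor (with its generated ColumnAggregationProtos messages) is deployed on the target table.

// Hypothetical standalone sketch; names marked "placeholder" are not from the patch.
import java.io.IOException;
import java.util.Map;

import com.google.protobuf.ByteString;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.util.Bytes;

public final class ColumnAggregationSumExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("mytable"))) { // placeholder table
      final byte[] family = Bytes.toBytes("cf");                     // placeholder family
      // Fan one SumRequest out to every region hosting the table (null start/end keys
      // cover the whole key space) and collect one partial sum per region.
      Map<byte[], Long> partials = table.coprocessorService(
          ColumnAggregationProtos.ColumnAggregationService.class, null, null,
          new Batch.Call<ColumnAggregationProtos.ColumnAggregationService, Long>() {
            @Override
            public Long call(ColumnAggregationProtos.ColumnAggregationService instance)
                throws IOException {
              CoprocessorRpcUtils.BlockingRpcCallback<ColumnAggregationProtos.SumResponse>
                  rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>();
              ColumnAggregationProtos.SumRequest.Builder builder =
                  ColumnAggregationProtos.SumRequest.newBuilder();
              // The qualifier is optional in SumRequest; only the family is set here.
              builder.setFamily(ByteString.copyFrom(family));
              instance.sum(null, builder.build(), rpcCallback);
              return rpcCallback.get().getSum();
            }
          });
      // Client-side reduce of the per-region partial sums.
      long total = 0;
      for (Long partial : partials.values()) {
        total += partial;
      }
      System.out.println("sum=" + total);
    }
  }
}

Each region answers the RPC through the BlockingRpcCallback and the client reduces the per-region map; the tests touched in this patch exercise the same round trip, just restricted to specific row ranges and qualifiers.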