From d66795c15064cc562cc304220521dab152133b4f Mon Sep 17 00:00:00 2001
From: zhangduo
Date: Fri, 29 Sep 2017 21:23:12 +0800
Subject: [PATCH] HBASE-18878 Use Optional instead of Nullable annotation in
 ObserverContext to better describe that the caller may not be present

---
 .../java/org/apache/hadoop/hbase/util/Classes.java | 12 ++--
 .../apache/hadoop/hbase/coprocessor/Export.java | 5 +-
 .../coprocessor/ProtobufCoprocessorService.java | 4 +-
 .../hadoop/hbase/regionserver/CompactionTool.java | 6 +-
 .../hadoop/hbase/client/VersionInfoUtil.java | 3 +-
 .../hadoop/hbase/coprocessor/CoprocessorHost.java | 18 ++---
 .../hadoop/hbase/coprocessor/ObserverContext.java | 83 ++++++++++++----------
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 5 +-
 .../apache/hadoop/hbase/ipc/PriorityFunction.java | 11 +--
 .../apache/hadoop/hbase/ipc/RpcCallContext.java | 13 ++--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java | 51 +++++++------
 .../org/apache/hadoop/hbase/ipc/ServerCall.java | 21 +++---
 .../hadoop/hbase/master/MasterCoprocessorHost.java | 3 +-
 .../hbase/master/procedure/MasterProcedureEnv.java | 6 +-
 .../hbase/master/snapshot/SnapshotManager.java | 24 ++++---
 .../hadoop/hbase/mob/DefaultMobStoreCompactor.java | 3 +-
 .../hbase/quotas/RegionServerRpcQuotaManager.java | 8 +--
 .../AnnotationReadingPriorityFunction.java | 15 ++--
 .../hadoop/hbase/regionserver/CompactSplit.java | 17 ++---
 .../hbase/regionserver/DateTieredStoreEngine.java | 4 +-
 .../hbase/regionserver/DefaultStoreEngine.java | 3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java | 31 ++++----
 .../hadoop/hbase/regionserver/HRegionServer.java | 11 +--
 .../apache/hadoop/hbase/regionserver/HStore.java | 8 +--
 .../hadoop/hbase/regionserver/RSRpcServices.java | 23 +++---
 .../apache/hadoop/hbase/regionserver/Region.java | 7 +-
 .../hbase/regionserver/RegionCoprocessorHost.java | 69 +++++++++---------
 .../regionserver/RegionServerCoprocessorHost.java | 6 +-
 .../hbase/regionserver/SecureBulkLoadManager.java | 63 ++++++++--------
 .../hbase/regionserver/StripeStoreEngine.java | 3 +-
 .../compactions/CompactionContext.java | 3 +-
 .../hbase/regionserver/compactions/Compactor.java | 11 +--
 .../compactions/DateTieredCompactor.java | 3 +-
 .../regionserver/compactions/DefaultCompactor.java | 5 +-
 .../compactions/StripeCompactionPolicy.java | 11 +--
 .../regionserver/compactions/StripeCompactor.java | 9 +--
 .../hbase/security/access/AccessController.java | 59 ++++++++-------
 .../hadoop/hbase/security/token/TokenProvider.java | 35 ++++-----
 .../security/visibility/VisibilityController.java | 27 +++----
 .../hbase/security/visibility/VisibilityUtils.java | 11 ++-
 .../org/apache/hadoop/hbase/QosTestHelper.java | 11 +--
 .../org/apache/hadoop/hbase/TestIOFencing.java | 3 +-
 .../TestRegionObserverScannerOpenHook.java | 2 +-
 .../hbase/ipc/TestProtobufRpcServiceImpl.java | 4 +-
 .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 20 +++---
 .../hbase/regionserver/StatefulStoreMockMaker.java | 5 +-
 .../hadoop/hbase/regionserver/TestCompaction.java | 31 ++++----
 .../hadoop/hbase/regionserver/TestHMobStore.java | 7 +-
 .../hadoop/hbase/regionserver/TestHStore.java | 4 +-
 .../hbase/regionserver/TestMajorCompaction.java | 5 +-
 .../hadoop/hbase/regionserver/TestPriorityRpc.java | 21 +++---
 .../hbase/regionserver/TestStripeStoreEngine.java | 11 +--
 .../compactions/TestStripeCompactionPolicy.java | 18 ++--
 .../compactions/TestStripeCompactor.java | 5 +-
 54 files changed, 437 insertions(+), 420 deletions(-)

diff --git
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java index 2366daf..f4a098d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java @@ -1,4 +1,4 @@ -/* +/** * Copyright The Apache Software Foundation * * Licensed to the Apache Software Foundation (ASF) under one @@ -17,7 +17,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.util; import org.apache.yetus.audience.InterfaceAudience; @@ -64,10 +63,10 @@ public class Classes { return valueType; } - public static String stringify(Class[] classes) { + public static String stringify(Class[] classes) { StringBuilder buf = new StringBuilder(); if (classes != null) { - for (Class c : classes) { + for (Class c : classes) { if (buf.length() > 0) { buf.append(","); } @@ -78,4 +77,9 @@ public class Classes { } return buf.toString(); } + + @SuppressWarnings("unchecked") + public static Class cast(Class clazz) { + return (Class) clazz; + } } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 667f7a3..a00af0f 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -453,10 +453,7 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces } private static User getActiveUser(final UserProvider userProvider, final Token userToken) throws IOException { - User user = RpcServer.getRequestUser(); - if (user == null) { - user = userProvider.getCurrent(); - } + User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent()); if (user == null && userToken != null) { LOG.warn("No found of user credentials, but a token was got from user request"); } else if (user != null && userToken != null) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index c69baee..6fc4eb9 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -79,8 +79,8 @@ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobu @Override public void addr(RpcController controller, EmptyRequestProto request, RpcCallback done) { - done.run(AddrResponseProto.newBuilder().setAddr(RpcServer.getRemoteAddress().getHostAddress()) - .build()); + done.run(AddrResponseProto.newBuilder() + .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index aec5fa0..f920b50 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -161,13 +161,13 @@ public class CompactionTool extends Configured implements Tool { store.triggerMajorCompaction(); } do { - Optional compaction = - 
store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); + Optional compaction = store.requestCompaction(PRIORITY_USER, + CompactionLifeCycleTracker.DUMMY, Optional.empty()); if (!compaction.isPresent()) { break; } List storeFiles = - store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); + store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, Optional.empty()); if (storeFiles != null && !storeFiles.isEmpty()) { if (keepCompactedFiles && deleteCompacted) { for (HStoreFile storeFile: storeFiles) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index 1866c20..95984de 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -71,8 +71,7 @@ public final class VersionInfoUtil { * @return the versionInfo extracted from the current RpcCallContext */ private static HBaseProtos.VersionInfo getCurrentClientVersionInfo() { - RpcCallContext call = RpcServer.getCurrentCall(); - return call != null ? call.getClientVersionInfo() : null; + return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo).orElse(null); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index da07c40..3ef9ad2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-
 package org.apache.hadoop.hbase.coprocessor;
 
 import java.io.IOException;
@@ -35,8 +33,6 @@ import java.util.function.Function;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
@@ -47,9 +43,12 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.SortedList;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Provides the common setup framework and runtime services for coprocessor
@@ -552,7 +551,7 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coproces
-    ObserverOperation(ObserverGetter<C, O> observerGetter, User user) {
+    ObserverOperation(ObserverGetter<C, O> observerGetter, Optional<User> user) {
       super(user);
       this.observerGetter = observerGetter;
     }
@@ -571,7 +570,8 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coproces
-    public ObserverOperationWithoutResult(ObserverGetter<C, O> observerGetter, User user) {
+    public ObserverOperationWithoutResult(ObserverGetter<C, O> observerGetter,
+        Optional<User> user) {
       super(observerGetter, user);
     }
@@ -599,7 +599,7 @@ public abstract class CoprocessorHost<C extends Coprocessor, E extends Coproces
-    public ObserverOperationWithResult(ObserverGetter<C, O> observerGetter, User user) {
+    public ObserverOperationWithResult(ObserverGetter<C, O> observerGetter, Optional<User> user) {
       super(observerGetter, user);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
index 0192ea3..a40323f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/ObserverContext.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -16,16 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.coprocessor;
 
-import edu.umd.cs.findbugs.annotations.Nullable;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
+import java.util.Optional;
+
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 
 /**
  * Carries the execution state for a given invocation of an Observer coprocessor
@@ -43,9 +42,9 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
   private E env;
   private boolean bypass;
   private boolean complete;
-  private User caller;
+  private final Optional<User> caller;
 
-  public ObserverContext(User caller) {
+  public ObserverContext(Optional<User> caller) {
     this.caller = caller;
   }
@@ -97,22 +96,19 @@ public class ObserverContext<E extends CoprocessorEnvironment> {
   }
 
   /**
-   * Returns the active user for the coprocessor call.
-   * If an explicit {@code User} instance was provided to the constructor, that will be returned,
-   * otherwise if we are in the context of an RPC call, the remote user is used. May return null
-   * if the execution is outside of an RPC context.
+   * Returns the active user for the coprocessor call. If an explicit {@code User} instance was
+   * provided to the constructor, that will be returned, otherwise if we are in the context of an
+   * RPC call, the remote user is used. May not be present if the execution is outside of an RPC
+   * context.
    */
-  @Nullable
-  public User getCaller() {
+  public Optional<User> getCaller() {
     return caller;
   }
 
   /**
-   * Instantiates a new ObserverContext instance if the passed reference is
-   * <code>null</code> and sets the environment in the new or existing instance.
-   * This allows deferring the instantiation of a ObserverContext until it is
-   * actually needed.
-   *
+   * Instantiates a new ObserverContext instance if the passed reference is <code>null</code> and
+   * sets the environment in the new or existing instance. This allows deferring the instantiation
+   * of a ObserverContext until it is actually needed.
    * @param env The coprocessor environment to set
    * @param context An existing ObserverContext instance to use, or <code>null</code>
    *     to create a new instance
    * @param <E> The environment type for the context
    * @return An instance of <code>ObserverContext</code> with the environment set
    */
   @Deprecated
   // TODO: Remove this method, ObserverContext should not depend on RpcServer
-  public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(
-      E env, ObserverContext< E> context) {
-    if (context == null) {
-      context = new ObserverContext<>(RpcServer.getRequestUser());
-    }
-    context.prepare(env);
-    return context;
+  public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(E env,
+      ObserverContext<E> context) {
+    return createAndPrepare(env, Optional.ofNullable(context), RpcServer.getRequestUser());
   }
 
   /**
-   * Instantiates a new ObserverContext instance if the passed reference is
-   * <code>null</code> and sets the environment in the new or existing instance.
-   * This allows deferring the instantiation of a ObserverContext until it is
-   * actually needed.
-   *
+   * Instantiates a new ObserverContext instance if the passed reference is <code>null</code> and
+   * sets the environment in the new or existing instance. This allows deferring the instantiation
+   * of a ObserverContext until it is actually needed.
    * @param env The coprocessor environment to set
-   * @param context An existing ObserverContext instance to use, or <code>null</code>
-   *     to create a new instance
+   * @param context An existing ObserverContext instance to use, or <code>null</code> to create a
+   *          new instance
+   * @param user The requesting caller for the execution context
+   * @param <E> The environment type for the context
+   * @return An instance of <code>ObserverContext</code> with the environment set
+   */
+  @Deprecated
+  public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(E env,
+      ObserverContext<E> context, User user) {
+    return createAndPrepare(env, Optional.ofNullable(context), Optional.ofNullable(user));
+  }
+
+  /**
+   * Instantiates a new ObserverContext instance if the passed reference is not present and sets the
+   * environment in the new or existing instance. This allows deferring the instantiation of a
+   * ObserverContext until it is actually needed.
+   * @param env The coprocessor environment to set
+   * @param context An existing ObserverContext instance to use, or not present to create a new
+   *          instance
    * @param user The requesting caller for the execution context
    * @param <E> The environment type for the context
    * @return An instance of <code>ObserverContext</code> with the environment set
    */
   public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(
-      E env, ObserverContext<E> context, User user) {
-    if (context == null) {
-      context = new ObserverContext<>(user);
-    }
-    context.prepare(env);
-    return context;
+      E env, Optional<ObserverContext<E>> context, Optional<User> user) {
+    ObserverContext<E> ctx = context.orElse(new ObserverContext<>(user));
+    ctx.prepare(env);
+    return ctx;
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index a8d2208..d4fc706 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.ipc;
 
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedChannelException;
+import java.util.Optional;
 
 import org.apache.hadoop.hbase.CallDroppedException;
 import org.apache.hadoop.hbase.CellScanner;
@@ -107,9 +108,9 @@ public class CallRunner {
       this.status.setStatus("Setting up call");
       this.status.setConnection(call.getRemoteAddress().getHostAddress(), call.getRemotePort());
       if (RpcServer.LOG.isTraceEnabled()) {
-        User remoteUser = call.getRequestUser();
+        Optional<User> remoteUser = call.getRequestUser();
         RpcServer.LOG.trace(call.toShortString() + " executing as " +
-            ((remoteUser == null) ? "NULL principal" : remoteUser.getName()));
+            (remoteUser.isPresent() ? remoteUser.get().getName() : "NULL principal"));
       }
       Throwable errorThrowable = null;
       String error = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
index f536e7c..7c0ae8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/PriorityFunction.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import java.util.Optional;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.hadoop.hbase.security.User;
 
 /**
  * Function to figure priority of incoming request.
@@ -38,7 +41,7 @@ public interface PriorityFunction {
    * @param user
    * @return Priority of this request.
    */
-  int getPriority(RequestHeader header, Message param, User user);
+  int getPriority(RequestHeader header, Message param, Optional<User> user);
 
   /**
    * Returns the deadline of the specified request.
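The ObserverContext and PriorityFunction hunks above are the heart of the patch: the "no caller" case moves from a @Nullable javadoc convention into the method types themselves. A minimal sketch of what the new contract looks like from an observer's point of view, assuming only the post-patch getCaller() signature; the helper name and the "<non-RPC>" placeholder are illustrative, not part of the patch:

    import java.util.Optional;

    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.security.User;

    public class CallerNames {
      // Pre-patch idiom, easy to get wrong under the @Nullable contract:
      //   User u = ctx.getCaller();
      //   String name = (u == null) ? "<non-RPC>" : u.getShortName();
      // Post-patch, the absent-caller case is part of the return type, so
      // forgetting the check is a compile error rather than a latent NPE.
      static <E extends CoprocessorEnvironment> String callerName(ObserverContext<E> ctx) {
        return ctx.getCaller().map(User::getShortName).orElse("<non-RPC>");
      }
    }
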
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index df3befd..6a4d3a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetAddress; +import java.util.Optional; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; @@ -48,16 +49,18 @@ public interface RpcCallContext { boolean isClientCellBlockSupported(); /** - * Returns the user credentials associated with the current RPC request or - * null if no credentials were provided. + * Returns the user credentials associated with the current RPC request or not present if no + * credentials were provided. * @return A User */ - User getRequestUser(); + Optional getRequestUser(); /** - * @return Current request's user name or null if none ongoing. + * @return Current request's user name or not present if none ongoing. */ - String getRequestUserName(); + default Optional getRequestUserName() { + return getRequestUser().map(User::getShortName); + } /** * @return Address of remote client in this call diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 22a0cf2..36d383a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; -import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -35,6 +33,7 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.concurrent.atomic.LongAdder; import org.apache.commons.logging.Log; @@ -46,8 +45,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.io.ByteBufferPool; @@ -62,14 +59,6 @@ import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -77,8 +66,20 @@ import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import org.codehaus.jackson.map.ObjectMapper; +import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; + /** * An RPC server that hosts protobuf described Services. * @@ -678,8 +679,8 @@ public abstract class RpcServer implements RpcServerInterface, * call. * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local) */ - public static RpcCall getCurrentCall() { - return CurCall.get(); + public static Optional getCurrentCall() { + return Optional.ofNullable(CurCall.get()); } public static boolean isInRpcCallContext() { @@ -687,13 +688,13 @@ public abstract class RpcServer implements RpcServerInterface, } /** - * Returns the user credentials associated with the current RPC request or - * null if no credentials were provided. + * Returns the user credentials associated with the current RPC request or not present if no + * credentials were provided. * @return A User */ - public static User getRequestUser() { - RpcCallContext ctx = getCurrentCall(); - return ctx == null? null: ctx.getRequestUser(); + public static Optional getRequestUser() { + Optional ctx = getCurrentCall(); + return ctx.isPresent() ? ctx.get().getRequestUser() : Optional.empty(); } /** @@ -704,19 +705,17 @@ public abstract class RpcServer implements RpcServerInterface, /** * Returns the username for any user associated with the current RPC - * request or null if no user is set. + * request or not present if no user is set. */ - public static String getRequestUserName() { - User user = getRequestUser(); - return user == null? null: user.getShortName(); + public static Optional getRequestUserName() { + return getRequestUser().map(User::getShortName); } /** * @return Address of remote client if a request is ongoing, else null */ - public static InetAddress getRemoteAddress() { - RpcCallContext ctx = getCurrentCall(); - return ctx == null? 
null: ctx.getRemoteAddress(); + public static Optional getRemoteAddress() { + return getCurrentCall().map(RpcCall::getRemoteAddress); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index 2baba9f..d52f7b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -22,6 +22,7 @@ import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -82,7 +83,7 @@ abstract class ServerCall implements RpcCall, Rpc protected ByteBufferListOutputStream cellBlockStream = null; protected CallCleanup reqCleanup = null; - protected User user; + protected final Optional user; protected final InetAddress remoteAddress; protected RpcCallback rpcCallback; @@ -110,10 +111,14 @@ abstract class ServerCall implements RpcCall, Rpc this.isError = false; this.size = size; this.tinfo = tinfo; - this.user = connection == null ? null : connection.user; // FindBugs: NP_NULL_ON_SOME_PATH + if (connection != null) { + this.user = Optional.ofNullable(connection.user); + this.retryImmediatelySupported = connection.retryImmediatelySupported; + } else { + this.user = Optional.empty(); + this.retryImmediatelySupported = false; + } this.remoteAddress = remoteAddress; - this.retryImmediatelySupported = - connection == null ? false : connection.retryImmediatelySupported; this.timeout = timeout; this.deadline = this.timeout > 0 ? this.receiveTime + this.timeout : Long.MAX_VALUE; this.reservoir = reservoir; @@ -432,17 +437,11 @@ abstract class ServerCall implements RpcCall, Rpc } @Override - public User getRequestUser() { + public Optional getRequestUser() { return user; } @Override - public String getRequestUserName() { - User user = getRequestUser(); - return user == null? 
null: user.getShortName(); - } - - @Override public InetAddress getRemoteAddress() { return remoteAddress; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 5431ece..688e1cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.Set; import com.google.protobuf.Service; @@ -161,7 +162,7 @@ public class MasterCoprocessorHost } public MasterObserverOperation(User user) { - super(masterObserverGetter, user); + super(masterObserverGetter, Optional.ofNullable(user)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java index fe3b9b4..f294f57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java @@ -110,11 +110,7 @@ public class MasterProcedureEnv implements ConfigurationObserver { } public User getRequestUser() { - User user = RpcServer.getRequestUser(); - if (user == null) { - user = Superusers.getSystemUser(); - } - return user; + return RpcServer.getRequestUser().orElse(Superusers.getSystemUser()); } public MasterServices getMasterServices() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 7bd069b..20a4f39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -41,8 +41,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; @@ -65,13 +63,8 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; -import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; @@ -87,8 +80,16 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.KeyLocker; import org.apache.hadoop.hbase.util.NonceKey; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import org.apache.zookeeper.KeeperException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; +import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type; + /** * This class manages the procedure of taking and restoring snapshots. There is only one * SnapshotManager for the master. @@ -586,10 +587,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable if (!snapshot.hasVersion()) { builder.setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION); } - User user = RpcServer.getRequestUser(); - if (User.isHBaseSecurityEnabled(master.getConfiguration()) && user != null) { - builder.setOwner(user.getShortName()); - } + RpcServer.getRequestUser().ifPresent(user -> { + if (User.isHBaseSecurityEnabled(master.getConfiguration())) { + builder.setOwner(user.getShortName()); + } + }); snapshot = builder.build(); // call pre coproc hook diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java index e8ada97..199f7fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java @@ -22,6 +22,7 @@ import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Date; import java.util.List; +import java.util.Optional; import java.util.OptionalInt; import org.apache.commons.logging.Log; @@ -106,7 +107,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor { @Override public List compact(CompactionRequest request, ThroughputController throughputController, - User user) throws IOException { + Optional user) throws IOException { return compact(request, scannerFactory, writerFactory, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java index f51e605..69253d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java @@ -176,13 +176,7 @@ public class RegionServerRpcQuotaManager { private OperationQuota checkQuota(final Region region, final int numWrites, final int numReads, final int numScans) throws IOException, ThrottlingException { - User user = RpcServer.getRequestUser(); - UserGroupInformation ugi; - if (user != null) { - ugi = user.getUGI(); - } else { - ugi = User.getCurrent().getUGI(); - } + UserGroupInformation ugi = RpcServer.getRequestUser().orElse(User.getCurrent()).getUGI(); TableName table = region.getTableDescriptor().getTableName(); OperationQuota quota = getQuota(ugi, table); 
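The MasterProcedureEnv, SnapshotManager and RegionServerRpcQuotaManager hunks above all apply the same migration pattern: a null check against RpcServer.getRequestUser() plus a fallback collapses into a single Optional expression. A hedged sketch of the idiom, mirroring the quota manager's new code; note that Optional.orElse evaluates its argument eagerly, so the fallback runs even when a request user is present, which is acceptable here because User.getCurrent() throws IOException and cannot sit inside an orElseGet supplier without wrapping:

    import java.io.IOException;

    import org.apache.hadoop.hbase.ipc.RpcServer;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.security.UserGroupInformation;

    public class RequestUgi {
      // Replaces the pre-patch form:
      //   User user = RpcServer.getRequestUser();
      //   UserGroupInformation ugi =
      //       (user != null) ? user.getUGI() : User.getCurrent().getUGI();
      static UserGroupInformation resolve() throws IOException {
        return RpcServer.getRequestUser().orElse(User.getCurrent()).getUGI();
      }
    }
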
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java index 377b685..2d5a4e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java @@ -20,14 +20,20 @@ package org.apache.hadoop.hbase.regionserver; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.QosPriority; +import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -39,11 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; -import org.apache.hadoop.hbase.security.User; - /** * Reads special method annotations and table names to figure a priority for use by QoS facility in * ipc; e.g: rpcs to hbase:meta get priority. @@ -165,7 +166,7 @@ public class AnnotationReadingPriorityFunction implements PriorityFunction { * NORMAL_QOS (user requests). 
*/ @Override - public int getPriority(RequestHeader header, Message param, User user) { + public int getPriority(RequestHeader header, Message param, Optional user) { int priorityByAnnotation = getAnnotatedPriority(header); if (priorityByAnnotation >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index e193dcb..fd1f6ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -236,17 +236,18 @@ public class CompactSplit implements PropagatingConfigurationObserver { } public synchronized void requestCompaction(HRegion region, String why, int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException { + CompactionLifeCycleTracker tracker, Optional user) throws IOException { requestCompactionInternal(region, why, priority, true, tracker, user); } public synchronized void requestCompaction(HRegion region, HStore store, String why, int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException { + CompactionLifeCycleTracker tracker, Optional user) throws IOException { requestCompactionInternal(region, store, why, priority, true, tracker, user); } private void requestCompactionInternal(HRegion region, String why, int priority, - boolean selectNow, CompactionLifeCycleTracker tracker, User user) throws IOException { + boolean selectNow, CompactionLifeCycleTracker tracker, Optional user) + throws IOException { // request compaction on all stores for (HStore store : region.stores.values()) { requestCompactionInternal(region, store, why, priority, selectNow, tracker, user); @@ -254,7 +255,7 @@ public class CompactSplit implements PropagatingConfigurationObserver { } private void requestCompactionInternal(HRegion region, HStore store, String why, int priority, - boolean selectNow, CompactionLifeCycleTracker tracker, User user) throws IOException { + boolean selectNow, CompactionLifeCycleTracker tracker, Optional user) throws IOException { if (this.server.isStopped() || (region.getTableDescriptor() != null && !region.getTableDescriptor().isCompactionEnabled())) { return; @@ -313,7 +314,7 @@ public class CompactSplit implements PropagatingConfigurationObserver { } private Optional selectCompaction(HRegion region, HStore store, int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException { + CompactionLifeCycleTracker tracker, Optional user) throws IOException { Optional compaction = store.requestCompaction(priority, tracker, user); if (!compaction.isPresent() && LOG.isDebugEnabled() && region.getRegionInfo() != null) { LOG.debug("Not compacting " + region.getRegionInfo().getRegionNameAsString() + @@ -449,11 +450,11 @@ public class CompactSplit implements PropagatingConfigurationObserver { private final Optional compaction; private int queuedPriority; private ThreadPoolExecutor parent; - private User user; + private Optional user; private long time; public CompactionRunner(HStore store, HRegion region, Optional compaction, - ThreadPoolExecutor parent, User user) { + ThreadPoolExecutor parent, Optional user) { super(); this.store = store; this.region = region; @@ -472,7 +473,7 @@ public class CompactSplit implements PropagatingConfigurationObserver { ", priority = " + queuedPriority + ", time = " + time); } - private void doCompaction(User user) { + private void doCompaction(Optional 
user) { CompactionContext c; // Common case - system compaction without a file selection. Select now. if (!compaction.isPresent()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java index f7c18f9..b932629 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -89,7 +90,8 @@ public class DateTieredStoreEngine extends StoreEngine compact(ThroughputController throughputController, User user) + @Override + public List compact(ThroughputController throughputController, Optional user) throws IOException { if (request instanceof DateTieredCompactionRequest) { return compactor.compact(request, ((DateTieredCompactionRequest) request).getBoundaries(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java index 58f8bbb..053f12a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -121,7 +122,7 @@ public class DefaultStoreEngine extends StoreEngine< } @Override - public List compact(ThroughputController throughputController, User user) + public List compact(ThroughputController throughputController, Optional user) throws IOException { return compactor.compact(request, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d059977..937ed11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -135,7 +135,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcCall; -import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -2008,11 +2007,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi */ public boolean compact(CompactionContext compaction, HStore store, ThroughputController throughputController) throws IOException { - return compact(compaction, store, throughputController, null); + return compact(compaction, store, throughputController, Optional.empty()); } public boolean compact(CompactionContext compaction, HStore store, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { assert 
compaction != null && compaction.hasSelection(); assert !compaction.getRequest().getFiles().isEmpty(); if (this.closing.get() || this.closed.get()) { @@ -5375,12 +5374,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi int timeout = rowLockWaitDuration; boolean reachDeadlineFirst = false; - RpcCall call = RpcServer.getCurrentCall(); - if (call != null && call.getDeadline() < Long.MAX_VALUE) { - int timeToDeadline = (int)(call.getDeadline() - System.currentTimeMillis()); - if (timeToDeadline <= this.rowLockWaitDuration) { - reachDeadlineFirst = true; - timeout = timeToDeadline; + Optional call = RpcServer.getCurrentCall(); + if (call.isPresent()) { + long deadline = call.get().getDeadline(); + if (deadline < Long.MAX_VALUE) { + int timeToDeadline = (int) (deadline - System.currentTimeMillis()); + if (timeToDeadline <= this.rowLockWaitDuration) { + reachDeadlineFirst = true; + timeout = timeToDeadline; + } } } @@ -6085,7 +6087,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (scannerContext == null) { throw new IllegalArgumentException("Scanner context cannot be null"); } - RpcCallContext rpcCall = RpcServer.getCurrentCall(); + Optional rpcCall = RpcServer.getCurrentCall(); // Save the initial progress from the Scanner context in these local variables. The progress // may need to be reset a few times if rows are being filtered out so we save the initial @@ -6110,13 +6112,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } else { scannerContext.clearProgress(); } - - if (rpcCall != null) { + if (rpcCall.isPresent()) { // If a user specifies a too-restrictive or too-slow scanner, the // client might time out and disconnect while the server side // is still processing the request. We should abort aggressively // in that case. 
- long afterTime = rpcCall.disconnectSince(); + long afterTime = rpcCall.get().disconnectSince(); if (afterTime >= 0) { throw new CallerDisconnectedException( "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + @@ -8087,14 +8088,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public void requestCompaction(String why, int priority, CompactionLifeCycleTracker tracker, - User user) throws IOException { + Optional user) throws IOException { ((HRegionServer) rsServices).compactSplitThread.requestCompaction(this, why, priority, tracker, user); } @Override public void requestCompaction(byte[] family, String why, int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException { + CompactionLifeCycleTracker tracker, Optional user) throws IOException { ((HRegionServer) rsServices).compactSplitThread.requestCompaction(this, Preconditions.checkNotNull(stores.get(family)), why, priority, tracker, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 5ef0358..07da5f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -41,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; @@ -1793,11 +1794,11 @@ public class HRegionServer extends HasThread implements majorCompactPriority > hr.getCompactPriority()) { this.instance.compactSplitThread.requestCompaction(hr, s, getName() + " requests major compaction; use default priority", Store.NO_PRIORITY, - CompactionLifeCycleTracker.DUMMY, null); + CompactionLifeCycleTracker.DUMMY, Optional.empty()); } else { this.instance.compactSplitThread.requestCompaction(hr, s, getName() + " requests major compaction; use configured priority", - this.majorCompactPriority, CompactionLifeCycleTracker.DUMMY, null); + this.majorCompactPriority, CompactionLifeCycleTracker.DUMMY, Optional.empty()); } } } catch (IOException e) { @@ -2111,7 +2112,7 @@ public class HRegionServer extends HasThread implements } @Override - public void stop(final String msg) { + public void stop(String msg) { stop(msg, false, RpcServer.getRequestUser()); } @@ -2121,7 +2122,7 @@ public class HRegionServer extends HasThread implements * @param force True if this is a regionserver abort * @param user The user executing the stop request, or null if no user is associated */ - public void stop(final String msg, final boolean force, final User user) { + public void stop(String msg, boolean force, Optional user) { if (!this.stopped) { LOG.info("***** STOPPING region server '" + this + "' *****"); if (this.rsHost != null) { @@ -2388,7 +2389,7 @@ public class HRegionServer extends HasThread implements LOG.warn("Unable to report fatal error to master", t); } // shutdown should be run as the internal user - stop(reason, true, null); + stop(reason, true, Optional.empty()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index f232ddd..040bf0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1346,7 +1346,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat * @return Storefile we compacted into or null if we failed or opted out early. */ public List compact(CompactionContext compaction, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { assert compaction != null; List sfs = null; CompactionRequest cr = compaction.getRequest(); @@ -1418,7 +1418,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } private List moveCompatedFilesIntoPlace(CompactionRequest cr, List newFiles, - User user) throws IOException { + Optional user) throws IOException { List sfs = new ArrayList<>(newFiles.size()); for (Path newFile : newFiles) { assert newFile != null; @@ -1661,11 +1661,11 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat } public Optional requestCompaction() throws IOException { - return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null); + return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, Optional.empty()); } public Optional requestCompaction(int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException { + CompactionLifeCycleTracker tracker, Optional user) throws IOException { // don't even select for compaction if writes are disabled if (!this.areWritesEnabled()) { return Optional.empty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index dba314d..b98fc35 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableMap; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; @@ -1448,7 +1449,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } @Override - public int getPriority(RequestHeader header, Message param, User user) { + public int getPriority(RequestHeader header, Message param, Optional user) { return priority.getPriority(header, param, user); } @@ -1547,11 +1548,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Quota support is enabled, the requesting user is not system/super user // and a quota policy is enforced that disables compactions. 
if (QuotaUtil.isQuotaEnabled(getConfiguration()) && - !Superusers.isSuperUser(RpcServer.getRequestUser()) && - this.regionServer.getRegionServerSpaceQuotaManager().areCompactionsDisabled( - region.getTableDescriptor().getTableName())) { - throw new DoNotRetryIOException("Compactions on this region are " - + "disabled due to a space quota violation."); + !Superusers.isSuperUser(RpcServer.getRequestUser().orElse(null)) && + this.regionServer.getRegionServerSpaceQuotaManager() + .areCompactionsDisabled(region.getTableDescriptor().getTableName())) { + throw new DoNotRetryIOException( + "Compactions on this region are " + "disabled due to a space quota violation."); } region.startRegionOperation(Operation.COMPACT_REGION); LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString()); @@ -2407,7 +2408,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ClientProtos.Get get = request.getGet(); Boolean existence = null; Result r = null; - RpcCallContext context = RpcServer.getCurrentCall(); + RpcCallContext context = RpcServer.getCurrentCall().orElse(null); quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.GET); Get clientGet = ProtobufUtil.toGet(get); @@ -2558,7 +2559,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder(); Boolean processed = null; RegionScannersCloseCallBack closeCallBack = null; - RpcCallContext context = RpcServer.getCurrentCall(); + RpcCallContext context = RpcServer.getCurrentCall().orElse(null); this.rpcMultiRequestCount.increment(); this.requestCount.increment(); Map regionStats = new HashMap<>(request @@ -2689,7 +2690,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, HBaseRpcController controller = (HBaseRpcController)rpcc; CellScanner cellScanner = controller != null ? controller.cellScanner() : null; OperationQuota quota = null; - RpcCallContext context = RpcServer.getCurrentCall(); + RpcCallContext context = RpcServer.getCurrentCall().orElse(null); ActivePolicyEnforcement spaceQuotaEnforcement = null; MutationType type = null; long before = EnvironmentEdgeManager.currentTime(); @@ -3269,7 +3270,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { rows = closeScanner ? 0 : 1; } - RpcCallContext context = RpcServer.getCurrentCall(); + RpcCallContext context = RpcServer.getCurrentCall().orElse(null); // now let's do the real scan. 
long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable()); RegionScanner scanner = rsh.s; @@ -3281,7 +3282,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { limitOfRows = -1; } - MutableObject lastBlock = new MutableObject(); + MutableObject lastBlock = new MutableObject<>(); boolean scannerClosed = false; try { List results = new ArrayList<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index f410e57..e2d40fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -762,14 +763,14 @@ public interface Region extends ConfigurationObserver { /** * Request compaction on this region. */ - void requestCompaction(String why, int priority, CompactionLifeCycleTracker tracker, User user) - throws IOException; + void requestCompaction(String why, int priority, CompactionLifeCycleTracker tracker, + Optional user) throws IOException; /** * Request compaction for the given family */ void requestCompaction(byte[] family, String why, int priority, - CompactionLifeCycleTracker tracker, User user) throws IOException; + CompactionLifeCycleTracker tracker, Optional user) throws IOException; /** Wait for all current flushes and compactions of the region to complete */ void waitForFlushesAndCompactions(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 1d9abca..2a468b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -438,14 +439,14 @@ public class RegionCoprocessorHost super(regionObserverGetter); } - public RegionObserverOperation(User user) { + public RegionObserverOperation(Optional user) { super(regionObserverGetter, user); } } abstract class BulkLoadObserverOperation extends ObserverOperationWithoutResult { - public BulkLoadObserverOperation(User user) { + public BulkLoadObserverOperation(Optional user) { super(RegionCoprocessor::getBulkLoadObserver, user); } } @@ -546,19 +547,19 @@ public class RegionCoprocessorHost * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, * InternalScanner, CompactionLifeCycleTracker, long)} */ - public InternalScanner preCompactScannerOpen(final HStore store, - final List scanners, final ScanType scanType, final long earliestPutTs, - final CompactionLifeCycleTracker tracker, final User user, final long readPoint) - throws IOException { - return execOperationWithResult(null, coprocEnvironments.isEmpty() ? 
null : - new ObserverOperationWithResult( - regionObserverGetter, user) { - @Override - public InternalScanner call(RegionObserver observer) throws IOException { - return observer.preCompactScannerOpen(this, store, scanners, scanType, - earliestPutTs, getResult(), tracker, readPoint); - } - }); + public InternalScanner preCompactScannerOpen(HStore store, List scanners, + ScanType scanType, long earliestPutTs, CompactionLifeCycleTracker tracker, + Optional user, long readPoint) throws IOException { + return execOperationWithResult(null, + coprocEnvironments.isEmpty() ? null + : new ObserverOperationWithResult(regionObserverGetter, + user) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preCompactScannerOpen(this, store, scanners, scanType, earliestPutTs, + getResult(), tracker, readPoint); + } + }); } /** @@ -570,8 +571,8 @@ public class RegionCoprocessorHost * @return If {@code true}, skip the normal selection process and use the current list * @throws IOException */ - public boolean preCompactSelection(final HStore store, final List candidates, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + public boolean preCompactSelection(HStore store, List candidates, + CompactionLifeCycleTracker tracker, Optional user) throws IOException { return execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { @@ -587,8 +588,8 @@ public class RegionCoprocessorHost * @param selected The store files selected to compact * @param tracker used to track the life cycle of a compaction */ - public void postCompactSelection(final HStore store, final ImmutableList selected, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + public void postCompactSelection(HStore store, ImmutableList selected, + CompactionLifeCycleTracker tracker, Optional user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { @@ -605,17 +606,17 @@ public class RegionCoprocessorHost * @param tracker used to track the life cycle of a compaction * @throws IOException */ - public InternalScanner preCompact(final HStore store, final InternalScanner scanner, - final ScanType scanType, final CompactionLifeCycleTracker tracker, final User user) - throws IOException { - return execOperationWithResult(false, scanner, coprocEnvironments.isEmpty() ? null : - new ObserverOperationWithResult( - regionObserverGetter, user) { - @Override - public InternalScanner call(RegionObserver observer) throws IOException { - return observer.preCompact(this, store, getResult(), scanType, tracker); - } - }); + public InternalScanner preCompact(HStore store, InternalScanner scanner, ScanType scanType, + CompactionLifeCycleTracker tracker, Optional user) throws IOException { + return execOperationWithResult(false, scanner, + coprocEnvironments.isEmpty() ? 
null + : new ObserverOperationWithResult(regionObserverGetter, + user) { + @Override + public InternalScanner call(RegionObserver observer) throws IOException { + return observer.preCompact(this, store, getResult(), scanType, tracker); + } + }); } /** @@ -625,8 +626,8 @@ public class RegionCoprocessorHost * @param tracker used to track the life cycle of a compaction * @throws IOException */ - public void postCompact(final HStore store, final HStoreFile resultFile, - final CompactionLifeCycleTracker tracker, final User user) throws IOException { + public void postCompact(HStore store, HStoreFile resultFile, CompactionLifeCycleTracker tracker, + Optional user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperation(user) { @Override public void call(RegionObserver observer) throws IOException { @@ -1488,7 +1489,7 @@ public class RegionCoprocessorHost ///////////////////////////////////////////////////////////////////////////////////////////////// // BulkLoadObserver hooks ///////////////////////////////////////////////////////////////////////////////////////////////// - public void prePrepareBulkLoad(User user) throws IOException { + public void prePrepareBulkLoad(Optional user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new BulkLoadObserverOperation(user) { @Override protected void call(BulkLoadObserver observer) throws IOException { @@ -1497,7 +1498,7 @@ public class RegionCoprocessorHost }); } - public void preCleanupBulkLoad(User user) throws IOException { + public void preCleanupBulkLoad(Optional user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new BulkLoadObserverOperation(user) { @Override protected void call(BulkLoadObserver observer) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java index b083b3f..cef414c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.Optional; import com.google.protobuf.Service; import org.apache.commons.logging.Log; @@ -98,7 +99,7 @@ public class RegionServerCoprocessorHost extends super(rsObserverGetter); } - public RegionServerObserverOperation(User user) { + public RegionServerObserverOperation(Optional user) { super(rsObserverGetter, user); } } @@ -106,8 +107,7 @@ public class RegionServerCoprocessorHost extends ////////////////////////////////////////////////////////////////////////////////////////////////// // RegionServerObserver operations ////////////////////////////////////////////////////////////////////////////////////////////////// - - public void preStop(String message, User user) throws IOException { + public void preStop(String message, Optional user) throws IOException { // While stopping the region server all coprocessors method should be executed first then the // coprocessor should be cleaned up. execShutdown(coprocEnvironments.isEmpty() ? 
null : new RegionServerObserverOperation(user) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java index c7d0ead..2f0f0dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,9 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; +import java.io.IOException; +import java.math.BigInteger; +import java.security.PrivilegedAction; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -29,19 +38,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest; import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -55,15 +53,12 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; -import java.math.BigInteger; -import java.security.PrivilegedAction; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest; /** * Bulk loads in secure mode. 
@@ -140,17 +135,19 @@ public class SecureBulkLoadManager { public String prepareBulkLoad(final Region region, final PrepareBulkLoadRequest request) throws IOException { - region.getCoprocessorHost().prePrepareBulkLoad(getActiveUser()); + User user = getActiveUser(); + region.getCoprocessorHost().prePrepareBulkLoad(Optional.of(user)); - String bulkToken = createStagingDir(baseStagingDir, getActiveUser(), - region.getTableDescriptor().getTableName()).toString(); + String bulkToken = + createStagingDir(baseStagingDir, user, region.getTableDescriptor().getTableName()) + .toString(); return bulkToken; } public void cleanupBulkLoad(final Region region, final CleanupBulkLoadRequest request) throws IOException { - region.getCoprocessorHost().preCleanupBulkLoad(getActiveUser()); + region.getCoprocessorHost().preCleanupBulkLoad(Optional.of(getActiveUser())); Path path = new Path(request.getBulkToken()); if (!fs.delete(path, true)) { @@ -275,16 +272,12 @@ public class SecureBulkLoadManager { } private User getActiveUser() throws IOException { - User user = RpcServer.getRequestUser(); - if (user == null) { - // for non-rpc handling, fallback to system user - user = userProvider.getCurrent(); - } - - //this is for testing - if (userProvider.isHadoopSecurityEnabled() - && "simple".equalsIgnoreCase(conf.get(User.HBASE_SECURITY_CONF_KEY))) { - return User.createUserForTesting(conf, user.getShortName(), new String[]{}); + // for non-rpc handling, fallback to system user + User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent()); + // this is for testing + if (userProvider.isHadoopSecurityEnabled() && + "simple".equalsIgnoreCase(conf.get(User.HBASE_SECURITY_CONF_KEY))) { + return User.createUserForTesting(conf, user.getShortName(), new String[] {}); } return user; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java index 39f142f..2eef3a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -100,7 +101,7 @@ public class StripeStoreEngine extends StoreEngine compact(ThroughputController throughputController, User user) + public List compact(ThroughputController throughputController, Optional user) throws IOException { Preconditions.checkArgument(this.stripeRequest != null, "Cannot compact without selection"); return this.stripeRequest.execute(compactor, throughputController, user); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java index d0b0731..e4935a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionContext.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.regionserver.HStoreFile; 
@@ -65,7 +66,7 @@ public abstract class CompactionContext { this.request = request; } - public abstract List compact(ThroughputController throughputController, User user) + public abstract List compact(ThroughputController throughputController, Optional user) throws IOException; public CompactionRequest getRequest() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 2c9a519..6015f6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.OptionalInt; import org.apache.commons.logging.Log; @@ -267,9 +268,9 @@ public abstract class Compactor { /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind); } - protected List compact(final CompactionRequest request, - InternalScannerFactory scannerFactory, CellSinkFactory sinkFactory, - ThroughputController throughputController, User user) throws IOException { + protected List compact(CompactionRequest request, InternalScannerFactory scannerFactory, + CellSinkFactory sinkFactory, ThroughputController throughputController, + Optional user) throws IOException { FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles()); this.progress = new CompactionProgress(fd.maxKeyCount); @@ -341,7 +342,7 @@ public abstract class Compactor { * @return Scanner override by coprocessor; null if not overriding. */ protected InternalScanner preCreateCoprocScanner(CompactionRequest request, ScanType scanType, - long earliestPutTs, List scanners, User user, long readPoint) + long earliestPutTs, List scanners, Optional user, long readPoint) throws IOException { if (store.getCoprocessorHost() == null) { return null; @@ -358,7 +359,7 @@ public abstract class Compactor { * @return Scanner scanner to use (usually the default); null if compaction should not proceed. 
*/ protected InternalScanner postCreateCoprocScanner(CompactionRequest request, ScanType scanType, - InternalScanner scanner, User user) throws IOException { + InternalScanner scanner, Optional user) throws IOException { if (store.getCoprocessorHost() == null) { return scanner; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index d0beed0..af3a6e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.OptionalLong; import org.apache.commons.logging.Log; @@ -55,7 +56,7 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor compact(final CompactionRequest request, final List lowerBoundaries, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing compaction with " + lowerBoundaries.size() + "windows, lower boundaries: " + lowerBoundaries); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 08951b4..25a6278 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.Collection; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -62,7 +63,7 @@ public class DefaultCompactor extends Compactor { * Do a minor/major compaction on an explicit set of storefiles from a Store. 
*/ public List compact(final CompactionRequest request, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { return compact(request, defaultScannerFactory, writerFactory, throughputController, user); } @@ -80,7 +81,7 @@ public class DefaultCompactor extends Compactor { throws IOException { CompactionRequest cr = new CompactionRequest(filesToCompact); cr.setIsMajor(isMajor, isMajor); - return compact(cr, NoLimitThroughputController.INSTANCE, null); + return compact(cr, NoLimitThroughputController.INSTANCE, Optional.empty()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 9dc8246..9594b8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -381,7 +382,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { public List execute(StripeCompactor compactor, ThroughputController throughputController) throws IOException { - return execute(compactor, throughputController, null); + return execute(compactor, throughputController, Optional.empty()); } /** * Executes the request against compactor (essentially, just calls correct overload of @@ -390,7 +391,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { * @return result of compact(...) 
*/ public abstract List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException; + ThroughputController throughputController, Optional user) throws IOException; public StripeCompactionRequest(CompactionRequest request) { this.request = request; @@ -441,8 +442,8 @@ public class StripeCompactionPolicy extends CompactionPolicy { } @Override - public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + public List execute(StripeCompactor compactor, ThroughputController throughputController, + Optional user) throws IOException { return compactor.compact(this.request, this.targetBoundaries, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } @@ -488,7 +489,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { @Override public List execute(StripeCompactor compactor, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { return compactor.compact(this.request, this.targetCount, this.targetKvs, this.startRow, this.endRow, this.majorRangeFromRow, this.majorRangeToRow, throughputController, user); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java index f552f96..832077a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -76,9 +77,9 @@ public class StripeCompactor extends AbstractMultiOutputCompactor compact(CompactionRequest request, final List targetBoundaries, - final byte[] majorRangeFromRow, final byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + public List compact(CompactionRequest request, List targetBoundaries, + byte[] majorRangeFromRow, byte[] majorRangeToRow, ThroughputController throughputController, + Optional user) throws IOException { if (LOG.isDebugEnabled()) { StringBuilder sb = new StringBuilder(); sb.append("Executing compaction with " + targetBoundaries.size() + " boundaries:"); @@ -103,7 +104,7 @@ public class StripeCompactor extends AbstractMultiOutputCompactor compact(CompactionRequest request, final int targetCount, final long targetSize, final byte[] left, final byte[] right, byte[] majorRangeFromRow, byte[] majorRangeToRow, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug( "Executing compaction with " + targetSize + " target file size, no more than " + targetCount diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index c8a089b..a4a278e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java 
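The StripeCompactionPolicy change just above illustrates the other half of the migration: the old no-user overload of execute stays source-compatible and simply forwards Optional.empty() to the new Optional-taking overload, so implementations see "no caller" as an explicit value instead of a null. A compilable sketch of that delegation idiom with simplified stand-in types (nothing below is HBase API); the AccessController hunks resume after it.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

abstract class ExecuteDelegationSketch {
  interface User {}

  // Legacy entry point, kept for callers that have no user in scope.
  public List<String> execute() throws IOException {
    return execute(Optional.empty());
  }

  // New canonical signature: absence of a caller is a value, not a null.
  public abstract List<String> execute(Optional<User> user) throws IOException;

  static class Echo extends ExecuteDelegationSketch {
    @Override
    public List<String> execute(Optional<User> user) {
      // user.isPresent() replaces the old "user != null" test.
      return user.isPresent() ? Collections.singletonList("with-user")
          : Collections.singletonList("system");
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(new Echo().execute()); // prints [system]
  }
}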
@@ -18,6 +18,11 @@ */ package org.apache.hadoop.hbase.security.access; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; @@ -34,10 +39,6 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -120,13 +121,6 @@ import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.Permission.Action; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.MapMaker; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; -import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; @@ -137,6 +131,14 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.MapMaker; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; +import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; + /** * Provides basic authorization checks for data access and administrative * operations. @@ -403,13 +405,11 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, private void logResult(AuthResult result) { if (AUDITLOG.isTraceEnabled()) { - InetAddress remoteAddr = RpcServer.getRemoteAddress(); - AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + - " for user " + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + - "; reason: " + result.getReason() + - "; remote address: " + (remoteAddr != null ? remoteAddr : "") + - "; request: " + result.getRequest() + - "; context: " + result.toContextString()); + AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user " + + (result.getUser() != null ? 
result.getUser().getShortName() : "UNKNOWN") + "; reason: " + + result.getReason() + "; remote address: " + + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + + result.getRequest() + "; context: " + result.toContextString()); } } @@ -418,13 +418,9 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, * If we are in the context of an RPC call, the remote user is used, * otherwise the currently logged in user is used. */ - private User getActiveUser(ObserverContext ctx) throws IOException { - User user = ctx.getCaller(); - if (user == null) { - // for non-rpc handling, fallback to system user - user = userProvider.getCurrent(); - } - return user; + private User getActiveUser(ObserverContext ctx) throws IOException { + // for non-rpc handling, fallback to system user + return ctx.getCaller().orElse(userProvider.getCurrent()); } /** @@ -2164,9 +2160,10 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, * the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { - if (!RpcServer.isInRpcCallContext()) + if (!RpcServer.isInRpcCallContext()) { return; - String requestUserName = RpcServer.getRequestUserName(); + } + String requestUserName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) { throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); @@ -2256,7 +2253,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (LOG.isDebugEnabled()) { LOG.debug("Received request to grant access permission " + perm.toString()); } - User caller = RpcServer.getRequestUser(); + User caller = RpcServer.getRequestUser().orElse(null); switch(request.getUserPermission().getPermission().getType()) { case Global : @@ -2309,7 +2306,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (LOG.isDebugEnabled()) { LOG.debug("Received request to revoke access permission " + perm.toString()); } - User caller = RpcServer.getRequestUser(); + User caller = RpcServer.getRequestUser().orElse(null); switch(request.getUserPermission().getPermission().getType()) { case Global : @@ -2358,7 +2355,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (!initialized) { throw new CoprocessorException("AccessController not yet initialized"); } - User caller = RpcServer.getRequestUser(); + User caller = RpcServer.getRequestUser().orElse(null); List perms = null; if (request.getType() == AccessControlProtos.Permission.Type.Table) { @@ -2419,7 +2416,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, } AccessControlProtos.CheckPermissionsResponse response = null; try { - User user = RpcServer.getRequestUser(); + User user = RpcServer.getRequestUser().orElse(null); TableName tableName = regionEnv.getRegion().getTableDescriptor().getTableName(); for (Permission permission : permissions) { if (permission instanceof TablePermission) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index e1b0f84..830ab4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,9 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.util.Collections; @@ -37,10 +40,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; - -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; import org.apache.yetus.audience.InterfaceAudience; /** @@ -113,17 +112,12 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService throw new IOException( "No secret manager configured for token authentication"); } - - User currentUser = RpcServer.getRequestUser(); - UserGroupInformation ugi = null; - if (currentUser != null) { - ugi = currentUser.getUGI(); - } - if (currentUser == null) { - throw new AccessDeniedException("No authenticated user for request!"); - } else if (!isAllowedDelegationTokenOp(ugi)) { - LOG.warn("Token generation denied for user="+currentUser.getName() - +", authMethod="+ugi.getAuthenticationMethod()); + User currentUser = RpcServer.getRequestUser() + .orElseThrow(() -> new AccessDeniedException("No authenticated user for request!")); + UserGroupInformation ugi = currentUser.getUGI(); + if (!isAllowedDelegationTokenOp(ugi)) { + LOG.warn("Token generation denied for user=" + currentUser.getName() + ", authMethod=" + + ugi.getAuthenticationMethod()); throw new AccessDeniedException( "Token generation only allowed for Kerberos authenticated clients"); } @@ -139,17 +133,16 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService @Override public void whoAmI(RpcController controller, AuthenticationProtos.WhoAmIRequest request, - RpcCallback done) { - User requestUser = RpcServer.getRequestUser(); + RpcCallback done) { AuthenticationProtos.WhoAmIResponse.Builder response = AuthenticationProtos.WhoAmIResponse.newBuilder(); - if (requestUser != null) { + RpcServer.getRequestUser().ifPresent(requestUser -> { response.setUsername(requestUser.getShortName()); AuthenticationMethod method = requestUser.getUGI().getAuthenticationMethod(); if (method != null) { response.setAuthMethod(method.name()); } - } + }); done.run(response.build()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 57b79a4..be04b18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -23,6 +23,11 @@ import static org.apache.hadoop.hbase.HConstants.OperationStatusCode.SUCCESS; import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_FAMILY; import static 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; +import com.google.protobuf.ByteString; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; @@ -51,7 +56,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; @@ -82,8 +86,8 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsRequest; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; @@ -109,13 +113,10 @@ import org.apache.hadoop.hbase.security.access.AccessController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.com.google.common.collect.MapMaker; -import com.google.protobuf.ByteString; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcController; -import com.google.protobuf.Service; /** * Coprocessor that has both the MasterObserver and RegionObserver implemented that supports in @@ -663,7 +664,7 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { if (!RpcServer.isInRpcCallContext()) return; - String requestUName = RpcServer.getRequestUserName(); + String requestUName = RpcServer.getRequestUserName().orElse(null); String owner = scannerOwners.get(s); if (authorizationEnabled && owner != null && !owner.equals(requestUName)) { throw new AccessDeniedException("User '" + requestUName + "' is not the scanner owner!"); @@ -892,7 +893,6 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso List labelAuths, String regex) { if (AUDITLOG.isTraceEnabled()) { // This is more duplicated code! - InetAddress remoteAddr = RpcServer.getRemoteAddress(); List labelAuthsStr = new ArrayList<>(); if (labelAuths != null) { int labelAuthsSize = labelAuths.size(); @@ -909,11 +909,12 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso LOG.warn("Failed to get active system user."); LOG.debug("Details on failure to get active system user.", e); } - AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " - + (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " - + reason + "; remote address: " + (remoteAddr != null ? remoteAddr : "") + "; request: " - + request + "; user: " + (user != null ? 
Bytes.toShort(user) : "null") + "; labels: " - + labelAuthsStr + "; regex: " + regex); + AUDITLOG.trace("Access " + (isAllowed ? "allowed" : "denied") + " for user " + + (requestingUser != null ? requestingUser.getShortName() : "UNKNOWN") + "; reason: " + + reason + "; remote address: " + + RpcServer.getRemoteAddress().map(InetAddress::toString).orElse("") + "; request: " + + request + "; user: " + (user != null ? Bytes.toShort(user) : "null") + "; labels: " + + labelAuthsStr + "; regex: " + regex); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index 0d83b64..964c0f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.TagType.VISIBILITY_TAG_TYPE; +import com.google.protobuf.ByteString; + import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -38,11 +40,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; @@ -60,11 +60,11 @@ import org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.LeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode; import org.apache.hadoop.hbase.security.visibility.expression.Operator; -import com.google.protobuf.ByteString; import org.apache.hadoop.hbase.util.ByteRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility method to support visibility @@ -283,10 +283,7 @@ public class VisibilityUtils { * @throws IOException When there is IOE in getting the system user (During non-RPC handling). 
*/ public static User getActiveUser() throws IOException { - User user = RpcServer.getRequestUser(); - if (user == null) { - user = User.getCurrent(); - } + User user = RpcServer.getRequestUser().orElse(User.getCurrent()); if (LOG.isTraceEnabled()) { LOG.trace("Current active user name is " + user.getShortName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java index 41b3118..698ca76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/QosTestHelper.java @@ -17,13 +17,16 @@ */ package org.apache.hadoop.hbase; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +import static org.junit.Assert.assertEquals; + +import java.util.Optional; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.regionserver.AnnotationReadingPriorityFunction; import org.apache.hadoop.hbase.security.User; -import static org.junit.Assert.assertEquals; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; public class QosTestHelper { protected void checkMethod(Configuration conf, final String methodName, final int expected, @@ -36,6 +39,6 @@ public class QosTestHelper { RPCProtos.RequestHeader.Builder builder = RPCProtos.RequestHeader.newBuilder(); builder.setMethodName(methodName); assertEquals(methodName, expected, qosf.getPriority(builder.build(), param, - User.createUserForTesting(conf, "someuser", new String[]{"somegroup"}))); + Optional.of(User.createUserForTesting(conf, "someuser", new String[]{"somegroup"})))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index b759261..cf60c59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Collection; import java.util.List; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import org.apache.commons.logging.Log; @@ -136,7 +137,7 @@ public class TestIOFencing { @Override public boolean compact(CompactionContext compaction, HStore store, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { try { return super.compact(compaction, store, throughputController, user); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index 2c9ab20..fb6ca67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -283,7 +283,7 @@ public class TestRegionObserverScannerOpenHook { @Override public boolean compact(CompactionContext compaction, HStore store, - ThroughputController throughputController, User user) throws IOException { + ThroughputController throughputController, Optional user) throws IOException { boolean ret = 
super.compact(compaction, store, throughputController, user); if (ret) compactionStateChangeLatch.countDown(); return ret; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java index aaffbfa..033ff67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java @@ -114,7 +114,7 @@ public class TestProtobufRpcServiceImpl implements BlockingInterface { @Override public AddrResponseProto addr(RpcController controller, EmptyRequestProto request) throws ServiceException { - return AddrResponseProto.newBuilder().setAddr(RpcServer.getRemoteAddress().getHostAddress()) - .build(); + return AddrResponseProto.newBuilder() + .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index b4f93c5..a6dcf59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -41,6 +41,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CountDownLatch; @@ -63,6 +64,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader import org.apache.hadoop.hbase.testclassification.RPCTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Classes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; @@ -150,9 +152,8 @@ public class TestSimpleRpcScheduler { scheduler.init(CONTEXT); scheduler.start(); for (CallRunner task : tasks) { - when(qosFunction.getPriority((RPCProtos.RequestHeader) anyObject(), - (Message) anyObject(), (User) anyObject())) - .thenReturn(qos.get(task)); + when(qosFunction.getPriority(any(RPCProtos.RequestHeader.class), (Message) any(Message.class), + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(qos.get(task)); scheduler.dispatch(task); } for (CallRunner task : tasks) { @@ -184,9 +185,8 @@ public class TestSimpleRpcScheduler { schedConf.set(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, queueType); PriorityFunction priority = mock(PriorityFunction.class); - when(priority.getPriority(any(RequestHeader.class), - any(Message.class), any(User.class))) - .thenReturn(HConstants.NORMAL_QOS); + when(priority.getPriority(any(RequestHeader.class), any(Message.class), + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(HConstants.NORMAL_QOS); RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 1, 1, 1, priority, HConstants.QOS_THRESHOLD); @@ -265,7 +265,7 @@ public class TestSimpleRpcScheduler { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(RequestHeader.class), any(Message.class), - any(User.class))).thenReturn(HConstants.NORMAL_QOS); + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(HConstants.NORMAL_QOS); RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 2, 1, 1, priority,
HConstants.QOS_THRESHOLD); @@ -281,7 +281,7 @@ public class TestSimpleRpcScheduler { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(RPCProtos.RequestHeader.class), any(Message.class), - any(User.class))).thenReturn(HConstants.NORMAL_QOS); + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(HConstants.NORMAL_QOS); RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 3, 1, 1, priority, HConstants.QOS_THRESHOLD); @@ -374,7 +374,7 @@ public class TestSimpleRpcScheduler { PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(RequestHeader.class), any(Message.class), - any(User.class))).thenReturn(HConstants.NORMAL_QOS); + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(HConstants.NORMAL_QOS); SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 0, 0, 0, priority, HConstants.QOS_THRESHOLD); try { @@ -440,7 +440,7 @@ public class TestSimpleRpcScheduler { RpcExecutor.CALL_QUEUE_TYPE_CODEL_CONF_VALUE); PriorityFunction priority = mock(PriorityFunction.class); when(priority.getPriority(any(RPCProtos.RequestHeader.class), any(Message.class), - any(User.class))).thenReturn(HConstants.NORMAL_QOS); + any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(HConstants.NORMAL_QOS); SimpleRpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 1, 1, 1, priority, HConstants.QOS_THRESHOLD); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java index a1fe87b..05733d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/StatefulStoreMockMaker.java @@ -29,6 +29,7 @@ import java.util.Optional; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Classes; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -68,8 +69,8 @@ public class StatefulStoreMockMaker { public HStore createStoreMock(String name) throws Exception { HStore store = mock(HStore.class, name); - when(store.requestCompaction(anyInt(), any(CompactionLifeCycleTracker.class), any(User.class))) - .then(new SelectAnswer()); + when(store.requestCompaction(anyInt(), any(CompactionLifeCycleTracker.class), + any(Classes.<Optional<User>> cast(Optional.class)))).then(new SelectAnswer()); when(store.getCompactPriority()).then(new PriorityAnswer()); doAnswer(new CancelAnswer()).when(store) .cancelRequestedCompaction(any(CompactionContext.class)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 349815f..b597649 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Classes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import
org.junit.After; @@ -308,7 +309,7 @@ public class TestCompaction { CountDownLatch latch = new CountDownLatch(1); Tracker tracker = new Tracker(latch); thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER, tracker, - null); + Optional.empty()); // wait for the latch to complete. latch.await(); @@ -341,7 +342,7 @@ public class TestCompaction { CountDownLatch latch = new CountDownLatch(1); Tracker tracker = new Tracker(latch); thread.requestCompaction(mockRegion, store, "test custom comapction", PRIORITY_USER, - tracker, null); + tracker, Optional.empty()); // wait for the latch to complete. latch.await(120, TimeUnit.SECONDS); @@ -381,7 +382,7 @@ public class TestCompaction { createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); thread.requestCompaction(r, store, "test mulitple custom comapctions", PRIORITY_USER, - tracker, null); + tracker, Optional.empty()); } // wait for the latch to complete. latch.await(); @@ -421,7 +422,7 @@ public class TestCompaction { } @Override - public List<Path> compact(ThroughputController throughputController, User user) + public List<Path> compact(ThroughputController throughputController, Optional<User> user) throws IOException { finishCompaction(this.selectedFiles); return new ArrayList<>(); @@ -475,7 +476,7 @@ public class TestCompaction { } @Override - public List<Path> compact(ThroughputController throughputController, User user) + public List<Path> compact(ThroughputController throughputController, Optional<User> user) throws IOException { try { isInCompact = true; @@ -557,16 +558,16 @@ public class TestCompaction { cst.shutdownLongCompactions(); // Set up the region mock that redirects compactions. HRegion r = mock(HRegion.class); - when( - r.compact(any(CompactionContext.class), any(HStore.class), - any(ThroughputController.class), any(User.class))).then(new Answer<Boolean>() { - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - invocation.getArgumentAt(0, CompactionContext.class).compact( - invocation.getArgumentAt(2, ThroughputController.class), null); - return true; - } - }); + when(r.compact(any(CompactionContext.class), any(HStore.class), any(ThroughputController.class), + any(Classes.<Optional<User>> cast(Optional.class)))).then(new Answer<Boolean>() { + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + invocation.getArgumentAt(0, CompactionContext.class).compact( + invocation.getArgumentAt(2, ThroughputController.class), + invocation.getArgumentAt(3, Classes.<Optional<User>> cast(Optional.class))); + return true; + } + }); // Set up store mocks for 2 "real" stores and the one we use for blocking CST.
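The region mock above leans on the Classes.cast() helper that this patch adds to hbase-common: Mockito's any(Optional.class) yields a raw Class, which cannot satisfy an Optional<User> parameter without an unchecked cast at every call site. A small self-contained sketch of the helper and the target typing it enables (the helper body mirrors the one added to Classes.java; the main() usage is illustrative); the store-mock setup announced by the trailing comment continues below.

import java.util.Optional;

public class ClassesCastSketch {
  // Mirrors the Classes.cast() helper this patch introduces: one
  // @SuppressWarnings site instead of an unchecked cast per matcher.
  @SuppressWarnings("unchecked")
  static <T> Class<T> cast(Class<?> clazz) {
    return (Class<T>) clazz;
  }

  public static void main(String[] args) {
    // The explicit type witness picks T = Optional<String>, so the result can
    // feed a generic-typed matcher such as Mockito's any(Class<T>) cleanly.
    Class<Optional<String>> c = ClassesCastSketch.<Optional<String>> cast(Optional.class);
    System.out.println(c.getName()); // prints java.util.Optional
  }
}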
ArrayList results = new ArrayList<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 066e686..28fc6a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -540,9 +540,10 @@ public class TestHMobStore { // Trigger major compaction this.store.triggerMajorCompaction(); - Optional requestCompaction = - this.store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); - this.store.compact(requestCompaction.get(), NoLimitThroughputController.INSTANCE, null); + Optional requestCompaction = this.store.requestCompaction(PRIORITY_USER, + CompactionLifeCycleTracker.DUMMY, Optional.empty()); + this.store.compact(requestCompaction.get(), NoLimitThroughputController.INSTANCE, + Optional.empty()); Assert.assertEquals(1, this.store.getStorefiles().size()); //Check encryption after compaction diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 815166b..5cbfd36 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -39,6 +39,7 @@ import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.util.NavigableSet; +import java.util.Optional; import java.util.TreeSet; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.CountDownLatch; @@ -419,7 +420,8 @@ public class TestHStore { assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS); // after compact; check the lowest time stamp - store.compact(store.requestCompaction().get(), NoLimitThroughputController.INSTANCE, null); + store.compact(store.requestCompaction().get(), NoLimitThroughputController.INSTANCE, + Optional.empty()); lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles()); lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles()); assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 71f18c0..8d924f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -33,6 +33,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Map.Entry; import org.apache.commons.logging.Log; @@ -437,8 +438,8 @@ public class TestMajorCompaction { } store.triggerMajorCompaction(); CompactionRequest request = - store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get() - .getRequest(); + store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, Optional.empty()) + .get().getRequest(); assertNotNull("Expected to receive a compaction request", request); assertEquals( "User-requested major compaction should always occur, even if there are too many store files", diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java index 4c4bae4..c99cb25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.Optional; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -100,8 +101,8 @@ public class TestPriorityRpc { Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(true); // Presume type. ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS); - assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, getRequest, - User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"}))); + assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, getRequest, Optional.of( + User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })))); } @Test @@ -114,8 +115,8 @@ public class TestPriorityRpc { headerBuilder.setMethodName("foo"); RequestHeader header = headerBuilder.build(); PriorityFunction qosFunc = regionServer.rpcServices.getPriority(); - assertEquals(HConstants.NORMAL_QOS, qosFunc.getPriority(header, null, - User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"}))); + assertEquals(HConstants.NORMAL_QOS, qosFunc.getPriority(header, null, Optional.of( + User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })))); } @Test @@ -137,8 +138,8 @@ public class TestPriorityRpc { Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(false); // Presume type. ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS); - int qos = priority.getPriority(header, scanRequest, - User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"})); + int qos = priority.getPriority(header, scanRequest, Optional.of( + User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" }))); assertTrue ("" + qos, qos == HConstants.NORMAL_QOS); //build a scan request with scannerID @@ -156,12 +157,12 @@ public class TestPriorityRpc { // Presume type. 
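Note the wrapper chosen in the TestPriorityRpc assertions above: Optional.of(...) rather than Optional.ofNullable(...). That is the right choice when the value must be present, because Optional.of throws NullPointerException on null and a test with a missing user then fails fast. A short illustration of the distinction (the unset environment variable is an arbitrary example of a legitimately absent value); the priority-function setup resumes below.

import java.util.Optional;

public class OptionalWrappingSketch {
  public static void main(String[] args) {
    // Optional.of: the value must be non-null; absence would be a bug.
    Optional<String> present = Optional.of("someuser");
    // Optional.ofNullable: absence is a legal state, e.g. outside an RPC call.
    Optional<String> maybe = Optional.ofNullable(System.getenv("SOME_UNSET_VAR"));
    System.out.println(present.get() + " / " + maybe.orElse("<absent>"));
  }
}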
     ((AnnotationReadingPriorityFunction)priority).setRegionServer(mockRS);
-    assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, scanRequest,
-      User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"})));
+    assertEquals(HConstants.SYSTEMTABLE_QOS, priority.getPriority(header, scanRequest, Optional.of(
+      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" }))));
 
     //the same as above but with non-meta region
     Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(false);
-    assertEquals(HConstants.NORMAL_QOS, priority.getPriority(header, scanRequest,
-      User.createUserForTesting(regionServer.conf, "someuser", new String[]{"somegroup"})));
+    assertEquals(HConstants.NORMAL_QOS, priority.getPriority(header, scanRequest, Optional.of(
+      User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" }))));
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
index a562af8..a52776f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
@@ -30,6 +30,7 @@ import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Optional;
 import java.util.OptionalLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Classes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -77,9 +79,8 @@
     se.setCompactorOverride(mockCompactor);
     when(
       mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class),
-        any(byte[].class), any(byte[].class), any(byte[].class),
-        any(ThroughputController.class), any(User.class)))
-      .thenReturn(new ArrayList<>());
+        any(byte[].class), any(byte[].class), any(byte[].class), any(ThroughputController.class),
+        any(Classes.<Optional<User>> cast(Optional.class)))).thenReturn(new ArrayList<>());
 
     // Produce 3 L0 files.
     HStoreFile sf = createFile();
@@ -97,10 +98,10 @@
     assertEquals(2, compaction.getRequest().getFiles().size());
     assertFalse(compaction.getRequest().getFiles().contains(sf));
     // Make sure the correct method it called on compactor.
-    compaction.compact(NoLimitThroughputController.INSTANCE, null);
+    compaction.compact(NoLimitThroughputController.INSTANCE, Optional.empty());
     verify(mockCompactor, times(1)).compact(compaction.getRequest(), targetCount, 0L,
       StripeStoreFileManager.OPEN_KEY, StripeStoreFileManager.OPEN_KEY, null, null,
-      NoLimitThroughputController.INSTANCE, null);
+      NoLimitThroughputController.INSTANCE, Optional.empty());
   }
 
   private static HStoreFile createFile() throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index 1249fee..ac12bd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -42,6 +42,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 import java.util.OptionalLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -74,6 +75,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Classes;
 import org.apache.hadoop.hbase.util.ConcatenatedLists;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
@@ -231,10 +233,10 @@
     assertTrue(policy.needsCompactions(si, al()));
     StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
     assertEquals(si.getStorefiles(), scr.getRequest().getFiles());
-    scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
+    scr.execute(sc, NoLimitThroughputController.INSTANCE, Optional.empty());
     verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY),
-      aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY),
-      any(NoLimitThroughputController.class), any(User.class));
+      aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), any(NoLimitThroughputController.class),
+      any(Classes.<Optional<User>> cast(Optional.class)));
   }
 
   @Test
@@ -486,7 +488,7 @@
     // All the Stripes are expired, so the Compactor will not create any Writers. We need to create
     // an empty file to preserve metadata
     StripeCompactor sc = createCompactor();
-    List<Path> paths = scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
+    List<Path> paths = scr.execute(sc, NoLimitThroughputController.INSTANCE, Optional.empty());
     assertEquals(1, paths.size());
   }
 
@@ -545,7 +547,7 @@
     assertTrue(policy.needsCompactions(si, al()));
     StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
     verifyCollectionsEqual(sfs, scr.getRequest().getFiles());
-    scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
+    scr.execute(sc, NoLimitThroughputController.INSTANCE, Optional.empty());
     verify(sc, times(1)).compact(eq(scr.getRequest()), argThat(new ArgumentMatcher<List<byte[]>>() {
       @Override
       public boolean matches(Object argument) {
@@ -559,7 +561,7 @@
       }
     }), dropDeletesFrom == null ? isNull(byte[].class) : aryEq(dropDeletesFrom),
       dropDeletesTo == null ? isNull(byte[].class) : aryEq(dropDeletesTo),
-      any(NoLimitThroughputController.class), any(User.class));
+      any(NoLimitThroughputController.class), any(Classes.<Optional<User>> cast(Optional.class)));
   }
 
   /**
@@ -580,12 +582,12 @@
     assertTrue(!needsCompaction || policy.needsCompactions(si, al()));
     StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
     verifyCollectionsEqual(sfs, scr.getRequest().getFiles());
-    scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
+    scr.execute(sc, NoLimitThroughputController.INSTANCE, Optional.empty());
     verify(sc, times(1)).compact(eq(scr.getRequest()),
       count == null ? anyInt() : eq(count.intValue()),
       size == null ? anyLong() : eq(size.longValue()), aryEq(start), aryEq(end),
       dropDeletesMatcher(dropDeletes, start), dropDeletesMatcher(dropDeletes, end),
-      any(NoLimitThroughputController.class), any(User.class));
+      any(NoLimitThroughputController.class), any(Classes.<Optional<User>> cast(Optional.class)));
   }
 
   /** Verify arbitrary flush. */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index bd3a803..d4e848f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -30,6 +30,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -138,7 +139,7 @@ public class TestStripeCompactor {
     StoreFileWritersCapture writers = new StoreFileWritersCapture();
     StripeCompactor sc = createCompactor(writers, input);
     List<Path> paths = sc.compact(createDummyRequest(), Arrays.asList(boundaries), majorFrom,
-        majorTo, NoLimitThroughputController.INSTANCE, null);
+        majorTo, NoLimitThroughputController.INSTANCE, Optional.empty());
     writers.verifyKvs(output, allFiles, true);
     if (allFiles) {
       assertEquals(output.length, paths.size());
@@ -174,7 +175,7 @@
     StoreFileWritersCapture writers = new StoreFileWritersCapture();
     StripeCompactor sc = createCompactor(writers, input);
     List<Path> paths = sc.compact(createDummyRequest(), targetCount, targetSize, left, right, null,
-        null, NoLimitThroughputController.INSTANCE, null);
+        null, NoLimitThroughputController.INSTANCE, Optional.empty());
     assertEquals(output.length, paths.size());
     writers.verifyKvs(output, true, true);
     List<byte[]> boundaries = new ArrayList<>(output.length + 2);
-- 
2.7.4