diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java
index 11da20f66a..fff68c7910 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/DaemonThreadFactory.java
@@ -19,10 +19,12 @@ package org.apache.hadoop.hbase;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Thread factory that creates daemon threads
*/
+@InterfaceAudience.Private
public class DaemonThreadFactory implements ThreadFactory {
private static final AtomicInteger poolNumber = new AtomicInteger(1);
private final ThreadGroup group;
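The @InterfaceAudience.Private annotation added throughout this patch marks a class as HBase-internal, with no compatibility guarantees for code outside the project. For context, a daemon-thread pool built with this factory could look like the sketch below (illustrative only, not part of the patch; the single String constructor argument is assumed from existing callers such as FifoRpcScheduler):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.DaemonThreadFactory;

public class DaemonPoolExample {
  public static void main(String[] args) throws InterruptedException {
    // Every thread created by this pool is a daemon, so the pool never keeps the JVM alive.
    ExecutorService pool =
        Executors.newFixedThreadPool(4, new DaemonThreadFactory("example-pool-"));
    pool.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}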
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
index 8ab139f63c..c78f3b37aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The Class HealthCheckChore for running health checker regularly.
*/
+@InterfaceAudience.Private
public class HealthCheckChore extends ScheduledChore {
private static final Logger LOG = LoggerFactory.getLogger(HealthCheckChore.class);
private HealthChecker healthChecker;
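HealthCheckChore periodically runs an operator-supplied health script and stops the server after repeated failures. A minimal sketch of the related configuration, assuming the usual hbase.node.health.script.* key names (they are not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HealthCheckConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: script location, per-run timeout, and how often the chore wakes up.
    conf.set("hbase.node.health.script.location", "/opt/hbase/bin/node-health.sh");
    conf.setLong("hbase.node.health.script.timeout", 60000L);
    conf.setLong("hbase.node.health.script.frequency", 10000L);
    System.out.println("health script: " + conf.get("hbase.node.health.script.location"));
  }
}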
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
index 6fdc77e34d..5008354936 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/JMXListener.java
@@ -18,14 +18,6 @@
*/
package org.apache.hadoop.hbase;
-import com.google.protobuf.Service;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.rmi.registry.LocateRegistry;
@@ -34,13 +26,20 @@ import java.rmi.server.RMIClientSocketFactory;
import java.rmi.server.RMIServerSocketFactory;
import java.rmi.server.UnicastRemoteObject;
import java.util.HashMap;
-import java.util.Optional;
-
import javax.management.MBeanServer;
import javax.management.remote.JMXConnectorServer;
import javax.management.remote.JMXConnectorServerFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Pluggable JMX Agent for HBase (to fix the 2 random TCP ports issue
@@ -49,6 +48,7 @@ import javax.management.remote.rmi.RMIConnectorServer;
* 2)support password authentication
* 3)support subset of SSL (with default configuration)
*/
+@InterfaceAudience.Private
public class JMXListener implements MasterCoprocessor, RegionServerCoprocessor {
private static final Logger LOG = LoggerFactory.getLogger(JMXListener.class);
public static final String RMI_REGISTRY_PORT_CONF_KEY = ".rmi.registry.port";
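JMXListener is loaded as a master or region server coprocessor, and the .rmi.registry.port key above is prefixed with the process type. A hedged sketch of enabling it on region servers (the coprocessor-loading key and the port value are assumptions based on the reference guide, not taken from this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.JMXListener;

public class JmxListenerConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key for loading region server coprocessors.
    conf.set("hbase.coprocessor.regionserver.classes", JMXListener.class.getName());
    // Pin the RMI registry to a fixed port so only one well-known TCP port must be opened.
    conf.setInt("regionserver" + JMXListener.RMI_REGISTRY_PORT_CONF_KEY, 10102);
    System.out.println("registry port key: regionserver" + JMXListener.RMI_REGISTRY_PORT_CONF_KEY);
  }
}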
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java
index d505d6fdad..5dffb73d3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIClientSocketFactorySecure.java
@@ -13,14 +13,15 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.net.Socket;
import java.util.ArrayList;
-
import javax.net.ssl.SSLSocket;
import javax.rmi.ssl.SslRMIClientSocketFactory;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Avoid SSL V3.0 "Poodle" Vulnerability - CVE-2014-3566
*/
@SuppressWarnings("serial")
+@InterfaceAudience.Private
public class SslRMIClientSocketFactorySecure extends SslRMIClientSocketFactory {
@Override
public Socket createSocket(String host, int port) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
index 3583afeb4c..8a9223675a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SslRMIServerSocketFactorySecure.java
@@ -14,14 +14,15 @@ import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.ArrayList;
-
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import javax.rmi.ssl.SslRMIServerSocketFactory;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Avoid SSL V3.0 "Poodle" Vulnerability - CVE-2014-3566
*/
+@InterfaceAudience.Private
public class SslRMIServerSocketFactorySecure extends SslRMIServerSocketFactory {
// If you add more constructors, you may have to change the rest of this implementation,
// which assumes an empty constructor, i.e. there are no specially enabled protocols or
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
index 3e911a8dd6..f0b3a41b84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
@@ -25,13 +25,13 @@ import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,7 +48,8 @@ import org.slf4j.LoggerFactory;
* file, and use it to delete it. For a master, as the znode path is constant whatever the server, we
* check its content to make sure that the backup server is not now in charge.
*/
-public class ZNodeClearer {
+@InterfaceAudience.Private
+public final class ZNodeClearer {
private static final Logger LOG = LoggerFactory.getLogger(ZNodeClearer.class);
private ZNodeClearer() {}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
index 93c9690d79..a4daaf0113 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveTableMonitor.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.backup.example;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
-
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -30,6 +30,7 @@ import org.slf4j.LoggerFactory;
*
* It is internally synchronized to ensure consistent view of the table state.
*/
+@InterfaceAudience.Private
public class HFileArchiveTableMonitor {
private static final Logger LOG = LoggerFactory.getLogger(HFileArchiveTableMonitor.class);
private final Set<String> archivedTables = new TreeSet<>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index 9ea67c1f13..bc3d85e1f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.fs;
+import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.Field;
@@ -30,7 +31,6 @@ import java.lang.reflect.Modifier;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.URI;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -52,16 +52,16 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.util.Progressable;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import edu.umd.cs.findbugs.annotations.Nullable;
-
/**
* An encapsulation for the FileSystem object that hbase uses to access
* data. This class allows the flexibility of using
* separate filesystem objects for reading and writing hfiles and wals.
*/
+@InterfaceAudience.Private
public class HFileSystem extends FilterFileSystem {
public static final Logger LOG = LoggerFactory.getLogger(HFileSystem.class);
@@ -128,8 +128,8 @@ public class HFileSystem extends FilterFileSystem {
}
/**
- * Returns the filesystem that is specially setup for
- * doing reads from storage. This object avoids doing
+ * Returns the filesystem that is specially setup for
+ * doing reads from storage. This object avoids doing
* checksum verifications for reads.
* @return The FileSystem object that can be used to read data
* from files.
@@ -473,7 +473,7 @@ public class HFileSystem extends FilterFileSystem {
}
/**
- * The org.apache.hadoop.fs.FilterFileSystem does not yet support
+ * The org.apache.hadoop.fs.FilterFileSystem does not yet support
* createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop,
* this definition will go away.
*/
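To illustrate the read/write split described above, a minimal sketch assuming the existing two-argument constructor and the getNoChecksumFs()/getBackingFs() accessors:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // useHBaseChecksum=true: reads skip HDFS checksum verification because HBase
    // verifies its own checksums stored inside HFile blocks.
    HFileSystem hfs = new HFileSystem(conf, true);
    FileSystem readFs = hfs.getNoChecksumFs(); // used for reads
    FileSystem writeFs = hfs.getBackingFs();   // used for writes
    System.out.println("read fs: " + readFs.getUri() + ", write fs: " + writeFs.getUri());
  }
}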
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java
index 344819b14b..3634ccb595 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIO.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hbase.io;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+@InterfaceAudience.Private
public class MetricsIO {
private final MetricsIOSource source;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java
index 82eb74ca1c..1ce762a0ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java
@@ -19,7 +19,9 @@
package org.apache.hadoop.hbase.io;
import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.yetus.audience.InterfaceAudience;
+@InterfaceAudience.Private
public class MetricsIOWrapperImpl implements MetricsIOWrapper {
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
index bb80abee72..054d54b478 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java
@@ -19,11 +19,13 @@ package org.apache.hadoop.hbase.io.hfile;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Snapshot of block cache age in cache.
* This object is preferred because we can control how it is serialized out when JSON'ing.
*/
+@InterfaceAudience.Private
@JsonIgnoreProperties({"ageHistogram", "snapshot"})
public class AgeSnapshot {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java
index 9d4ac87426..d2ac6f0a19 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.io.hfile;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
public enum BlockPriority {
/**
* Accessed a single time (used for scan-resistance)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java
index 4a5bb64103..b8f83578d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java
@@ -18,11 +18,13 @@
package org.apache.hadoop.hbase.io.hfile;
import org.apache.hadoop.hbase.Cell;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* BlockWithScanInfo is a wrapper class for HFileBlock with other attributes. These attributes are
* supposed to be much cheaper to maintain in each caller thread than in HFileBlock itself.
*/
+@InterfaceAudience.Private
public class BlockWithScanInfo {
private final HFileBlock hFileBlock;
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index ce8d53338e..175fb83d9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -28,15 +28,16 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class PrefetchExecutor {
+@InterfaceAudience.Private
+public final class PrefetchExecutor {
private static final Logger LOG = LoggerFactory.getLogger(PrefetchExecutor.class);
@@ -130,9 +131,10 @@ public class PrefetchExecutor {
public static boolean isCompleted(Path path) {
Future<?> future = prefetchFutures.get(path);
if (future != null) {
- return future.isDone();
+ return future.isDone();
}
return true;
}
+ private PrefetchExecutor() {}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
index 2fab38a5db..9ca292751d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
@@ -17,5 +17,8 @@
*/
package org.apache.hadoop.hbase.ipc;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
@SuppressWarnings("serial")
public class EmptyServiceNameException extends FatalConnectionException {}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index d379e5f714..ce0f86d276 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -23,9 +23,9 @@ import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -36,6 +36,7 @@ import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil;
*
* This can be used for HMaster, where no prioritization is needed.
*/
+@InterfaceAudience.Private
public class FifoRpcScheduler extends RpcScheduler {
private static final Logger LOG = LoggerFactory.getLogger(FifoRpcScheduler.class);
private final int handlerCount;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
index 4afcc3341f..8753ebb39c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
@@ -19,6 +19,9 @@
package org.apache.hadoop.hbase.ipc;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper {
private RpcServer server;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java
index 9e51d2c4b2..ca1546cd83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/QosPriority.java
@@ -19,14 +19,15 @@ package org.apache.hadoop.hbase.ipc;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
-
import org.apache.hadoop.hbase.HConstants;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Annotation which decorates RPC methods to denote the relative priority among other RPCs in the
* same server. Provides a basic notion of quality of service (QOS).
*/
@Retention(RetentionPolicy.RUNTIME)
+@InterfaceAudience.Private
public @interface QosPriority {
int priority() default HConstants.NORMAL_QOS;
}
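For context, the annotation is applied to RPC handler methods so the annotation-reading priority function can route them to the right handler pool; the method below is hypothetical, not one from this patch:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ipc.QosPriority;

public class QosPriorityExample {
  // Hypothetical admin-priority RPC handler; the priority value is read via reflection.
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public void closeRegion(String encodedRegionName) {
    System.out.println("closing " + encodedRegionName);
  }
}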
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java
index 87e78c2d6a..5b4a2c241b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java
@@ -17,7 +17,10 @@
*/
package org.apache.hadoop.hbase.ipc;
+import org.apache.yetus.audience.InterfaceAudience;
+
@SuppressWarnings("serial")
+@InterfaceAudience.Private
public class UnknownServiceException extends FatalConnectionException {
UnknownServiceException(final String msg) {
super(msg);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index f25f3bfc4c..61eb28fcee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -17,16 +17,19 @@
*/
package org.apache.hadoop.hbase.master;
-import org.apache.hbase.thirdparty.com.google.protobuf.Message;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.AnnotationReadingPriorityFunction;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
-import org.apache.hadoop.hbase.regionserver.AnnotationReadingPriorityFunction;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.security.User;
/**
* Priority function specifically for the master.
@@ -41,6 +44,7 @@ import org.apache.hadoop.hbase.security.User;
* that all requests to transition meta are handled in different threads from other report region
* in transition calls.
*/
+@InterfaceAudience.Private
public class MasterAnnotationReadingPriorityFunction extends AnnotationReadingPriorityFunction {
public MasterAnnotationReadingPriorityFunction(final RSRpcServices rpcServices) {
this(rpcServices, rpcServices.getClass());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
index 7d7dd81445..d13ffe9b83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManager.java
@@ -18,11 +18,13 @@
package org.apache.hadoop.hbase.master;
+import static org.apache.hadoop.hbase.master.MetricsMaster.convertToProcedureMetrics;
+
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.yetus.audience.InterfaceAudience;
-import static org.apache.hadoop.hbase.master.MetricsMaster.convertToProcedureMetrics;
-
+@InterfaceAudience.Private
public class MetricsAssignmentManager {
private final MetricsAssignmentManagerSource assignmentManagerSource;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
index 45dbeb8595..609ac1929d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
@@ -19,7 +19,9 @@
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+@InterfaceAudience.Private
public class MetricsMasterFileSystem {
private final MetricsMasterFileSystemSource source;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java
index 2d7c7979ee..483552d92a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshot.java
@@ -19,7 +19,9 @@
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+@InterfaceAudience.Private
public class MetricsSnapshot {
private final MetricsSnapshotSource source;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 36f57f23c8..44dfe41544 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RackManager;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -67,6 +68,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
* actual balancing algorithm.
*
*/
+@InterfaceAudience.Private
public abstract class BaseLoadBalancer implements LoadBalancer {
protected static final int MIN_SERVER_BALANCE = 2;
private volatile boolean stopped = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
index a783a07c29..5d1e1ccac2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
@@ -21,13 +21,14 @@ import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
-
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Class used to hold the current state of the cluster and how balanced it is.
*/
+@InterfaceAudience.Private
public class ClusterLoadState {
private final Map<ServerName, List<RegionInfo>> clusterState;
private final NavigableMap<ServerAndLoad, List<RegionInfo>> serversByLoad;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
index b65261029e..5a6659e271 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -66,6 +67,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
* {@link org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator}
*
*/
+@InterfaceAudience.Private
public class FavoredStochasticBalancer extends StochasticLoadBalancer implements
FavoredNodesPromoter {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java
index 3707536ebf..c4212694a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancer.java
@@ -19,10 +19,12 @@
package org.apache.hadoop.hbase.master.balancer;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Facade for exposing metrics about the balancer.
*/
+@InterfaceAudience.Private
public class MetricsBalancer {
private MetricsBalancerSource source = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java
index 850a9f5f49..ee24ff3fdb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancer.java
@@ -19,11 +19,13 @@
package org.apache.hadoop.hbase.master.balancer;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This metrics balancer uses extended source for stochastic load balancer
* to report its related metrics to JMX. For details, refer to HBASE-13965
*/
+@InterfaceAudience.Private
public class MetricsStochasticBalancer extends MetricsBalancer {
/**
* Use the stochastic source instead of the default source.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
index ed0ec9f77b..b58720062c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
@@ -17,17 +17,18 @@
*/
package org.apache.hadoop.hbase.master.cleaner;
+import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.BaseConfigurable;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import java.util.Map;
-
/**
* Base class for file cleaners which allows subclasses to implement a simple
* isFileDeletable method (which used to be the FileCleanerDelegate contract).
*/
+@InterfaceAudience.Private
public abstract class BaseFileCleanerDelegate extends BaseConfigurable
implements FileCleanerDelegate {
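A minimal custom delegate, sketched on the assumption that the isFileDeletable(FileStatus) hook keeps its current signature; a real plugin would also be registered through the master's cleaner-plugin configuration:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseFileCleanerDelegate;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

/** Example delegate: only files older than one day may be deleted. */
public class TimeToLiveExampleCleaner extends BaseFileCleanerDelegate {
  private static final long TTL_MS = 24L * 60 * 60 * 1000;

  @Override
  protected boolean isFileDeletable(FileStatus fStat) {
    long age = EnvironmentEdgeManager.currentTime() - fStat.getModificationTime();
    return age > TTL_MS;
  }
}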
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index fdf5141734..46f6217a90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
@@ -17,13 +17,6 @@
*/
package org.apache.hadoop.hbase.master.cleaner;
-import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
import java.io.IOException;
import java.util.Comparator;
import java.util.HashMap;
@@ -34,24 +27,32 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;
import java.util.concurrent.atomic.AtomicBoolean;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
/**
* Abstract Cleaner that uses a chain of delegates to clean a directory of files
* @param <T> Cleaner delegate class that is dynamically loaded from configuration
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
justification="TODO: Fix. It is wonky have static pool initialized from instance")
+@InterfaceAudience.Private
public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore
implements ConfigurationObserver {
@@ -315,7 +316,7 @@ public abstract class CleanerChore extends Schedu
}
Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles);
-
+
// trace which cleaner is holding on to each file
if (LOG.isTraceEnabled()) {
ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles);
@@ -325,10 +326,10 @@ public abstract class CleanerChore extends Schedu
}
}
}
-
+
deletableValidFiles = filteredFiles;
}
-
+
Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles);
return deleteFiles(filesToDelete) == files.size();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 6c78914faf..358fd61c70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProce
/**
* A remote procedure dispatcher for regionservers.
*/
+@InterfaceAudience.Private
public class RSProcedureDispatcher
extends RemoteProcedureDispatcher<MasterProcedureEnv, ServerName>
implements ServerListener {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java
index 301cd181f4..7f9029dad9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
import java.util.Set;
-
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,11 +34,13 @@ import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaState;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.R
* assigning meta region/s. Any place where meta is accessed and requires meta to be online needs to
* submit this procedure instead of duplicating steps to recover meta in the code.
*/
+@InterfaceAudience.Private
public class RecoverMetaProcedure
extends StateMachineProcedure<MasterProcedureEnv, RecoverMetaState>
implements TableProcedureInterface {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 3d660729b2..5d8d6fa311 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.S
* queued on the rpc should have been notified about fail and should be concurrently
* getting themselves ready to assign elsewhere.
*/
+@InterfaceAudience.Private
public class ServerCrashProcedure
extends StateMachineProcedure<MasterProcedureEnv, ServerCrashState>
implements ServerProcedureInterface {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
index 222c9334b8..736257f7d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
@@ -19,17 +19,18 @@ package org.apache.hadoop.hbase.procedure;
import java.io.IOException;
import java.util.Hashtable;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
/**
* Provides the globally barriered procedure framework and environment for
- * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster}
+ * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster}
* interacts with the loaded procedure manager through this class.
*/
+@InterfaceAudience.Private
public class MasterProcedureManagerHost extends
ProcedureManagerHost<MasterProcedureManager> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 4c01eb8c61..5ae7a44b52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -18,21 +18,22 @@
package org.apache.hadoop.hbase.procedure;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides the globally barriered procedure framework and environment
- * for region server oriented operations.
+ * for region server oriented operations.
* {@link org.apache.hadoop.hbase.regionserver.HRegionServer} interacts
* with the loaded procedure manager through this class.
*/
+@InterfaceAudience.Private
public class RegionServerProcedureManagerHost extends
ProcedureManagerHost<RegionServerProcedureManager> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
index 507e2721d5..4b69244383 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.procedure;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
-
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionListener;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -50,6 +50,7 @@ import org.slf4j.LoggerFactory;
* There is a category of procedure (ex: online-snapshots), and a user-specified instance-specific
* barrierName. (ex: snapshot121126).
*/
+@InterfaceAudience.Private
abstract public class Subprocedure implements Callable<Void> {
private static final Logger LOG = LoggerFactory.getLogger(Subprocedure.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableSpaceQuotaSnapshotNotifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableSpaceQuotaSnapshotNotifier.java
index d81d7d304a..891a34ecd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableSpaceQuotaSnapshotNotifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TableSpaceQuotaSnapshotNotifier.java
@@ -17,17 +17,18 @@
package org.apache.hadoop.hbase.quotas;
import java.io.IOException;
-
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link SpaceQuotaSnapshotNotifier} which uses the hbase:quota table.
*/
+@InterfaceAudience.Private
public class TableSpaceQuotaSnapshotNotifier implements SpaceQuotaSnapshotNotifier {
private static final Logger LOG = LoggerFactory.getLogger(TableSpaceQuotaSnapshotNotifier.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
index 294954cb34..0760df837b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/MissingSnapshotViolationPolicyEnforcement.java
@@ -18,18 +18,20 @@ package org.apache.hadoop.hbase.quotas.policies;
import java.io.IOException;
import java.util.List;
-
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.quotas.SpaceLimitingException;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* A {@link SpaceViolationPolicyEnforcement} which can be treated as a singleton. When a quota is
* not defined on a table or we lack quota information, we want to avoid creating a policy, keeping
* this path fast.
*/
-public class MissingSnapshotViolationPolicyEnforcement extends AbstractViolationPolicyEnforcement {
+@InterfaceAudience.Private
+public final class MissingSnapshotViolationPolicyEnforcement
+ extends AbstractViolationPolicyEnforcement {
private static final MissingSnapshotViolationPolicyEnforcement SINGLETON =
new MissingSnapshotViolationPolicyEnforcement();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DumpRegionServerMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DumpRegionServerMetrics.java
index 2b07a64253..4d49ea2f76 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DumpRegionServerMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DumpRegionServerMetrics.java
@@ -16,8 +16,6 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import org.apache.hadoop.hbase.util.JSONBean;
-
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
@@ -25,11 +23,14 @@ import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
+import org.apache.hadoop.hbase.util.JSONBean;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Utility for doing JSON and MBeans.
*/
-public class DumpRegionServerMetrics {
+@InterfaceAudience.Private
+public final class DumpRegionServerMetrics {
/**
* Dump out a subset of regionserver mbeans only, not all of them, as json on System.out.
*/
@@ -57,4 +58,6 @@ public class DumpRegionServerMetrics {
String str = dumpMetrics();
System.out.println(str);
}
+
+ private DumpRegionServerMetrics() {}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
index 9753080bcf..84973db707 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SteppingSplitPolicy.java
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
public class SteppingSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
/**
* @return flushSize * 2 if there's exactly one region of the table in question
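The split policy is chosen per table. A hedged sketch of selecting it through the client API (setRegionSplitPolicyClassName is assumed to be available on TableDescriptorBuilder, as it is on HTableDescriptor):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy;

public class SplitPolicyExample {
  public static void main(String[] args) {
    // Column families are omitted for brevity; a real table would declare at least one.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        // Split early while the table has few regions, then fall back to the size-based policy.
        .setRegionSplitPolicyClassName(SteppingSplitPolicy.class.getName())
        .build();
    System.out.println(td);
  }
}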
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java
index 03571d51f0..37b7059cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionRequest.java
@@ -20,11 +20,12 @@ package org.apache.hadoop.hbase.regionserver.compactions;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
-
import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.yetus.audience.InterfaceAudience;
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_DOESNT_OVERRIDE_EQUALS",
justification="It is intended to use the same equal method as superclass")
+@InterfaceAudience.Private
public class DateTieredCompactionRequest extends CompactionRequestImpl {
private List<Long> boundaries;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
index efbedc52ed..6814640dfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -21,19 +21,22 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCellBuilder;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hbase.thirdparty.com.google.common.base.Predicate;
+
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+@InterfaceAudience.Private
public class BulkLoadCellFilter {
private static final Logger LOG = LoggerFactory.getLogger(BulkLoadCellFilter.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 22e8628641..a960c3146a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -66,7 +67,7 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongM
* Arguments: --distributed Polls each RS to dump information about the queue
* --hdfs Reports HDFS usage by the replication queues (note: can be overestimated).
*/
-
+@InterfaceAudience.Private
public class DumpReplicationQueues extends Configured implements Tool {
private static final Logger LOG = LoggerFactory.getLogger(DumpReplicationQueues.class.getName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index 4c8a7522fb..af6888c2c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -22,18 +22,19 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
+
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
/**
@@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminServic
* single peer to replicate to per set of data to replicate. Also handles
* keeping track of peer availability.
*/
+@InterfaceAudience.Private
public class ReplicationSinkManager {
private static final Logger LOG = LoggerFactory.getLogger(ReplicationSinkManager.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index c2862de5b4..62068fdd3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* In a scenario of Replication based Disaster/Recovery, when hbase Master-Cluster crashes, this
@@ -47,6 +48,7 @@ import org.apache.hadoop.util.ToolRunner;
* hbase org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp
*
*/
+@InterfaceAudience.Private
public class ReplicationSyncUp extends Configured implements Tool {
private static final long SLEEP_TIME = 10000;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
index f3e4853592..b25b7e21c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.java
@@ -29,12 +29,12 @@ import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
-
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,6 +42,7 @@ import org.slf4j.LoggerFactory;
* Similar to MvccSensitiveTracker but also tracks the visibility expression before
* deciding if a Cell can be considered deleted
*/
+@InterfaceAudience.Private
public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTracker {
private static final Logger LOG =
LoggerFactory.getLogger(VisibilityNewVersionBehaivorTracker.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java
index 6887c31352..e39d601646 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplication.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.hbase.security.visibility;
+import java.io.IOException;
+import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -26,9 +28,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-
-import java.io.IOException;
-import java.util.Optional;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* A RegionServerObserver impl that provides the custom
@@ -37,6 +37,7 @@ import java.util.Optional;
* replicated as string. The value for the configuration should be
* 'org.apache.hadoop.hbase.security.visibility.VisibilityController$VisibilityReplication'.
*/
+@InterfaceAudience.Private
public class VisibilityReplication implements RegionServerCoprocessor, RegionServerObserver {
private Configuration conf;
private VisibilityLabelService visibilityLabelService;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java
index b1ee66ef90..af9ce88714 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/CreateSnapshot.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.snapshot;
import java.util.Arrays;
import java.util.Locale;
-
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -28,13 +27,13 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This is a command line class that will snapshot a given table.
*/
+@InterfaceAudience.Private
public class CreateSnapshot extends AbstractHBaseTool {
private SnapshotType snapshotType = SnapshotType.FLUSH;
private TableName tableName = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
index 738ffc28c1..9311200ac9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase.tool;
+import java.io.IOException;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -26,13 +29,10 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.util.Optional;
-import java.util.concurrent.atomic.AtomicLong;
-
/**
*
* This coprocessor 'shallows' all the writes. It allows to test a pure
@@ -59,7 +59,9 @@ import java.util.concurrent.atomic.AtomicLong;
* Will return:
* 0 row(s) in 0.0050 seconds
*
+ * TODO: It needs tests
*/
+@InterfaceAudience.Private
public class WriteSinkCoprocessor implements RegionCoprocessor, RegionObserver {
private static final Logger LOG = LoggerFactory.getLogger(WriteSinkCoprocessor.class);
private final AtomicLong ops = new AtomicLong();
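Aside (illustration only, not diff content): the javadoc above shows how to attach WriteSinkCoprocessor from the shell; the equivalent wiring through the Java Admin API would look roughly like the sketch below. The table and column family names are made up.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    public class AttachWriteSinkSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("sink_test"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              // writes to this table are acknowledged but never persisted
              .setCoprocessor("org.apache.hadoop.hbase.tool.WriteSinkCoprocessor")
              .build());
        }
      }
    }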
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java
index b12c592573..2d4de3b4d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/GetJavaProperty.java
@@ -18,10 +18,13 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.yetus.audience.InterfaceAudience;
+
/**
* A generic way for querying Java properties.
*/
-public class GetJavaProperty {
+@InterfaceAudience.Private
+public final class GetJavaProperty {
public static void main(String args[]) {
if (args.length == 0) {
for (Object prop: System.getProperties().keySet()) {
@@ -33,4 +36,6 @@ public class GetJavaProperty {
}
}
}
+
+ private GetJavaProperty() {}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index fb99cba376..03ed373f35 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -26,11 +26,13 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Helper class for all utilities related to archival/retrieval of HFiles
*/
-public class HFileArchiveUtil {
+@InterfaceAudience.Private
+public final class HFileArchiveUtil {
private HFileArchiveUtil() {
// non-external instantiation - util class
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
index 554d6f51a7..fb2a954174 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RollingStatCalculator.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.hbase.util;
+import org.apache.yetus.audience.InterfaceAudience;
+
/**
* This class maintains mean and variation for any sequence of input provided to it.
* It is initialized with number of rolling periods which basically means the number of past
@@ -30,6 +32,7 @@ package org.apache.hadoop.hbase.util;
* from the start the statistics may behave like constants and may ignore short trends.
* All operations are O(1) except the initialization which is O(N).
*/
+@InterfaceAudience.Private
public class RollingStatCalculator {
private double currentSum;
private double currentSqrSum;
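Aside (illustration only, not diff content): the rolling mean/variance scheme the javadoc above describes can be sketched as below; this mirrors the idea (circular window plus running sum and sum of squares, O(1) per insert, O(N) init) rather than the actual RollingStatCalculator API.
    public class RollingStatsSketch {
      private final long[] window;   // last N data points, circular
      private int next = 0;
      private int count = 0;
      private double sum = 0, sqrSum = 0;
      public RollingStatsSketch(int periods) {
        this.window = new long[periods];   // O(N) init, everything else O(1)
      }
      public void insert(long value) {
        if (count == window.length) {      // evict the oldest point once the window is full
          long old = window[next];
          sum -= old;
          sqrSum -= (double) old * old;
        } else {
          count++;
        }
        window[next] = value;
        next = (next + 1) % window.length;
        sum += value;
        sqrSum += (double) value * value;
      }
      public double mean() {
        return count == 0 ? 0 : sum / count;
      }
      public double deviation() {
        if (count == 0) return 0;
        double m = mean();
        return Math.sqrt(Math.max(0, sqrSum / count - m * m));
      }
    }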
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 8a43d174b3..769d480d5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.util;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -35,12 +34,14 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Similar to {@link RegionReplicaUtil} but for the server side
*/
+@InterfaceAudience.Private
public class ServerRegionReplicaUtil extends RegionReplicaUtil {
private static final Logger LOG = LoggerFactory.getLogger(ServerRegionReplicaUtil.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
index e731bd7043..17da6812fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ShutdownHookManager.java
@@ -17,13 +17,15 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This class provides ShutdownHookManager shims for HBase to interact with the Hadoop 1.0.x and the
* Hadoop 2.0+ series.
- *
+ *
* NOTE: No testing done against 0.22.x, or 0.21.x.
*/
+@InterfaceAudience.Private
abstract public class ShutdownHookManager {
private static ShutdownHookManager instance;
@@ -39,13 +41,13 @@ abstract public class ShutdownHookManager {
}
abstract public void addShutdownHook(Thread shutdownHook, int priority);
-
+
abstract public boolean removeShutdownHook(Runnable shutdownHook);
-
+
public static void affixShutdownHook(Thread shutdownHook, int priority) {
instance.addShutdownHook(shutdownHook, priority);
}
-
+
public static boolean deleteShutdownHook(Runnable shutdownHook) {
return instance.removeShutdownHook(shutdownHook);
}
@@ -56,14 +58,14 @@ abstract public class ShutdownHookManager {
public void addShutdownHook(Thread shutdownHookThread, int priority) {
Runtime.getRuntime().addShutdownHook(shutdownHookThread);
}
-
+
@Override
public boolean removeShutdownHook(Runnable shutdownHook) {
Thread shutdownHookThread = null;
if (!(shutdownHook instanceof Thread)) {
shutdownHookThread = new Thread(shutdownHook);
} else shutdownHookThread = (Thread) shutdownHook;
-
+
return Runtime.getRuntime().removeShutdownHook(shutdownHookThread);
}
}
@@ -72,7 +74,7 @@ abstract public class ShutdownHookManager {
@Override
public void addShutdownHook(Thread shutdownHookThread, int priority) {
try {
- Methods.call(shutdownHookManagerClass,
+ Methods.call(shutdownHookManagerClass,
Methods.call(shutdownHookManagerClass, null, "get", null, null),
"addShutdownHook",
new Class[] { Runnable.class, int.class },
@@ -81,12 +83,12 @@ abstract public class ShutdownHookManager {
throw new RuntimeException("we could not use ShutdownHookManager.addShutdownHook", ex);
}
}
-
+
@Override
public boolean removeShutdownHook(Runnable shutdownHook) {
try {
return (Boolean)
- Methods.call(shutdownHookManagerClass,
+ Methods.call(shutdownHookManagerClass,
Methods.call(shutdownHookManagerClass, null, "get", null, null),
"removeShutdownHook",
new Class[] { Runnable.class },
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
index 3f5576e4a5..f896e550a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/SortedList.java
@@ -26,6 +26,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.RandomAccess;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Simple sorted list implementation that uses {@link java.util.ArrayList} as
@@ -38,7 +39,7 @@ import java.util.RandomAccess;
* time of invocation, so will not see any mutations to the collection during
* their operation. Iterating over list elements manually using the
* RandomAccess pattern involves multiple operations. For this to be safe get
- * a reference to the internal list first using get().
+ * a reference to the internal list first using get().
*
* If constructed with a {@link java.util.Comparator}, the list will be sorted
* using the comparator. Adding or changing an element using an index will
@@ -48,6 +49,7 @@ import java.util.RandomAccess;
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UG_SYNC_SET_UNSYNC_GET",
justification="TODO: synchronization in here needs review!!!")
+@InterfaceAudience.Private
public class SortedList<E> implements List<E>, RandomAccess {
  private volatile List<E> list;
  private final Comparator<? super E> comparator;
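Aside (illustration only, not diff content): a minimal sketch of the safe-iteration pattern the SortedList javadoc describes, assuming the no-arg get() accessor it mentions returns the current backing list.
    import java.util.Comparator;
    import java.util.List;
    import org.apache.hadoop.hbase.util.SortedList;
    public class SortedListSketch {
      public static void main(String[] args) {
        SortedList<Long> ids = new SortedList<>(Comparator.<Long>naturalOrder());
        ids.add(3L);
        ids.add(1L);
        // Grab one stable reference up front so the index-based loop below
        // cannot interleave with a concurrent mutation of the list.
        List<Long> snapshot = ids.get();
        for (int i = 0; i < snapshot.size(); i++) {
          System.out.println(snapshot.get(i));
        }
      }
    }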
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
index 742a2efec7..3070fb3727 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
@@ -21,11 +21,12 @@ package org.apache.hadoop.hbase.util;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Reservoir;
import com.codahale.metrics.Snapshot;
-
import java.lang.reflect.Constructor;
import java.text.DecimalFormat;
+import org.apache.yetus.audience.InterfaceAudience;
/** Utility functions for working with Yammer Metrics. */
+@InterfaceAudience.Private
public final class YammerHistogramUtils {
// not for public consumption
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java
index 4ca0e7441a..d5f5f5320c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandler.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.hbase.util.hbck;
import java.io.IOException;
import java.util.Collection;
-
import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This interface provides callbacks for handling particular table integrity
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
* and handling overlaps but currently preserves the older more specific error
* condition codes.
*/
+@InterfaceAudience.Private
public interface TableIntegrityErrorHandler {
TableInfo getTableInfo();
@@ -48,7 +49,7 @@ public interface TableIntegrityErrorHandler {
* has an empty start key.
*/
void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException;
-
+
/**
* Callback for handling case where a Table has a last region that does not
* have an empty end key.
@@ -68,7 +69,7 @@ public interface TableIntegrityErrorHandler {
/**
* Callback for handling two regions that have the same start key. This is
* a specific case of a region overlap.
- * @param hi1 one of the overlapping HbckInfo
+ * @param hi1 one of the overlapping HbckInfo
* @param hi2 the other overlapping HbckInfo
*/
void handleDuplicateStartKeys(HbckInfo hi1, HbckInfo hi2) throws IOException;
@@ -96,7 +97,7 @@ public interface TableIntegrityErrorHandler {
* Callback for handling a region hole between two keys.
* @param holeStartKey key at the beginning of the region hole
* @param holeEndKey key at the end of the region hole
-
+
*/
void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeEndKey)
throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java
index af379fd1ee..96039b5f42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableIntegrityErrorHandlerImpl.java
@@ -19,14 +19,15 @@ package org.apache.hadoop.hbase.util.hbck;
import java.io.IOException;
import java.util.Collection;
-
import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Simple implementation of TableIntegrityErrorHandler. Can be used as a base
* class.
*/
+@InterfaceAudience.Private
abstract public class TableIntegrityErrorHandlerImpl implements
TableIntegrityErrorHandler {
TableInfo ti;
@@ -53,7 +54,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException {
}
-
+
/**
* {@inheritDoc}
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
index 0836b5d147..12b63f5b81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/NettyAsyncFSWALConfigHelper.java
@@ -22,6 +22,7 @@ import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.io.netty.channel.Channel;
@@ -31,7 +32,8 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
* Helper class for passing netty event loop config to {@link AsyncFSWALProvider}.
* @since 2.0.0
*/
-public class NettyAsyncFSWALConfigHelper {
+@InterfaceAudience.Private
+public final class NettyAsyncFSWALConfigHelper {
private static final String EVENT_LOOP_CONFIG = "hbase.wal.async.event-loop.config";
@@ -59,4 +61,6 @@ public class NettyAsyncFSWALConfigHelper {
}
return EVENT_LOOP_CONFIG_MAP.get(name);
}
+
+ private NettyAsyncFSWALConfigHelper() {}
}
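Aside (illustration only, not diff content): a sketch of how a caller would hand a shared Netty event loop to AsyncFSWALProvider through this helper; the setEventLoopConfig signature here is assumed from the helper's typical usage and should be checked against the class.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper;
    import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
    import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
    import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
    public class WalEventLoopSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        EventLoopGroup group = new NioEventLoopGroup(1);
        // Record the shared event loop under the helper's config key so the
        // async WAL provider reuses it instead of creating its own.
        NettyAsyncFSWALConfigHelper.setEventLoopConfig(conf, group, NioSocketChannel.class);
      }
    }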
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 58ef7ed444..8eaefaab83 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -116,6 +116,10 @@
          <groupId>org.codehaus.mojo</groupId>
          <artifactId>findbugs-maven-plugin</artifactId>
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+        </plugin>
diff --git a/hbase-spark-it/pom.xml b/hbase-spark-it/pom.xml
index 2cac0f98f5..74de0a0970 100644
--- a/hbase-spark-it/pom.xml
+++ b/hbase-spark-it/pom.xml
@@ -180,6 +180,10 @@
true
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+        </plugin>
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 3aeb4708fc..05fd779415 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -563,6 +563,27 @@
true
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+          <configuration>
+
+
+            <ignoreRuleFailures>true</ignoreRuleFailures>
+            <rules>
+              <rule>
+
+                <classPattern>(?!.*(.generated.|.tmpl.|\$|org.apache.hadoop.hbase.spark.hbase.package)).*</classPattern>
+                <includeTestClasses>false</includeTestClasses>
+                <includePublicClasses>true</includePublicClasses>
+                <includePackagePrivateClasses>false</includePackagePrivateClasses>
+                <includeProtectedClasses>false</includeProtectedClasses>
+                <includePrivateClasses>false</includePrivateClasses>
+                <classAnnotationPattern>org[.]apache[.]yetus[.]audience[.]InterfaceAudience.*</classAnnotationPattern>
+              </rule>
+            </rules>
+          </configuration>
+        </plugin>
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
index d7c424e30f..8cf2c7fed2 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@@ -29,11 +28,13 @@ import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This is a simple example of deleting records in HBase
* with the bulkDelete function.
*/
+@InterfaceAudience.Private
final public class JavaHBaseBulkDeleteExample {
private JavaHBaseBulkDeleteExample() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
index cb9e0c7fde..b5143de018 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -32,11 +31,13 @@ import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This is a simple example of getting records in HBase
* with the bulkGet function.
*/
+@InterfaceAudience.Private
final public class JavaHBaseBulkGetExample {
private JavaHBaseBulkGetExample() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
index f0f3e79f97..6738059c96 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@@ -33,6 +32,7 @@ import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Run this example using command below:
@@ -45,6 +45,7 @@ import org.apache.spark.api.java.function.Function;
* 'hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles' to load the HFiles into table to
* verify this example.
*/
+@InterfaceAudience.Private
final public class JavaHBaseBulkLoadExample {
private JavaHBaseBulkLoadExample() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
index 5821c1957a..4a80b96a98 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@@ -29,11 +28,13 @@ import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This is a simple example of putting records in HBase
* with the bulkPut function.
*/
+@InterfaceAudience.Private
final public class JavaHBaseBulkPutExample {
private JavaHBaseBulkPutExample() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
index 8d4c0929ef..0d4f680642 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@@ -29,14 +28,15 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
-
import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
import scala.Tuple2;
/**
* This is a simple example of scanning records from HBase
* with the hbaseRDD function.
*/
+@InterfaceAudience.Private
final public class JavaHBaseDistributedScan {
private JavaHBaseDistributedScan() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
index 9a1259ea34..a55d8532cd 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
@@ -36,13 +35,14 @@ import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
-
+import org.apache.yetus.audience.InterfaceAudience;
import scala.Tuple2;
/**
* This is a simple example of using the foreachPartition
* method with a HBase connection
*/
+@InterfaceAudience.Private
final public class JavaHBaseMapGetPutExample {
private JavaHBaseMapGetPutExample() {}
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
index cd4cf24f15..74fadc6532 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
@@ -28,10 +28,12 @@ import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* This is a simple example of BulkPut with Spark Streaming
*/
+@InterfaceAudience.Private
final public class JavaHBaseStreamingBulkPutExample {
private JavaHBaseStreamingBulkPutExample() {}
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
index 8eb4dd921f..1fc92c0ea7 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
@@ -18,16 +18,22 @@
package org.apache.hadoop.hbase.spark
import java.io.IOException
-
import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory, RegionLocator, Table}
+import org.apache.hadoop.hbase.client.Admin
+import org.apache.hadoop.hbase.client.Connection
+import org.apache.hadoop.hbase.client.ConnectionFactory
+import org.apache.hadoop.hbase.client.RegionLocator
+import org.apache.hadoop.hbase.client.Table
import org.apache.hadoop.hbase.ipc.RpcControllerFactory
-import org.apache.hadoop.hbase.security.{User, UserProvider}
+import org.apache.hadoop.hbase.security.User
+import org.apache.hadoop.hbase.security.UserProvider
import org.apache.hadoop.hbase.spark.datasources.HBaseSparkConf
-import org.apache.hadoop.hbase.{HConstants, TableName}
-
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.TableName
+import org.apache.yetus.audience.InterfaceAudience
import scala.collection.mutable
+@InterfaceAudience.Private
private[spark] object HBaseConnectionCache extends Logging {
// A hashmap of Spark-HBase connections. Key is HBaseConnectionKey.
@@ -130,6 +136,7 @@ private[spark] object HBaseConnectionCache extends Logging {
}
}
+@InterfaceAudience.Private
private[hbase] case class SmartConnection (
connection: Connection, var refCount: Int = 0, var timestamp: Long = 0) {
def getTable(tableName: TableName): Table = connection.getTable(tableName)
@@ -153,6 +160,7 @@ private[hbase] case class SmartConnection (
* that may be used in the process of establishing a connection.
*
*/
+@InterfaceAudience.Private
class HBaseConnectionKey(c: Configuration) extends Logging {
val conf: Configuration = c
val CONNECTION_PROPERTIES: Array[String] = Array[String](
@@ -256,6 +264,7 @@ class HBaseConnectionKey(c: Configuration) extends Logging {
* @param numActualConnectionsCreated number of actual HBase connections the cache ever created
* @param numActiveConnections number of current alive HBase connections the cache is holding
*/
+@InterfaceAudience.Private
case class HBaseConnectionCacheStat(var numTotalRequests: Long,
var numActualConnectionsCreated: Long,
var numActiveConnections: Long)
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index 0156e70726..d376bf2408 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -1112,6 +1112,7 @@ class HBaseContext(@transient val sc: SparkContext,
class WriterLength(var written:Long, val writer:StoreFileWriter)
}
+@InterfaceAudience.Private
object LatestHBaseContextCache {
var latest:HBaseContext = null
}
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
index 9a67477dad..a92f4e0ce6 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
@@ -17,9 +17,10 @@
package org.apache.hadoop.hbase.spark
-import org.apache.log4j.LogManager
-import org.slf4j.{Logger, LoggerFactory}
+import org.apache.yetus.audience.InterfaceAudience
import org.slf4j.impl.StaticLoggerBinder
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
/**
* Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
@@ -28,6 +29,7 @@ import org.slf4j.impl.StaticLoggerBinder
* Logging is private in Spark 2.0
* This is to isolate incompatibilties across Spark releases.
*/
+@InterfaceAudience.Private
trait Logging {
// Make the log field transient so that objects with Logging can
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
index efeaa7cf71..6a656677e1 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
@@ -266,6 +266,7 @@ class HBaseTableScanRDD(relation: HBaseRelation,
}
}
+@InterfaceAudience.Private
case class SerializedFilter(b: Option[Array[Byte]])
object SerializedFilter {
@@ -278,13 +279,14 @@ object SerializedFilter {
}
}
+@InterfaceAudience.Private
private[hbase] case class HBaseRegion(
override val index: Int,
val start: Option[HBaseType] = None,
val end: Option[HBaseType] = None,
val server: Option[String] = None) extends Partition
-
+@InterfaceAudience.Private
private[hbase] case class HBaseScanPartition(
override val index: Int,
val regions: HBaseRegion,
@@ -292,6 +294,7 @@ private[hbase] case class HBaseScanPartition(
val points: Seq[Array[Byte]],
val sf: SerializedFilter) extends Partition
+@InterfaceAudience.Private
case class RDDResources(set: mutable.HashSet[Resource]) {
def addResource(s: Resource) {
set += s
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
index 98cc8719c9..fc0e4d00bb 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
@@ -17,26 +17,18 @@
package org.apache.hadoop.hbase.spark.datasources
-import java.io.ByteArrayInputStream
-
-import org.apache.avro.Schema
-import org.apache.avro.Schema.Type._
-import org.apache.avro.generic.GenericDatumReader
-import org.apache.avro.generic.GenericDatumWriter
-import org.apache.avro.generic.GenericRecord
-import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, GenericRecord}
-import org.apache.avro.io._
-import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.sql.types._
+import org.apache.yetus.audience.InterfaceAudience
// TODO: This is not really used in code.
+@InterfaceAudience.Public
trait SerDes {
def serialize(value: Any): Array[Byte]
def deserialize(bytes: Array[Byte], start: Int, end: Int): Any
}
// TODO: This is not really used in code.
+@InterfaceAudience.Private
class DoubleSerDes extends SerDes {
override def serialize(value: Any): Array[Byte] = Bytes.toBytes(value.asInstanceOf[Double])
override def deserialize(bytes: Array[Byte], start: Int, end: Int): Any = {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
index ce7b55a7a4..8f1f15c2ec 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
@@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.util.Bytes
import scala.math.Ordering
+// TODO: add @InterfaceAudience.Private if https://issues.scala-lang.org/browse/SI-3600 is resolved
package object hbase {
type HBaseType = Array[Byte]
def bytesMin = new Array[Byte](0)
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
index c09e99d906..fda3c785ea 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
@@ -21,16 +21,20 @@ import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.hadoop.hbase.spark.AvroSerdes
import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* @param col0 Column #0, Type is String
* @param col1 Column #1, Type is Array[Byte]
*/
+@InterfaceAudience.Private
case class AvroHBaseRecord(col0: String,
col1: Array[Byte])
-
+@InterfaceAudience.Private
object AvroHBaseRecord {
val schemaString =
s"""{"namespace": "example.avro",
@@ -58,7 +62,7 @@ object AvroHBaseRecord {
favoriteArray.add(s"number${i}")
favoriteArray.add(s"number${i+1}")
user.put("favorite_array", favoriteArray)
- import collection.JavaConverters._
+ import scala.collection.JavaConverters._
val favoriteMap = Map[String, Int](("key1" -> i), ("key2" -> (i+1))).asJava
user.put("favorite_map", favoriteMap)
val avroByte = AvroSerdes.serialize(user, avroSchema)
@@ -66,6 +70,7 @@ object AvroHBaseRecord {
}
}
+@InterfaceAudience.Private
object AvroSource {
def catalog = s"""{
|"table":{"namespace":"default", "name":"ExampleAvrotable"},
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
index 96c6d6e4f9..06303716af 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
@@ -17,13 +17,18 @@
package org.apache.hadoop.hbase.spark.example.datasources
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
+@InterfaceAudience.Private
class UserCustomizedSampleException(message: String = null, cause: Throwable = null) extends
RuntimeException(UserCustomizedSampleException.message(message, cause), cause)
+@InterfaceAudience.Private
object UserCustomizedSampleException {
def message(message: String, cause: Throwable) =
if (message != null) message
@@ -31,6 +36,7 @@ object UserCustomizedSampleException {
else null
}
+@InterfaceAudience.Private
case class IntKeyRecord(
col0: Integer,
col1: Boolean,
@@ -56,6 +62,7 @@ object IntKeyRecord {
}
}
+@InterfaceAudience.Private
object DataType {
val cat = s"""{
|"table":{"namespace":"default", "name":"DataTypeExampleTable"},
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
index 056c071d5d..b414a379f5 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
@@ -17,10 +17,14 @@
package org.apache.hadoop.hbase.spark.example.datasources
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
+@InterfaceAudience.Private
case class HBaseRecord(
col0: String,
col1: Boolean,
@@ -32,6 +36,7 @@ case class HBaseRecord(
col7: String,
col8: Byte)
+@InterfaceAudience.Private
object HBaseRecord {
def apply(i: Int): HBaseRecord = {
val s = s"""row${"%03d".format(i)}"""
@@ -47,6 +52,7 @@ object HBaseRecord {
}
}
+@InterfaceAudience.Private
object HBaseSource {
val cat = s"""{
|"table":{"namespace":"default", "name":"HBaseSourceExampleTable"},
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
index 46135a5948..506fd229fc 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
@@ -17,17 +17,20 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext
+import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Delete
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of deleting records in HBase
* with the bulkDelete function.
*/
+@InterfaceAudience.Private
object HBaseBulkDeleteExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
index 1bdc90ddc6..58bc1d430c 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
@@ -17,18 +17,22 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext
-import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{CellUtil, TableName, HBaseConfiguration}
-import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.client.Result
+import org.apache.hadoop.hbase.spark.HBaseContext
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.CellUtil
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of getting records from HBase
* with the bulkGet function.
*/
+@InterfaceAudience.Private
object HBaseBulkGetExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
index 063f2c2d6b..0a6f379f08 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
@@ -17,17 +17,20 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext
+import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of putting records in HBase
* with the bulkPut function.
*/
+@InterfaceAudience.Private
object HBaseBulkPutExample {
def main(args: Array[String]) {
if (args.length < 2) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
index 37a0358653..51ff0da6a9 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
@@ -17,21 +17,24 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext
+import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
-import org.apache.hadoop.mapred.TextInputFormat
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
+import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of putting records in HBase
* with the bulkPut function. In this example we are
* getting the put information from a file
*/
+@InterfaceAudience.Private
object HBaseBulkPutExampleFromFile {
def main(args: Array[String]) {
if (args.length < 3) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
index fa782166d7..9bfcc2c5dd 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
@@ -19,16 +19,18 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
+import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Put
import org.apache.spark.SparkConf
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of putting records in HBase
* with the bulkPut function. In this example we are
* also setting the timestamp in the put
*/
+@InterfaceAudience.Private
object HBaseBulkPutTimestampExample {
def main(args: Array[String]) {
if (args.length < 2) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
index bb2e79d08b..7d8643a9f0 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
@@ -16,16 +16,19 @@
*/
package org.apache.hadoop.hbase.spark.example.hbasecontext
+import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Scan
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of scanning records from HBase
* with the hbaseRDD function in Distributed fashion.
*/
+@InterfaceAudience.Private
object HBaseDistributedScanExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
index 8ac93efe48..20a22f73c6 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
@@ -17,18 +17,21 @@
package org.apache.hadoop.hbase.spark.example.hbasecontext
+import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
-import org.apache.spark.streaming.StreamingContext
-import org.apache.spark.streaming.Seconds
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.spark.streaming.Seconds
+import org.apache.spark.streaming.StreamingContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of BulkPut with Spark Streaming
*/
+@InterfaceAudience.Private
object HBaseStreamingBulkPutExample {
def main(args: Array[String]) {
if (args.length < 4) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
index 83d3f9e301..0ba4d1c4e2 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
@@ -17,17 +17,20 @@
package org.apache.hadoop.hbase.spark.example.rdd
import org.apache.hadoop.hbase.client.Delete
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
import org.apache.hadoop.hbase.util.Bytes
-
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of deleting records in HBase
* with the bulkDelete function.
*/
+@InterfaceAudience.Private
object HBaseBulkDeleteExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
index eedabc3a6c..0736f6e954 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
@@ -16,17 +16,23 @@
*/
package org.apache.hadoop.hbase.spark.example.rdd
-import org.apache.hadoop.hbase.client.{Result, Get}
-import org.apache.hadoop.hbase.{CellUtil, TableName, HBaseConfiguration}
+import org.apache.hadoop.hbase.client.Get
+import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.CellUtil
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of getting records from HBase
* with the bulkGet function.
*/
+@InterfaceAudience.Private
object HBaseBulkGetExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
index 28711b8878..9f5885fa66 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
@@ -21,13 +21,17 @@ import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
-import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of putting records in HBase
* with the bulkPut function.
*/
+@InterfaceAudience.Private
object HBaseBulkPutExample {
def main(args: Array[String]) {
if (args.length < 2) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
index 8dfefc2618..be257eeff9 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
@@ -17,17 +17,21 @@
package org.apache.hadoop.hbase.spark.example.rdd
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of using the foreachPartition
* method with a HBase connection
*/
+@InterfaceAudience.Private
object HBaseForeachPartitionExample {
def main(args: Array[String]) {
if (args.length < 2) {
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
index 0d0b314b7e..079352409d 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
@@ -18,16 +18,20 @@
package org.apache.hadoop.hbase.spark.example.rdd
import org.apache.hadoop.hbase.client.Get
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.spark.HBaseContext
import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
/**
* This is a simple example of using the mapPartitions
* method with a HBase connection
*/
+@InterfaceAudience.Private
object HBaseMapPartitionExample {
def main(args: Array[String]) {
if (args.length < 1) {
diff --git a/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala b/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
index 37ee34694f..430785222a 100644
--- a/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
+++ b/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
@@ -19,11 +19,14 @@ package org.apache.spark.sql.datasources.hbase
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.types.DataType
+import org.apache.yetus.audience.InterfaceAudience
+@InterfaceAudience.Private
trait DataTypeParser {
def parse(dataTypeString: String): DataType
}
+@InterfaceAudience.Private
object DataTypeParserWrapper extends DataTypeParser{
def parse(dataTypeString: String): DataType = CatalystSqlParser.parseDataType(dataTypeString)
}
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index a9919532c2..f1624df6ec 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -145,6 +145,10 @@
true
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+        </plugin>
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
index eab530785b..b75b4334f7 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase.thrift;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
public class HttpAuthenticationException extends Exception {
private static final long serialVersionUID = 0;
/**
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
index 3dad28a7e0..e36d6391be 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
@@ -31,7 +31,6 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder;
-
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.thrift.TException;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -52,6 +52,7 @@ import org.slf4j.LoggerFactory;
* thrift server dies or is shut down before everything in the queue is drained.
*
*/
+@InterfaceAudience.Private
public class IncrementCoalescer implements IncrementCoalescerMBean {
/**
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
index 604fa97e3a..06cf193fe0 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
@@ -18,6 +18,9 @@
package org.apache.hadoop.hbase.thrift;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
public interface IncrementCoalescerMBean {
int getQueueSize();
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
index 973cad7eb0..c86f47616e 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
@@ -22,12 +22,14 @@ import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* A ThreadPoolExecutor customized for working with HBase thrift to update metrics before and
* after the execution of a task.
*/
+@InterfaceAudience.Private
public class THBaseThreadPoolExecutor extends ThreadPoolExecutor {
private ThriftMetrics metrics;
diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml
index d4a47e2fa2..a7cde510a9 100644
--- a/hbase-zookeeper/pom.xml
+++ b/hbase-zookeeper/pom.xml
@@ -91,6 +91,10 @@
          <groupId>org.codehaus.mojo</groupId>
          <artifactId>findbugs-maven-plugin</artifactId>
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+        </plugin>
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
index a50ce4c8c6..7413879b0b 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
@@ -19,10 +19,10 @@
package org.apache.hadoop.hbase.zookeeper;
import java.io.IOException;
-
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionNormalizerProtos;
/**
* Tracks region normalizer state up in ZK
*/
+@InterfaceAudience.Private
public class RegionNormalizerTracker extends ZKNodeTracker {
private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerTracker.class);
diff --git a/pom.xml b/pom.xml
index 27d786bbb3..de4bda310d 100755
--- a/pom.xml
+++ b/pom.xml
@@ -984,6 +984,34 @@
true
+          <plugin>
+            <groupId>net.revelc.code</groupId>
+            <artifactId>warbucks-maven-plugin</artifactId>
+            <version>${maven.warbucks.version}</version>
+            <configuration>
+              <ignoreRuleFailures>false</ignoreRuleFailures>
+              <rules>
+                <rule>
+
+                  <classPattern>(?!.*(.generated.|.tmpl.|\$)).*</classPattern>
+                  <includeTestClasses>false</includeTestClasses>
+                  <includePublicClasses>true</includePublicClasses>
+                  <includePackagePrivateClasses>false</includePackagePrivateClasses>
+                  <includeProtectedClasses>false</includeProtectedClasses>
+                  <includePrivateClasses>false</includePrivateClasses>
+                  <classAnnotationPattern>org[.]apache[.]yetus[.]audience[.]InterfaceAudience.*</classAnnotationPattern>
+                </rule>
+              </rules>
+            </configuration>
+            <executions>
+              <execution>
+                <id>run-warbucks</id>
+                <goals>
+                  <goal>check</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
@@ -1489,6 +1517,7 @@
3.0.0
3.4
3.0.1
+    <maven.warbucks.version>1.1.0</maven.warbucks.version>
1.5.0.Final
2.8.2
3.2.2
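Aside (illustration only, not diff content): with the rule added to the root pom above and ignoreRuleFailures set to false, the run-warbucks check requires every public, non-test, non-generated class to carry some org.apache.yetus.audience.InterfaceAudience annotation, which is what the many one-line class changes in this patch provide. A hypothetical class that satisfies the rule:
    import org.apache.yetus.audience.InterfaceAudience;
    @InterfaceAudience.Private   // without an InterfaceAudience.* annotation the check flags the class
    public class SomeNewServerHelper {   // hypothetical class name, for illustration
    }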
--
2.16.2.windows.1