diff --git hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java index 6005871..eb03a21 100644 --- hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java +++ hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.classification.tools; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import com.sun.javadoc.DocErrorReporter; import com.sun.javadoc.LanguageVersion; import com.sun.javadoc.RootDoc; @@ -29,6 +30,7 @@ import com.sun.tools.doclets.standard.Standard; * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}. * It delegates to the Standard Doclet, and takes the same options. */ +@InterfaceAudience.Private public class ExcludePrivateAnnotationsStandardDoclet { public static LanguageVersion languageVersion() { diff --git hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java index c283c91..def4f1a 100644 --- hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java +++ hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.classification.tools; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + import com.sun.javadoc.DocErrorReporter; import com.sun.javadoc.LanguageVersion; import com.sun.javadoc.RootDoc; @@ -33,6 +35,7 @@ import com.sun.tools.doclets.standard.Standard; * are also excluded. * It delegates to the Standard Doclet, and takes the same options. */ +@InterfaceAudience.Private public class IncludePublicAnnotationsStandardDoclet { public static LanguageVersion languageVersion() { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java index 6cd52e8..d2d92b3 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java @@ -18,6 +18,9 @@ */ package org.apache.hadoop.hbase; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + /** * Ways to keep cells marked for delete around. */ @@ -25,6 +28,8 @@ package org.apache.hadoop.hbase; * Don't change the TRUE/FALSE labels below, these have to be called * this way for backwards compatibility. */ +@InterfaceAudience.Public +@InterfaceStability.Evolving public enum KeepDeletedCells { /** Deleted Cells are not retained. 
*/ FALSE, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 3806115..3f55b0e 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; @@ -90,6 +91,7 @@ import com.google.common.annotations.VisibleForTesting; * gets as well. *

*/ +@InterfaceAudience.Private class AsyncProcess { protected static final Log LOG = LogFactory.getLog(AsyncProcess.class); protected static final AtomicLong COUNTER = new AtomicLong(); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java index 44f1eca..984a867 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private public class DelegatingRetryingCallable> implements RetryingCallable { protected final D delegate; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java index 9d685b8..16707cb 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java @@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Keeps track of repeated failures to any region server. Multiple threads manipulate the contents diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 34f90d5..911e034 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -21,6 +21,9 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.List; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + /** * Used to communicate with a single HBase table. * Obtain an instance from an {@link HConnection}. @@ -29,6 +32,8 @@ import java.util.List; * @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead */ @Deprecated +@InterfaceAudience.Private +@InterfaceStability.Stable public interface HTableInterface extends Table { /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 8d0fbc8..7d61a0b 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -57,20 +57,20 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * Each put will be sharded into different buffer queues based on its destination region server. * So each region server buffer queue will only have the puts which share the same destination. * And each queue will have a flush worker thread to flush the puts request to the region server. - * If any queue is full, the HTableMultiplexer starts to drop the Put requests for that + * If any queue is full, the HTableMultiplexer starts to drop the Put requests for that * particular queue. 
- * + * * Also all the puts will be retried as a configuration number before dropping. * And the HTableMultiplexer can report the number of buffered requests and the number of the * failed (dropped) requests in total or on per region server basis. - * + * * This class is thread safe. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class HTableMultiplexer { private static final Log LOG = LogFactory.getLog(HTableMultiplexer.class.getName()); - + public static final String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS = "hbase.tablemultiplexer.flush.period.ms"; public static final String TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads"; @@ -89,7 +89,7 @@ public class HTableMultiplexer { private final int maxKeyValueSize; private final ScheduledExecutorService executor; private final long flushPeriod; - + /** * @param conf The HBaseConfiguration * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for @@ -128,7 +128,7 @@ public class HTableMultiplexer { } /** - * The puts request will be buffered by their corresponding buffer queue. + * The puts request will be buffered by their corresponding buffer queue. * Return the list of puts which could not be queued. * @param tableName * @param puts @@ -138,13 +138,13 @@ public class HTableMultiplexer { public List put(TableName tableName, final List puts) { if (puts == null) return null; - + List failedPuts = null; boolean result; for (Put put : puts) { result = put(tableName, put, this.retryNum); if (result == false) { - + // Create the failed puts list if necessary if (failedPuts == null) { failedPuts = new ArrayList(); @@ -163,7 +163,7 @@ public class HTableMultiplexer { public List put(byte[] tableName, final List puts) { return put(TableName.valueOf(tableName), puts); } - + /** * The put request will be buffered by its corresponding buffer queue. And the put request will be * retried before dropping the request. @@ -185,7 +185,7 @@ public class HTableMultiplexer { // Generate a MultiPutStatus object and offer it into the queue PutStatus s = new PutStatus(loc.getRegionInfo(), put, retry); - + return queue.offer(s); } } catch (IOException e) { @@ -209,7 +209,7 @@ public class HTableMultiplexer { public boolean put(final byte[] tableName, Put put) { return put(TableName.valueOf(tableName), put); } - + /** * @return the current HTableMultiplexerStatus */ @@ -239,6 +239,8 @@ public class HTableMultiplexer { * report the number of buffered requests and the number of the failed (dropped) requests * in total or on per region server basis. 
*/ + @InterfaceAudience.Public + @InterfaceStability.Evolving public static class HTableMultiplexerStatus { private long totalFailedPutCounter; private long totalBufferedPutCounter; @@ -339,7 +341,7 @@ public class HTableMultiplexer { return this.serverToAverageLatencyMap; } } - + private static class PutStatus { public final HRegionInfo regionInfo; public final Put put; @@ -406,7 +408,7 @@ public class HTableMultiplexer { private final ScheduledExecutorService executor; private final int maxRetryInQueue; private final AtomicInteger retryInQueue = new AtomicInteger(0); - + public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr, HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize, ExecutorService pool, ScheduledExecutorService executor) { @@ -443,7 +445,7 @@ public class HTableMultiplexer { private boolean resubmitFailedPut(PutStatus ps, HRegionLocation oldLoc) throws IOException { // Decrease the retry count final int retryCount = ps.retryCount - 1; - + if (retryCount <= 0) { // Update the failed counter and no retry any more. return false; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index a135384..66dcdce 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", justification="stub used by ipc") +@InterfaceAudience.Private public abstract class RegionAdminServiceCallable implements RetryingCallable { protected final ClusterConnection connection; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index 2706718..3c4b39f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -41,6 +41,7 @@ public class RetriesExhaustedException extends IOException { /** * Datastructure that allows adding more info around Throwable incident. 
*/ + @InterfaceAudience.Private public static class ThrowableWithExtraContext { private final Throwable t; private final long when; @@ -52,7 +53,7 @@ public class RetriesExhaustedException extends IOException { this.when = when; this.extras = extras; } - + @Override public String toString() { return new Date(this.when).toString() + ", " + extras + ", " + t.toString(); @@ -76,6 +77,7 @@ public class RetriesExhaustedException extends IOException { * @param numTries * @param exceptions List of exceptions that failed before giving up */ + @InterfaceAudience.Private public RetriesExhaustedException(final int numTries, final List exceptions) { super(getMessage(numTries, exceptions), diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java index 53ed7c9..6949a57 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.RegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Gets or Scans throw this exception if running without in-row scan flag @@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; * hbase.table.max.rowsize). */ @InterfaceAudience.Public +@InterfaceStability.Stable public class RowTooBigException extends RegionException { public RowTooBigException(String message) { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java index f594a8c..9f05997 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.client; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -24,6 +25,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; /** * Factory to create an {@link RpcRetryingCaller} */ +@InterfaceAudience.Private public class RpcRetryingCallerFactory { /** Configuration key for a custom {@link RpcRetryingCaller} */ diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 85704ff..57accce 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.protobuf.ServiceException; + import org.htrace.Trace; /** @@ -59,6 +61,7 @@ import org.htrace.Trace; * the first answer. If the answer comes from one of the secondary replica, it will * be marked as stale. */ +@InterfaceAudience.Private public class RpcRetryingCallerWithReadReplicas { static final Log LOG = LogFactory.getLog(RpcRetryingCallerWithReadReplicas.class); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java index e14bda6..f8a0e1c 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java @@ -47,6 +47,8 @@ public abstract class Batch { * {@link Batch.Call#call(Object)} * @param the return type from {@link Batch.Call#call(Object)} */ + @InterfaceAudience.Public + @InterfaceStability.Stable public interface Call { R call(T instance) throws IOException; } @@ -65,6 +67,8 @@ public abstract class Batch { * @param the return type from the associated {@link Batch.Call#call(Object)} * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) */ + @InterfaceAudience.Public + @InterfaceStability.Stable public interface Callback { void update(byte[] region, byte[] row, R result); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java index cb8e5df..49134f1 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java @@ -38,8 +38,8 @@ package org.apache.hadoop.hbase.exceptions; import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Thrown when the client believes that we are trying to communicate to has diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java index 570eda2..5bfd2f3 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java @@ -17,9 +17,14 @@ */ package org.apache.hadoop.hbase.exceptions; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + /** * Exception thrown if a mutation fails sanity checks. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRetryIOException { private static final long serialVersionUID = 1788783640409186240L; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java index c30955b..b6b3c32 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java @@ -19,8 +19,15 @@ */ package org.apache.hadoop.hbase.exceptions; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.DoNotRetryIOException; +/** + * Thrown when there is a timeout when trying to acquire a lock + */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class LockTimeoutException extends DoNotRetryIOException { private static final long serialVersionUID = -1770764924258999825L; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java index 2d66d54..51c960d 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.exceptions; import java.net.ConnectException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.ServerName; /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java index 90ec7cf..933e888 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java @@ -19,10 +19,15 @@ package org.apache.hadoop.hbase.exceptions; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + /** * An error requesting an RPC protocol that the server is not serving. 
*/ @SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Evolving public class UnknownProtocolException extends org.apache.hadoop.hbase.DoNotRetryIOException { private Class protocol; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java index 38854d4..91eef6a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.filter; import com.google.protobuf.InvalidProtocolBufferException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.util.Bytes; @@ -26,7 +29,8 @@ import org.apache.hadoop.hbase.util.Bytes; /** * A long comparator which numerical compares against the specified byte array */ - +@InterfaceAudience.Public +@InterfaceStability.Stable public class LongComparator extends ByteArrayComparable { private Long longValue; @@ -44,6 +48,7 @@ public class LongComparator extends ByteArrayComparable { /** * @return The comparator serialized using pb */ + @Override public byte [] toByteArray() { ComparatorProtos.LongComparator.Builder builder = ComparatorProtos.LongComparator.newBuilder(); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java index 127aa4c..70dd1f9 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -79,6 +79,8 @@ public class RegexStringComparator extends ByteArrayComparable { private Engine engine; /** Engine implementation type (default=JAVA) */ + @InterfaceAudience.Public + @InterfaceStability.Stable public enum EngineType { JAVA, JONI @@ -153,6 +155,7 @@ public class RegexStringComparator extends ByteArrayComparable { /** * @return The comparator serialized using pb */ + @Override public byte [] toByteArray() { return engine.toByteArray(); } @@ -175,7 +178,7 @@ public class RegexStringComparator extends ByteArrayComparable { if (proto.hasEngine()) { EngineType engine = EngineType.valueOf(proto.getEngine()); comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(), - engine); + engine); } else { comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags()); } @@ -195,6 +198,7 @@ public class RegexStringComparator extends ByteArrayComparable { * @return true if and only if the fields of the comparator that are serialized * are equal to the corresponding fields in other. Used for testing. */ + @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof RegexStringComparator)) return false; @@ -212,7 +216,7 @@ public class RegexStringComparator extends ByteArrayComparable { /** * This is an internal interface for abstracting access to different regular - * expression matching engines. + * expression matching engines. 
*/ static interface Engine { /** @@ -220,7 +224,7 @@ public class RegexStringComparator extends ByteArrayComparable { * for matching */ String getPattern(); - + /** * Returns the set of configured match flags, a bit mask that may include * {@link Pattern} flags @@ -412,7 +416,7 @@ public class RegexStringComparator extends ByteArrayComparable { encoding = e.getEncoding(); } else { throw new IllegalCharsetNameException(name); - } + } } } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java index a91ecb5..ad4224b 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java @@ -19,11 +19,13 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Simple delegating controller for use with the {@link RpcControllerFactory} to help override * standard behavior of a {@link PayloadCarryingRpcController}. */ +@InterfaceAudience.Private public class DelegatingPayloadCarryingRpcController extends PayloadCarryingRpcController { private PayloadCarryingRpcController delegate; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 6d00adc..67e2524 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -27,6 +27,7 @@ import java.nio.ByteBuffer; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; @@ -49,6 +50,7 @@ import com.google.protobuf.Message; /** * Utility to help ipc'ing. 
*/ +@InterfaceAudience.Private class IPCUtil { public static final Log LOG = LogFactory.getLog(IPCUtil.class); /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java index 8f1780c..f8ab23f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java @@ -22,11 +22,13 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.ReflectionUtils; /** * Factory to create a {@link PayloadCarryingRpcController} */ +@InterfaceAudience.Private public class RpcControllerFactory { public static final String CUSTOM_CONTROLLER_CONF_KEY = "hbase.rpc.controllerfactory.class"; @@ -39,7 +41,7 @@ public class RpcControllerFactory { public PayloadCarryingRpcController newController() { return new PayloadCarryingRpcController(); } - + public PayloadCarryingRpcController newController(final CellScanner cellScanner) { return new PayloadCarryingRpcController(cellScanner); } @@ -47,7 +49,7 @@ public class RpcControllerFactory { public PayloadCarryingRpcController newController(final List cellIterables) { return new PayloadCarryingRpcController(cellIterables); } - + public static RpcControllerFactory instantiate(Configuration configuration) { String rpcControllerFactoryClazz = diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index 0a4354a..5a64c65 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.util.StringUtils; import com.google.protobuf.Descriptors; @@ -51,6 +52,7 @@ import com.google.protobuf.Service; * *

*/ +@InterfaceAudience.Private public class ServerRpcController implements RpcController { /** * The exception thrown within diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java index a56f904..2ab2a5b 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java @@ -19,11 +19,15 @@ package org.apache.hadoop.hbase.ipc; + import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; +@InterfaceAudience.Private public class TimeLimitedRpcController implements RpcController { /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 4bbc77b..9c451b1 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -37,6 +37,7 @@ import java.util.Map.Entry; import java.util.NavigableSet; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -159,6 +160,7 @@ import com.google.protobuf.TextFormat; */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED", justification="None. Address sometime.") +@InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class public final class ProtobufUtil { private ProtobufUtil() { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java index 2675e3a..54a1545 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.quotas; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Generic quota exceeded exception for invalid settings */ +@InterfaceAudience.Private public class InvalidQuotaSettingsException extends DoNotRetryIOException { public InvalidQuotaSettingsException(String msg) { super(msg); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java index d9bea8c..056d498 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.quotas; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Generic quota exceeded exception */ +@InterfaceAudience.Private public class QuotaExceededException extends DoNotRetryIOException { public QuotaExceededException(String msg) { super(msg); diff --git 
hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java index bcad943..32b9599 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java @@ -23,6 +23,7 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Describe the throttling result. @@ -31,6 +32,7 @@ import org.apache.commons.logging.LogFactory; * operation to go on the server if the waitInterval is grater than the one got * as result of this exception. */ +@InterfaceAudience.Private public class ThrottlingException extends QuotaExceededException { private static final long serialVersionUID = 1406576492085155743L; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java index 66781f1..937e943 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.HBaseException; /** @@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.exceptions.HBaseException; * store, loss of connection to a peer cluster or errors during deserialization of replication data. */ @InterfaceAudience.Public +@InterfaceStability.Stable public class ReplicationException extends HBaseException { private static final long serialVersionUID = -8885598603988198062L; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 35fa602..f115a39 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.replication; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Stoppable; @@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** * A factory class for instantiating replication objects that deal with replication state. 
*/ +@InterfaceAudience.Private public class ReplicationFactory { public static ReplicationQueues getReplicationQueues(final ZooKeeperWatcher zk, diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 91e0b05..8f01a76 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CompoundConfiguration; @@ -75,6 +76,7 @@ import com.google.protobuf.InvalidProtocolBufferException; * * /hbase/replication/peers/1/tableCFs [Value: "table1; table2:cf1,cf3; table3:cfx,cfy"] */ +@InterfaceAudience.Private public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements ReplicationPeers { // Map of peer clusters keyed by their id @@ -110,16 +112,16 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re throw new IllegalArgumentException("Cannot add a peer with id=" + id + " because that id already exists."); } - + if(id.contains("-")){ throw new IllegalArgumentException("Found invalid peer name:" + id); } - + ZKUtil.createWithParents(this.zookeeper, this.peersZNode); List listOfOps = new ArrayList(); ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(this.peersZNode, id), toByteArray(peerConfig)); - // There is a race (if hbase.zookeeper.useMulti is false) + // There is a race (if hbase.zookeeper.useMulti is false) // b/w PeerWatcher and ReplicationZookeeper#add method to create the // peer-state znode. This happens while adding a peer // The peer state data is set as "ENABLED" by default. diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index cd19157..ab9a2c2 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -25,6 +25,8 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; /** @@ -32,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName; * It will extract the peerId if it's recovered as well as the dead region servers * that were part of the queue's history. 
*/ +@InterfaceAudience.Private public class ReplicationQueueInfo { private static final Log LOG = LogFactory.getLog(ReplicationQueueInfo.class); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java index e8fa4df..fed1791 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java @@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.replication; import java.util.List; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + /** * This provides an interface for clients of replication to view replication queues. These queues * keep track of the WALs that still need to be replicated to remote clusters. */ +@InterfaceAudience.Private public interface ReplicationQueuesClient { /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java index fba1fef..43262a0 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java @@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.replication; import java.util.List; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; +@InterfaceAudience.Private public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implements ReplicationQueuesClient { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java index 3ed51c7..635b021 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java @@ -27,6 +27,7 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; @@ -60,6 +61,7 @@ import org.apache.zookeeper.KeeperException; * * /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 [VALUE: 254] */ +@InterfaceAudience.Private public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements ReplicationQueues { /** Znode containing all replication queues for this region server. 
*/ @@ -69,7 +71,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class); - public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf, + public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java index 2302438..1691b3f 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication; import java.util.List; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -32,6 +33,7 @@ import org.apache.zookeeper.KeeperException; /** * This is a base class for maintaining replication state in zookeeper. */ +@InterfaceAudience.Private public abstract class ReplicationStateZKBase { /** diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index 1a19cdd..f9f2d43 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -24,6 +24,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Stoppable; @@ -37,6 +38,7 @@ import org.apache.zookeeper.KeeperException; * responsible for handling replication events that are defined in the ReplicationListener * interface. 
*/ +@InterfaceAudience.Private public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements ReplicationTracker { private static final Log LOG = LogFactory.getLog(ReplicationTrackerZKImpl.class); diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 895d067..5a31f26 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -61,7 +61,7 @@ public class HBaseSaslRpcClient { private final boolean fallbackAllowed; /** * Create a HBaseSaslRpcClient for an authentication method - * + * * @param method * the requested authentication method * @param token @@ -75,11 +75,11 @@ public class HBaseSaslRpcClient { public HBaseSaslRpcClient(AuthMethod method, Token token, String serverPrincipal, boolean fallbackAllowed) throws IOException { - this(method, token, serverPrincipal, fallbackAllowed, "authentication"); + this(method, token, serverPrincipal, fallbackAllowed, "authentication"); } /** * Create a HBaseSaslRpcClient for an authentication method - * + * * @param method * the requested authentication method * @param token @@ -134,8 +134,8 @@ public class HBaseSaslRpcClient { throw new IOException("Unable to find SASL client implementation"); } - protected SaslClient createDigestSaslClient(String[] mechanismNames, - String saslDefaultRealm, CallbackHandler saslClientCallbackHandler) + protected SaslClient createDigestSaslClient(String[] mechanismNames, + String saslDefaultRealm, CallbackHandler saslClientCallbackHandler) throws IOException { return Sasl.createSaslClient(mechanismNames, null, null, saslDefaultRealm, SaslUtil.SASL_PROPS, saslClientCallbackHandler); @@ -143,7 +143,7 @@ public class HBaseSaslRpcClient { protected SaslClient createKerberosSaslClient(String[] mechanismNames, String userFirstPart, String userSecondPart) throws IOException { - return Sasl.createSaslClient(mechanismNames, null, userFirstPart, + return Sasl.createSaslClient(mechanismNames, null, userFirstPart, userSecondPart, SaslUtil.SASL_PROPS, null); } @@ -154,16 +154,16 @@ public class HBaseSaslRpcClient { WritableUtils.readString(inStream)); } } - + /** * Do client side SASL authentication with server via the given InputStream * and OutputStream - * + * * @param inS * InputStream to use * @param outS * OutputStream to use - * @return true if connection is set up, or false if needs to switch + * @return true if connection is set up, or false if needs to switch * to simple Auth. * @throws IOException */ @@ -243,7 +243,7 @@ public class HBaseSaslRpcClient { /** * Get a SASL wrapped InputStream. Can be called only after saslConnect() has * been called. - * + * * @param in * the InputStream to wrap * @return a SASL wrapped InputStream @@ -259,7 +259,7 @@ public class HBaseSaslRpcClient { /** * Get a SASL wrapped OutputStream. Can be called only after saslConnect() has * been called. 
- * + * * @param out * the OutputStream to wrap * @return a SASL wrapped OutputStream @@ -287,6 +287,7 @@ public class HBaseSaslRpcClient { this.userPassword = SaslUtil.encodePassword(token.getPassword()); } + @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { NameCallback nc = null; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index 9916a51..7bf5304 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -43,6 +43,9 @@ import com.google.common.collect.Maps; @InterfaceStability.Evolving public class Permission extends VersionedWritable { protected static final byte VERSION = 0; + + @InterfaceAudience.Public + @InterfaceStability.Evolving public enum Action { READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); @@ -148,6 +151,7 @@ public class Permission extends VersionedWritable { return result; } + @Override public String toString() { StringBuilder str = new StringBuilder("[Permission: ") .append("actions="); @@ -167,6 +171,7 @@ public class Permission extends VersionedWritable { } /** @return the object version number */ + @Override public byte getVersion() { return VERSION; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index 87371f7..e4758b0 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -354,6 +354,7 @@ public class TablePermission extends Permission { return result; } + @Override public String toString() { StringBuilder str = new StringBuilder("[TablePermission: "); if(namespace != null) { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index 7d44ddc..7313989 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -183,6 +183,7 @@ public class UserPermission extends TablePermission { return result; } + @Override public String toString() { StringBuilder str = new StringBuilder("UserPermission: ") .append("user=").append(Bytes.toString(user)) diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java index 784aec6..604a21a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -42,7 +42,7 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier { protected long issueDate; protected long expirationDate; protected long sequenceNumber; - + public AuthenticationTokenIdentifier() { } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java 
hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java index 4d87bdf..90dd0a7 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java @@ -20,11 +20,13 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; /* * This exception indicates that VisibilityController hasn't finished initialization. */ @InterfaceAudience.Public +@InterfaceStability.Evolving public class VisibilityControllerNotReadyException extends IOException { private static final long serialVersionUID = 1725986525207989173L; diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java index f6817e7..05f3556 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java @@ -18,11 +18,13 @@ package org.apache.hadoop.hbase.snapshot; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Thrown when a snapshot could not be exported due to an error during the operation. */ @InterfaceAudience.Public +@InterfaceStability.Stable @SuppressWarnings("serial") public class ExportSnapshotException extends HBaseSnapshotException { diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java new file mode 100644 index 0000000..ace11ec --- /dev/null +++ hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java @@ -0,0 +1,273 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.lang.annotation.Annotation; +import java.lang.reflect.Modifier; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.ClassFinder.And; +import org.apache.hadoop.hbase.ClassFinder.FileNameFilter; +import org.apache.hadoop.hbase.ClassFinder.Not; +import org.apache.hadoop.hbase.ClassTestFinder.TestClassFilter; +import org.apache.hadoop.hbase.ClassTestFinder.TestFileNameFilter; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test cases for ensuring our client-visible classes have annotations + * for {@link InterfaceAudience}. + * + * All classes in the hbase-client and hbase-common modules MUST have InterfaceAudience + * annotations. All InterfaceAudience.Public annotated classes MUST also have InterfaceStability + * annotations. Think twice about marking an interface InterfaceAudience.Public. Make sure that + * it is an interface, not a class (for most cases), and that clients will actually depend on it. Once + * something is marked with Public, we cannot change the signatures within the major release. NOT + * everything in the hbase-client module or every public Java class has to be marked with + * InterfaceAudience.Public. ONLY the ones that an hbase application will directly use (Table, Get, + * etc., versus ProtobufUtil). + * + * Also note that HBase has its own annotations in the hbase-annotations module with the same names + * as in Hadoop. You should use the HBase classes. + * + * See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html + * and https://issues.apache.org/jira/browse/HBASE-10462. + */ +@Category(SmallTests.class) +public class TestInterfaceAudienceAnnotations { + + private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class); + + /** Selects classes with "generated" in their package name */ + class GeneratedClassFilter implements ClassFinder.ClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + return c.getPackage().getName().contains("generated"); + } + } + + /** Selects classes with one of the {@link InterfaceAudience} annotations in their class + * declaration. + */ + class InterfaceAudienceAnnotatedClassFilter implements ClassFinder.ClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + if (getAnnotation(c) != null) { + // class itself has a declared annotation. + return true; + } + + // If this is an internal class, look for the encapsulating class to see whether it has + // an annotation. All inner classes of private classes are considered annotated.
+ return isAnnotatedPrivate(c.getEnclosingClass()); + } + + private boolean isAnnotatedPrivate(Class<?> c) { + if (c == null) { + return false; + } + + Class<? extends Annotation> ann = getAnnotation(c); + if (ann != null && + !InterfaceAudience.Public.class.equals(ann)) { + return true; + } + + return isAnnotatedPrivate(c.getEnclosingClass()); + } + + protected Class<? extends Annotation> getAnnotation(Class<?> c) { + // we should get only declared annotations, not inherited ones + Annotation[] anns = c.getDeclaredAnnotations(); + + for (Annotation ann : anns) { + // Hadoop clearly got it wrong for not making the annotation values (private, public, ...) + // an enum; instead we have three independent annotations! + Class<? extends Annotation> type = ann.annotationType(); + if (isInterfaceAudienceClass(type)) { + return type; + } + } + return null; + } + } + + /** Selects classes with one of the {@link InterfaceStability} annotations in their class + * declaration. + */ + class InterfaceStabilityAnnotatedClassFilter implements ClassFinder.ClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + if (getAnnotation(c) != null) { + // class itself has a declared annotation. + return true; + } + return false; + } + + protected Class<? extends Annotation> getAnnotation(Class<?> c) { + // we should get only declared annotations, not inherited ones + Annotation[] anns = c.getDeclaredAnnotations(); + + for (Annotation ann : anns) { + // Hadoop clearly got it wrong for not making the annotation values (private, public, ...) + // an enum; instead we have three independent annotations! + Class<? extends Annotation> type = ann.annotationType(); + if (isInterfaceStabilityClass(type)) { + return type; + } + } + return null; + } + } + + /** Selects classes with the {@link InterfaceAudience.Public} annotation in their + * class declaration. + */ + class InterfaceAudiencePublicAnnotatedClassFilter extends InterfaceAudienceAnnotatedClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + return (InterfaceAudience.Public.class.equals(getAnnotation(c))); + } + } + + /** + * Selects InterfaceAudience or InterfaceStability classes. Don't go meta!!! + */ + class IsInterfaceStabilityClassFilter implements ClassFinder.ClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + return + isInterfaceAudienceClass(c) || + isInterfaceStabilityClass(c); + } + } + + private boolean isInterfaceAudienceClass(Class<?> c) { + return + c.equals(InterfaceAudience.Public.class) || + c.equals(InterfaceAudience.Private.class) || + c.equals(InterfaceAudience.LimitedPrivate.class); + } + + private boolean isInterfaceStabilityClass(Class<?> c) { + return + c.equals(InterfaceStability.Stable.class) || + c.equals(InterfaceStability.Unstable.class) || + c.equals(InterfaceStability.Evolving.class); + } + + /** Selects classes that are declared public */ + class PublicClassFilter implements ClassFinder.ClassFilter { + @Override + public boolean isCandidateClass(Class<?> c) { + int mod = c.getModifiers(); + return Modifier.isPublic(mod); + } + } + + /** Selects paths (jars and class dirs) only from the main code, not test classes */ + class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter { + @Override + public boolean isCandidatePath(String resourcePath, boolean isJar) { + return !resourcePath.contains("test-classes") && + !resourcePath.contains("tests.jar"); + } + } + + /** + * Checks whether all the classes in client and common modules contain + * {@link InterfaceAudience} annotations.
+   */
+  @Test
+  public void testInterfaceAudienceAnnotation()
+      throws ClassNotFoundException, IOException, LinkageError {
+
+    // Find classes that are:
+    //   in the main jar,
+    //   AND public,
+    //   AND NOT test classes,
+    //   AND NOT generated classes,
+    //   AND NOT annotated with InterfaceAudience.
+    ClassFinder classFinder = new ClassFinder(
+      new MainCodeResourcePathFilter(),
+      new Not((FileNameFilter)new TestFileNameFilter()),
+      new And(new PublicClassFilter(),
+              new Not(new TestClassFilter()),
+              new Not(new GeneratedClassFilter()),
+              new Not(new IsInterfaceStabilityClassFilter()),
+              new Not(new InterfaceAudienceAnnotatedClassFilter()))
+    );
+
+    Set<Class<?>> classes = classFinder.findClasses(false);
+
+    LOG.info("These are the classes that DO NOT have @InterfaceAudience annotation:");
+    for (Class<?> clazz : classes) {
+      LOG.info(clazz);
+    }
+
+    Assert.assertEquals("All classes should have @InterfaceAudience annotation",
+      0, classes.size());
+  }
+
+  /**
+   * Checks whether all the classes in the client and common modules that are marked
+   * InterfaceAudience.Public also have {@link InterfaceStability} annotations.
+   */
+  @Test
+  public void testInterfaceStabilityAnnotation()
+      throws ClassNotFoundException, IOException, LinkageError {
+
+    // Find classes that are:
+    //   in the main jar,
+    //   AND public,
+    //   AND NOT test classes,
+    //   AND NOT generated classes,
+    //   AND annotated with InterfaceAudience.Public,
+    //   AND NOT annotated with InterfaceStability.
+    ClassFinder classFinder = new ClassFinder(
+      new MainCodeResourcePathFilter(),
+      new Not((FileNameFilter)new TestFileNameFilter()),
+      new And(new PublicClassFilter(),
+              new Not(new TestClassFilter()),
+              new Not(new GeneratedClassFilter()),
+              new InterfaceAudiencePublicAnnotatedClassFilter(),
+              new Not(new IsInterfaceStabilityClassFilter()),
+              new Not(new InterfaceStabilityAnnotatedClassFilter()))
+    );
+
+    Set<Class<?>> classes = classFinder.findClasses(false);
+
+    LOG.info("These are the classes that DO NOT have @InterfaceStability annotation:");
+    for (Class<?> clazz : classes) {
+      LOG.info(clazz);
+    }
+
+    Assert.assertEquals("All classes that are marked with @InterfaceAudience.Public should " +
+      "have @InterfaceStability annotation as well",
+      0, classes.size());
+  }
+}
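
For illustration, this is the shape the two tests above enforce. The sketch is not part of the patch and the class names are hypothetical.

    import org.apache.hadoop.hbase.classification.InterfaceAudience;
    import org.apache.hadoop.hbase.classification.InterfaceStability;

    // Client-facing: the audience annotation alone is not enough; the second
    // test also demands a stability annotation on every Public class.
    @InterfaceAudience.Public
    @InterfaceStability.Evolving
    public class ExampleClientApi {
    }

    // Internal: a Private audience satisfies both tests, and inner classes of
    // Private classes are treated as annotated by the filter above.
    @InterfaceAudience.Private
    class ExampleInternalHelper {
    }
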
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
index 8ad8584..86b4c32 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;

@@ -26,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
  * only sets the configuration through the {@link #setConf(Configuration)}
  * method
  */
+@InterfaceAudience.Private
 public class BaseConfigurable implements Configurable {

   private Configuration conf;
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 0356bff..6001767 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -84,6 +84,7 @@ public final class HConstants {
   /**
    * Status codes used for return values of bulk operations.
    */
+  @InterfaceAudience.Private
   public enum OperationStatusCode {
     NOT_RUN,
     SUCCESS,
@@ -570,6 +571,7 @@ public final class HConstants {
   public static final String REGION_IMPL = "hbase.hregion.impl";

   /** modifyTable op for replacing the table descriptor */
+  @InterfaceAudience.Private
   public static enum Modify {
     CLOSE_REGION,
     TABLE_COMPACT,
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
index 1033f40..5e6f6f7 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
@@ -25,6 +25,8 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * The field or the parameter to which this annotation can be applied only when it
  * holds mutations for hbase:meta table.
@@ -32,6 +34,7 @@ import java.lang.annotation.Target;
 @Documented
 @Target( { ElementType.LOCAL_VARIABLE, ElementType.PARAMETER })
 @Retention(RetentionPolicy.CLASS)
+@InterfaceAudience.Private
 public @interface MetaMutationAnnotation {
 }

diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
index acdaf08..e1ceace 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
@@ -161,6 +161,8 @@
     return new Builder(ns);
   }

+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public static class Builder {
     private String bName;
     private Map<String, String> bConfiguration = new TreeMap<String, String>();
@@ -173,7 +175,7 @@
     private Builder(String name) {
       this.bName = name;
     }
-    
+
     public Builder addConfiguration(Map<String, String> configuration) {
       this.bConfiguration.putAll(configuration);
       return this;
@@ -193,7 +195,7 @@
       if (this.bName == null){
         throw new IllegalArgumentException("A name has to be specified in a namespace.");
       }
-      
+
       NamespaceDescriptor desc = new NamespaceDescriptor(this.bName);
       desc.configuration = this.bConfiguration;
       return desc;
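
As a usage note for the now-Public Builder above: a NamespaceDescriptor is assembled via create() and build(). A minimal sketch, assuming only the API visible in this hunk plus the create(String) factory; the namespace name and the property are illustrative.

    Map<String, String> props = new TreeMap<String, String>();
    props.put("some.namespace.property", "value");   // illustrative key/value

    NamespaceDescriptor ns = NamespaceDescriptor.create("example_ns")
        .addConfiguration(props)
        .build();   // throws IllegalArgumentException if no name was given
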
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 73525c5..f658210 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -228,6 +228,8 @@ implements WritableComparable<ImmutableBytesWritable> {

   /** A Comparator optimized for ImmutableBytesWritable.
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public static class Comparator extends WritableComparator {
     private BytesWritable.Comparator comparator =
       new BytesWritable.Comparator();
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
index a9fdd21..68e3ad4 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
@@ -26,11 +26,14 @@ import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * Copied from guava source code v15 (LimitedInputStream)
  * Guava deprecated LimitInputStream in v14 and removed it in v15. Copying this class here
  * allows to be compatible with guava 11 to 15+.
  */
+@InterfaceAudience.Private
 public final class LimitInputStream extends FilterInputStream {
   private long left;
   private long mark = -1;
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 2a3c337..3420d0a 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -56,6 +56,8 @@ public final class Encryption {
   /**
    * Crypto context
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public static class Context extends org.apache.hadoop.hbase.io.crypto.Context {

     /** The null crypto context */
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 369d71e..dd6df0c 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.io.hadoopbackport;
 import java.io.IOException;
 import java.io.InputStream;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * The ThrottleInputStream provides bandwidth throttling on a specified
  * InputStream. It is implemented as a wrapper on top of another InputStream
@@ -31,6 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short interval,
  * the average tends towards the specified maximum, overall.)
 */
+@InterfaceAudience.Private
 public class ThrottledInputStream extends InputStream {

   private final InputStream rawStream;
@@ -47,7 +50,7 @@ public class ThrottledInputStream extends InputStream {
   }

   public ThrottledInputStream(InputStream rawStream, long maxBytesPerSec) {
-    assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid"; 
+    assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
     this.rawStream = rawStream;
     this.maxBytesPerSec = maxBytesPerSec;
   }
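
The throttling contract described in the javadoc above boils down to: before serving a read, wait until the observed average rate is back under maxBytesPerSec. A minimal sketch of that idea, not the class's actual implementation:

    final class RateLimitSketch {
      private RateLimitSketch() {}

      /** Block until bytesRead/elapsed drops to maxBytesPerSec or below. */
      static void awaitUnderRate(long bytesRead, long startMillis, long maxBytesPerSec)
          throws InterruptedException {
        long elapsed = Math.max(System.currentTimeMillis() - startMillis, 1);
        while ((bytesRead * 1000.0) / elapsed > maxBytesPerSec) {
          Thread.sleep(50);  // brief back-off, then re-check the average
          elapsed = Math.max(System.currentTimeMillis() - startMillis, 1);
        }
      }
    }

This is why short bursts may exceed the maximum while the long-run average converges to it.
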
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index f2a16ca..eced0ff 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 /**
  * Provide an instance of a user. Allows custom {@link User} creation.
  */
-
 @InterfaceAudience.Private
 public class UserProvider extends BaseConfigurable {
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
index b7fa574..b2a36d7 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.hbase.trace;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.htrace.HTraceConfiguration;

+@InterfaceAudience.Private
 public class HBaseHTraceConfiguration extends HTraceConfiguration {

   public static final String KEY_PREFIX = "hbase.";
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index c4fcec4..eb74ea1 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.htrace.SpanReceiver;
 import org.htrace.Trace;

@@ -32,6 +33,7 @@ import org.htrace.Trace;
  * hbase-site.xml, adding those SpanReceivers to the Tracer, and closing those
  * SpanReceivers when appropriate.
  */
+@InterfaceAudience.Private
 public class SpanReceiverHost {
   public static final String SPAN_RECEIVERS_CONF_KEY = "hbase.trace.spanreceiver.classes";
   private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
index ecc9c80..3d545f6 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.types;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Order;
 import org.apache.hadoop.hbase.util.PositionedByteRange;

@@ -28,6 +30,8 @@ import com.google.protobuf.Message;
  * A base-class for {@link DataType} implementations backed by protobuf. See
  * {@code PBKeyValue} in {@code hbase-examples} module.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public abstract class PBType<T extends Message> implements DataType<T> {
   @Override
   public boolean isOrderPreserving() {
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
index c5aebab..cd41658 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hbase.util;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * An abstract implementation of the ByteRange API
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public abstract class AbstractByteRange implements ByteRange {

   public static final int UNSET_HASH_VALUE = -1;
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 26bcf97..77f7143 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.util;

 import java.nio.ByteBuffer;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 import com.google.common.annotations.VisibleForTesting;

 /**
@@ -28,6 +31,8 @@ import com.google.common.annotations.VisibleForTesting;
  * {@link #compareTo(ByteRange)}, {@link #hashCode()}, or
  * {@link #equals(Object)}. {@code Position} is retained by copy operations.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public abstract class AbstractPositionedByteRange extends AbstractByteRange implements PositionedByteRange {
   /**
@@ -74,7 +79,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   /**
    * Update the beginning of this range. {@code offset + length} may not be
    * greater than {@code bytes.length}. Resets {@code position} to 0.
-   * 
+   *
    * @param offset
    *          the new start of this range.
    * @return this.
@@ -90,7 +95,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   /**
   * Update the length of this range. {@code offset + length} should not be
   * greater than {@code bytes.length}. If {@code position} is greater than the
   * new {@code length}, sets {@code position} to {@code length}.
-   * 
+   *
   * @param length
   *          The new length of this range.
   * @return this.
@@ -153,28 +158,28 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl

   @Override
   public abstract PositionedByteRange put(byte[] val, int offset, int length);
-  
+
   @Override
-  public abstract PositionedByteRange putInt(int index, int val); 
+  public abstract PositionedByteRange putInt(int index, int val);

   @Override
   public abstract PositionedByteRange putLong(int index, long val);
-  
+
   @Override
   public abstract PositionedByteRange putShort(int index, short val);
-  
+
   @Override
   public abstract PositionedByteRange putInt(int val);
-  
+
   @Override
   public abstract PositionedByteRange putLong(long val);
-  
+
   @Override
   public abstract PositionedByteRange putShort(short val);
-  
+
   @Override
   public abstract int putVLong(int index, long val);
-  
+
   @Override
   public abstract int putVLong(long val);
   /**
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
index 76e2549..b331c32 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
@@ -117,8 +117,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 * <p>
 * version: 2.2.1
 */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
+@InterfaceAudience.Private
 public class Base64 {

   /* ******** P U B L I C   F I E L D S ******** */
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
index d89d337..c3fa547 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
@@ -30,12 +30,15 @@ import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
 * A completion service, close to the one available in the JDK 1.7
 * However, this ones keeps the list of the future, and allows to cancel them all.
 * This means as well that it can be used for a small set of tasks only.
 * <br>Implementation is not Thread safe.
 */
+@InterfaceAudience.Private
 public class BoundedCompletionService<V> {
   private final Executor executor;
   private final List<Future<V>> tasks; // alls the tasks
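
The point of keeping the futures, as the javadoc above notes, is batch cancellation. The same pattern with plain JDK types, for illustration only; pool and inputs are assumed to be an ExecutorService and a Collection<String> in scope:

    // The JDK's ExecutorCompletionService does not remember its futures, so a
    // caller who wants take()-style consumption AND cancel-all must keep them.
    CompletionService<String> cs = new ExecutorCompletionService<String>(pool);
    List<Future<String>> futures = new ArrayList<Future<String>>();
    for (final String in : inputs) {
      futures.add(cs.submit(new Callable<String>() {
        @Override
        public String call() { return in.toUpperCase(); }
      }));
    }
    String first = cs.take().get();      // first task to finish
    for (Future<String> f : futures) {   // then cancel the stragglers
      f.cancel(true);
    }
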
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
index cc09c3a..414832d 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
@@ -23,12 +23,15 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.util.zip.Checksum;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * Utility class that is used to generate a Checksum object.
  * The Checksum implementation is pluggable and an application
  * can specify their own class that implements their own
  * Checksum algorithm.
  */
+@InterfaceAudience.Private
 public class ChecksumFactory {

   static private final Class<?>[] EMPTY_ARRAY = new Class<?>[]{};
@@ -51,7 +54,7 @@ public class ChecksumFactory {
    * @param className classname for which an constructor is created
    * @return a new Constructor object
    */
-  static public Constructor<?> newConstructor(String className) 
+  static public Constructor<?> newConstructor(String className)
     throws IOException {
     try {
       Class<?> clazz = getClassByName(className);
@@ -88,7 +91,7 @@ public class ChecksumFactory {
    * @return the class object.
    * @throws ClassNotFoundException if the class is not found.
    */
-  static private Class<?> getClassByName(String name) 
+  static private Class<?> getClassByName(String name)
     throws ClassNotFoundException {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     return Class.forName(name, true, classLoader);
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index 86af5e7..95df769 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -24,13 +24,15 @@ import java.util.zip.Checksum;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;

 /**
  * Checksum types. The Checksum type is a one byte number
  * that stores a representation of the checksum algorithm
- * used to encode a hfile. The ordinal of these cannot 
+ * used to encode a hfile. The ordinal of these cannot
  * change or else you risk breaking all existing HFiles out there.
  */
+@InterfaceAudience.Private
 public enum ChecksumType {

   NULL((byte)0) {
@@ -70,7 +72,7 @@ public enum ChecksumType {
         LOG.trace(PURECRC32 + " not available.");
       }
       try {
-        // The default checksum class name is java.util.zip.CRC32. 
+        // The default checksum class name is java.util.zip.CRC32.
         // This is available on all JVMs.
         if (ctor == null) {
           ctor = ChecksumFactory.newConstructor(JDKCRC);
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
index 2180b4d..0f00132 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
@@ -24,12 +24,15 @@ import java.util.Collection;
 import java.util.List;
 import java.util.NoSuchElementException;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * A collection class that contains multiple sub-lists, which allows us to not copy lists.
  * This class does not support modification. The derived classes that add modifications are
  * not thread-safe.
  * NOTE: Doesn't implement list as it is not necessary for current usage, feel free to add.
  */
+@InterfaceAudience.Private
 public class ConcatenatedLists<T> implements Collection<T> {
   protected final ArrayList<List<T>> components = new ArrayList<List<T>>();
   protected int size = 0;
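
A short usage sketch for ConcatenatedLists; the addSublist mutator is assumed here for illustration, since the hunk above only shows the backing structure:

    ConcatenatedLists<String> all = new ConcatenatedLists<String>();
    all.addSublist(Arrays.asList("a", "b"));
    all.addSublist(Arrays.asList("c"));
    for (String s : all) {
      System.out.println(s);   // prints a, b, c without copying either list
    }
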
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
index d56055a..688b51a 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
@@ -22,6 +22,8 @@ import java.io.InterruptedIOException;
 import java.net.SocketTimeoutException;
 import java.nio.channels.ClosedByInterruptException;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * This class handles the different interruption classes.
  * It can be:
@@ -31,6 +33,7 @@ import java.nio.channels.ClosedByInterruptException;
  * - SocketTimeoutException inherits InterruptedIOException but is not a real
  *   interruption, so we have to distinguish the case. This pattern is unfortunately common.
  */
+@InterfaceAudience.Private
 public class ExceptionUtil {

   /**
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
index cb96f3e..89014db 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hbase.util;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * This is a very fast, non-cryptographic hash suitable for general hash-based
  * lookup. See http://code.google.com/p/smhasher/wiki/MurmurHash3 for details.
@@ -25,6 +28,8 @@ package org.apache.hadoop.hbase.util;
 *
 * <p>MurmurHash3 is the successor to MurmurHash2. It comes in 3 variants, and
 * the 32-bit version targets low latency for hash table use.</p>
 */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class MurmurHash3 extends Hash {
   private static MurmurHash3 _instance = new MurmurHash3();
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index 0e50dfb..8c8f618 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.util;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;

-@InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public class PrettyPrinter {

   public enum Unit {
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
index 6f508e2..c14f1e2 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hbase.util;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * Exception thrown when a read only byte range is modified
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class ReadOnlyByteRangeException extends UnsupportedOperationException {

   public ReadOnlyByteRangeException() {
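
Taken together, the two Public annotations above advertise the read-only contract to clients: reads on a SimpleByteRange succeed, while mutation is expected to fail with ReadOnlyByteRangeException. An illustrative sketch, assuming put() is the mutator that enforces this:

    ByteRange r = new SimpleByteRange(new byte[] { 1, 2, 3 });
    byte b = r.get(1);       // reading is fine
    try {
      r.put(1, (byte) 9);    // mutation is expected to fail
    } catch (ReadOnlyByteRangeException expected) {
      // SimpleByteRange is the read-only ByteRange implementation
    }
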
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
index db3ca0f..4d5e5b5 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
@@ -17,13 +17,18 @@
  */
 package org.apache.hadoop.hbase.util;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * A read only version of the {@link ByteRange}.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class SimpleByteRange extends AbstractByteRange {

   public SimpleByteRange() {
   }
-  
+
   public SimpleByteRange(int capacity) {
     this(new byte[capacity]);
   }
@@ -110,7 +115,7 @@ public class SimpleByteRange extends AbstractByteRange {
     }
     return clone;
   }
-  
+
   @Override
   public ByteRange shallowCopySubRange(int innerOffset, int copyLength) {
     SimpleByteRange clone = new SimpleByteRange(bytes, offset + innerOffset,
@@ -120,7 +125,7 @@ public class SimpleByteRange extends AbstractByteRange {
     }
     return clone;
   }
-  
+
   @Override
   public boolean equals(Object thatObject) {
     if (thatObject == null){
diff --git hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
index c15ace9..d46537c 100644
--- hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
+++ hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
@@ -62,6 +62,43 @@ public class ClassFinder {
     boolean isCandidateClass(Class<?> c);
   };

+  public static class Not implements ResourcePathFilter, FileNameFilter, ClassFilter {
+    private ResourcePathFilter resourcePathFilter;
+    private FileNameFilter fileNameFilter;
+    private ClassFilter classFilter;
+
+    public Not(ResourcePathFilter resourcePathFilter){this.resourcePathFilter = resourcePathFilter;}
+    public Not(FileNameFilter fileNameFilter){this.fileNameFilter = fileNameFilter;}
+    public Not(ClassFilter classFilter){this.classFilter = classFilter;}
+
+    @Override
+    public boolean isCandidatePath(String resourcePath, boolean isJar) {
+      return !resourcePathFilter.isCandidatePath(resourcePath, isJar);
+    }
+    @Override
+    public boolean isCandidateFile(String fileName, String absFilePath) {
+      return !fileNameFilter.isCandidateFile(fileName, absFilePath);
+    }
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      return !classFilter.isCandidateClass(c);
+    }
+  }
+
+  public static class And implements ClassFilter {
+    ClassFilter[] classFilters;
+    public And(ClassFilter...classFilters) { this.classFilters = classFilters; }
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      for (ClassFilter filter : classFilters) {
+        if (!filter.isCandidateClass(c)) {
+          return false;
+        }
+      }
+      return true;
+    }
+  }
+
   public ClassFinder() {
     this(null, null, null);
   }
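
The Not and And wrappers added above make ClassFinder's three filter kinds composable. A usage sketch mirroring the annotation tests earlier in this patch:

    ClassFinder finder = new ClassFinder(
        new MainCodeResourcePathFilter(),
        new Not((FileNameFilter) new TestFileNameFilter()),
        new And(new PublicClassFilter(),
                new Not(new TestClassFilter()),
                new Not(new GeneratedClassFilter()),
                new Not(new InterfaceAudienceAnnotatedClassFilter())));
    Set<Class<?>> missing = finder.findClasses(false);

The filters come from TestInterfaceAudienceAnnotations above, and the boolean argument to findClasses is passed exactly as those tests pass it.
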
diff --git hbase-protocol/pom.xml hbase-protocol/pom.xml
index 297a7af..7787c52 100644
--- hbase-protocol/pom.xml
+++ hbase-protocol/pom.xml
@@ -103,6 +103,10 @@
       </dependency>
+      <dependency>
+        <groupId>org.apache.hbase</groupId>
+        <artifactId>hbase-annotations</artifactId>
+      </dependency>
       <dependency>
         <groupId>com.google.protobuf</groupId>
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
index ab354bc..5b10b83 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;

 import com.google.protobuf.ByteString;
 import com.google.protobuf.HBaseZeroCopyByteString;
@@ -26,6 +27,7 @@ import com.google.protobuf.HBaseZeroCopyByteString;
 /**
  * Hack to workaround HBASE-1304 issue that keeps bubbling up when a mapreduce context.
  */
+@InterfaceAudience.Private
 public class ByteStringer {
   private static final Log LOG = LogFactory.getLog(ByteStringer.class);

diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
index f34b83d..0e03a42 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.zip.Checksum;

+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.util.ChecksumType;
 /**
  * Utility methods to compute and validate checksums.
  */
+@InterfaceAudience.Private
 public class ChecksumUtil {

   /** This is used to reserve space in a byte buffer */