commit 13a1f411b3fcb99336e0775b635b812a60f69b22 Author: Todd Lipcon Date: Tue Nov 29 18:41:41 2011 -0800 annotations diff --git pom.xml pom.xml index 10dc81d..2f3412a 100644 --- pom.xml +++ pom.xml @@ -1085,6 +1085,15 @@ + + + org.apache.hadoop + hadoop-annotations + provided + + 0.23.1-SNAPSHOT + + tomcat jasper-runtime diff --git src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java index 9b1d021..72a7e15 100644 --- src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java +++ src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java @@ -21,6 +21,7 @@ import java.io.IOException; * Thrown during flush if the possibility snapshot content was not properly * persisted into store files. Response should include replay of hlog content. */ +@InterfaceAudience.Private public class DroppedSnapshotException extends IOException { private static final long serialVersionUID = -5463156580831677374L; @@ -38,4 +39,4 @@ public class DroppedSnapshotException extends IOException { public DroppedSnapshotException() { super(); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 8d22fb9..b13d165 100644 --- src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.util.VersionInfo; /** * Adds HBase configuration files to a Configuration */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HBaseConfiguration extends Configuration { private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class); diff --git src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java index 29c59af..eed4019 100644 --- 
src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java +++ src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java @@ -31,6 +31,7 @@ import java.util.TreeSet; /** * Data structure to describe the distribution of HDFS blocks amount hosts */ +@InterfaceAudience.Private public class HDFSBlocksDistribution { private Map hostAndWeights = null; private long uniqueBlocksTotalWeight = 0; diff --git src/main/java/org/apache/hadoop/hbase/HRegionLocation.java src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index 85fb91d..3e0b414 100644 --- src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.util.Addressing; * instances are the same if they refer to the same 'location' (the same * hostname and port), though they may be carrying different regions. */ +@InterfaceAudience.Private public class HRegionLocation implements Comparable { private final HRegionInfo regionInfo; private final String hostname; @@ -131,4 +132,4 @@ public class HRegionLocation implements Comparable { if (result != 0) return result; return this.port - o.getPort(); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/HServerInfo.java src/main/java/org/apache/hadoop/hbase/HServerInfo.java index 5934246..074d511 100644 --- src/main/java/org/apache/hadoop/hbase/HServerInfo.java +++ src/main/java/org/apache/hadoop/hbase/HServerInfo.java @@ -40,6 +40,7 @@ import org.apache.hadoop.io.WritableComparable; * @deprecated Use {@link InetSocketAddress} and or {@link ServerName} and or * {@link HServerLoad} */ +@InterfaceAudience.Private public class HServerInfo extends VersionedWritable implements WritableComparable { private static final byte VERSION = 1; diff --git src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 84a0d1a..f84b3d0 100644 --- 
src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -45,6 +45,8 @@ import org.apache.hadoop.io.WritableComparable; * .META. , is the table is read only, the maximum size of the memstore, * when the region split should occur, coprocessors associated with it etc... */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HTableDescriptor implements WritableComparable { /** diff --git src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index bb2b666..4b056f1 100644 --- src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -25,6 +25,8 @@ import java.io.IOException; * Thrown if a request is table schema modification is requested but * made for an invalid family name. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class InvalidFamilyOperationException extends IOException { private static final long serialVersionUID = 1L << 22 - 1L; /** default constructor */ diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java index be7e2d8..8d22b44 100644 --- src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -63,6 +63,8 @@ import org.apache.hadoop.io.Writable; * be < Integer.MAX_SIZE. 
* The column does not contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER} */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class KeyValue implements Writable, HeapSize { static final Log LOG = LogFactory.getLog(KeyValue.class); // TODO: Group Key-only comparators and operations into a Key class, just diff --git src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 5fcc39c..8f2c5b3 100644 --- src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil; * instead of 60000. * */ +@InterfaceAudience.Private public class LocalHBaseCluster { static final Log LOG = LogFactory.getLog(LocalHBaseCluster.class); private final List masterThreads = diff --git src/main/java/org/apache/hadoop/hbase/MasterAddressTracker.java src/main/java/org/apache/hadoop/hbase/MasterAddressTracker.java index 2fdc6ba..46f3b5a 100644 --- src/main/java/org/apache/hadoop/hbase/MasterAddressTracker.java +++ src/main/java/org/apache/hadoop/hbase/MasterAddressTracker.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; *

* You can get the current master via {@link #getMasterAddress()} */ +@InterfaceAudience.Private public class MasterAddressTracker extends ZooKeeperNodeTracker { /** * Construct a master address listener with the specified diff --git src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 32da8cb..73bb758 100644 --- src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.util.Bytes; * Thrown by a region server if it is sent a request for a region it is not * serving. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class NotServingRegionException extends IOException { private static final long serialVersionUID = 1L << 17 - 1L; @@ -50,4 +52,4 @@ public class NotServingRegionException extends IOException { public NotServingRegionException(final byte [] s) { super(Bytes.toString(s)); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/ServerName.java src/main/java/org/apache/hadoop/hbase/ServerName.java index d6d869a..300eb9e 100644 --- src/main/java/org/apache/hadoop/hbase/ServerName.java +++ src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.util.Bytes; * *

Immutable. */ +@InterfaceAudience.Private public class ServerName implements Comparable { /** * Version for this class. @@ -287,4 +288,4 @@ public class ServerName implements Comparable { return SERVERNAME_PATTERN.matcher(str).matches()? new ServerName(str): new ServerName(str, NON_STARTCODE); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index dc6da43..9852a16 100644 --- src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase; /** Thrown when a table can not be located */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class TableNotFoundException extends RegionException { private static final long serialVersionUID = 993179627856392526L; diff --git src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java index 8ca50a9..5b24e7e 100644 --- src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java +++ src/main/java/org/apache/hadoop/hbase/UnknownRowLockException.java @@ -23,6 +23,8 @@ package org.apache.hadoop.hbase; /** * Thrown if a region server is passed an unknown row lock id */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class UnknownRowLockException extends DoNotRetryIOException { private static final long serialVersionUID = 993179627856392526L; @@ -38,4 +40,4 @@ public class UnknownRowLockException extends DoNotRetryIOException { public UnknownRowLockException(String s) { super(s); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java index 13f2f6c..92f8868 100644 --- src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ 
src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -26,6 +26,8 @@ package org.apache.hadoop.hbase; * scanner lease on the serverside has expired OR the serverside is closing * down and has cancelled all leases. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class UnknownScannerException extends DoNotRetryIOException { private static final long serialVersionUID = 993179627856392526L; @@ -41,4 +43,4 @@ public class UnknownScannerException extends DoNotRetryIOException { public UnknownScannerException(String s) { super(s); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java index f45bbbd..8698446 100644 --- src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java +++ src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Start an Avro server */ +@InterfaceAudience.Private public class AvroServer { /** diff --git src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java index abd2ae6..6e31bc7 100644 --- src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java +++ src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.util.Bytes; +@InterfaceAudience.Private public class AvroUtil { // diff --git src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 1c49dc5..b0fb103 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -56,6 +56,7 @@ import org.apache.hadoop.ipc.RemoteException; *

Call {@link #start()} to start up operation. Call {@link #stop()}} to * interrupt waits and close up shop. */ +@InterfaceAudience.Private public class CatalogTracker { // TODO: This class needs a rethink. The original intent was that it would be // the one-stop-shop for root and meta locations and that it would get this diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java index 19fee5c..53de84d 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.util.Writables; * TODO: Put MetaReader and MetaEditor together; doesn't make sense having * them distinct. */ +@InterfaceAudience.Private public class MetaEditor { // TODO: Strip CatalogTracker from this class. Its all over and in the end // its only used to get its Configuration so we can get associated diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java index 01aa515..5c8462e 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.util.Writables; * @deprecated Used migration from 0.90 to 0.92 so will be going away in next * release */ +@InterfaceAudience.Private public class MetaMigrationRemovingHTD { private static final Log LOG = LogFactory.getLog(MetaMigrationRemovingHTD.class); diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index e5e60a8..e78be1b 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ipc.RemoteException; /** * 
Reads region and assignment information from .META.. */ +@InterfaceAudience.Private public class MetaReader { // TODO: Strip CatalogTracker from this class. Its all over and in the end // its only used to get its Configuration so we can get associated diff --git src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java index 1cbf1b6..5d99174 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java +++ src/main/java/org/apache/hadoop/hbase/catalog/RootLocationEditor.java @@ -30,6 +30,7 @@ import org.apache.zookeeper.KeeperException; /** * Makes changes to the location of -ROOT- in ZooKeeper. */ +@InterfaceAudience.Private public class RootLocationEditor { private static final Log LOG = LogFactory.getLog(RootLocationEditor.class); @@ -69,4 +70,4 @@ public class RootLocationEditor { Bytes.toBytes(location.toString())); } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/Action.java src/main/java/org/apache/hadoop/hbase/client/Action.java index 40b0f2e..337e403 100644 --- src/main/java/org/apache/hadoop/hbase/client/Action.java +++ src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -32,6 +32,8 @@ import org.apache.hadoop.io.Writable; * {@link HTable::batch} to associate the action with it's region and maintain * the index from the original request. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Action implements Writable, Comparable { private Row action; diff --git src/main/java/org/apache/hadoop/hbase/client/Append.java src/main/java/org/apache/hadoop/hbase/client/Append.java index d5d591e..f56b7a8 100644 --- src/main/java/org/apache/hadoop/hbase/client/Append.java +++ src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -41,6 +41,8 @@ import org.apache.hadoop.io.Writable; * row to append to. 
At least one column to append must be specified using the * {@link #add(byte[], byte[], byte[])} method. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Append extends Mutation implements Writable { // TODO: refactor to derive from Put? private static final String RETURN_RESULTS = "_rr_"; diff --git src/main/java/org/apache/hadoop/hbase/client/Delete.java src/main/java/org/apache/hadoop/hbase/client/Delete.java index e524d8e..37aa20e 100644 --- src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -64,6 +64,8 @@ import java.util.Map; * deleteFamily -- then you need to use the method overrides that take a * timestamp. The constructor timestamp is not referenced. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Delete extends Mutation implements Writable, Row, Comparable { private static final byte DELETE_VERSION = (byte)3; diff --git src/main/java/org/apache/hadoop/hbase/client/Get.java src/main/java/org/apache/hadoop/hbase/client/Get.java index 93c9e89..9180ac9 100644 --- src/main/java/org/apache/hadoop/hbase/client/Get.java +++ src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -63,6 +63,8 @@ import java.util.TreeSet; *

* To add a filter, execute {@link #setFilter(Filter) setFilter}. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Get extends OperationWithAttributes implements Writable, Row, Comparable { private static final byte GET_VERSION = (byte)2; diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index aa8512b..39140c4 100644 --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -72,6 +72,8 @@ import org.apache.hadoop.util.StringUtils; *

Currently HBaseAdmin instances are not expected to be long-lived. For * example, an HBaseAdmin instance will not ride over a Master restart. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HBaseAdmin implements Abortable, Closeable { private final Log LOG = LogFactory.getLog(this.getClass().getName()); // private final HConnection connection; diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 6af1f82..4a240ef 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -132,6 +132,8 @@ import org.apache.zookeeper.KeeperException; * cleanup to the client. */ @SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable public class HConnectionManager { // An LRU Map of HConnectionKey -> HConnection (TableServer). All // access must be synchronized. This map is not private because tests diff --git src/main/java/org/apache/hadoop/hbase/client/HTable.java src/main/java/org/apache/hadoop/hbase/client/HTable.java index e59f99c..120ab98 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -107,6 +107,8 @@ import org.apache.hadoop.io.DataOutputBuffer; * @see HConnection * @see HConnectionManager */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HTable implements HTableInterface, Closeable { private static final Log LOG = LogFactory.getLog(HTable.class); private HConnection connection; diff --git src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java index 90f6cb9..1904be1 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java +++ src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java @@ -28,6 +28,8 @@ import java.io.IOException; * * 
@since 0.21.0 */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HTableFactory implements HTableInterfaceFactory { @Override public HTableInterface createHTableInterface(Configuration config, @@ -43,4 +45,4 @@ public class HTableFactory implements HTableInterfaceFactory { public void releaseHTableInterface(HTableInterface table) throws IOException { table.close(); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/HTablePool.java src/main/java/org/apache/hadoop/hbase/client/HTablePool.java index a5c198f..fa641e4 100755 --- src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.util.PoolMap.PoolType; * Pool will manage its own connections to the cluster. See * {@link HConnectionManager}. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HTablePool implements Closeable { private final PoolMap tables; private final int maxSize; diff --git src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java index bc0872a..9191740 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java +++ src/main/java/org/apache/hadoop/hbase/client/HTableUtil.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.client.Row; * * */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HTableUtil { private static final int INITIAL_LIST_SIZE = 250; diff --git src/main/java/org/apache/hadoop/hbase/client/Increment.java src/main/java/org/apache/hadoop/hbase/client/Increment.java index 59b0214..f68e2f6 100644 --- src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -43,6 +43,8 @@ import org.apache.hadoop.io.Writable; * to increment. 
At least one column to increment must be specified using the * {@link #addColumn(byte[], byte[], long)} method. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Increment implements Writable { private static final byte INCREMENT_VERSION = (byte)2; diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 4135e55..ce73fd4 100644 --- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -49,6 +49,8 @@ import org.apache.hadoop.hbase.util.Writables; * Although public visibility, this is not a public-facing API and may evolve in * minor releases. */ +@InterfaceAudience.Private +@InterfaceStability.Evolving public class MetaScanner { private static final Log LOG = LogFactory.getLog(MetaScanner.class); /** diff --git src/main/java/org/apache/hadoop/hbase/client/MultiPut.java src/main/java/org/apache/hadoop/hbase/client/MultiPut.java index 9235e2d..2cef2e5 100644 --- src/main/java/org/apache/hadoop/hbase/client/MultiPut.java +++ src/main/java/org/apache/hadoop/hbase/client/MultiPut.java @@ -42,6 +42,8 @@ import java.util.TreeSet; * @deprecated Use MultiAction instead * Data type class for putting multiple regions worth of puts in one RPC. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class MultiPut extends Operation implements Writable { public HServerAddress address; // client code ONLY diff --git src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java index 7e0311a..515305a 100644 --- src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java +++ src/main/java/org/apache/hadoop/hbase/client/MultiPutResponse.java @@ -33,6 +33,8 @@ import java.util.TreeMap; * @deprecated Replaced by MultiResponse * Response class for MultiPut. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class MultiPutResponse implements Writable { protected MultiPut request; // used in client code ONLY diff --git src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 290e4c7..c120244 100644 --- src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -42,6 +42,8 @@ import java.util.TreeMap; /** * A container for Result objects, grouped by regionName. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class MultiResponse implements Writable { // map of regionName to list of (Results paired to the original index for that diff --git src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 4f33914..f13172b 100644 --- src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hbase.RegionException; /** * Thrown when no region server can be found for a region */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class NoServerForRegionException extends RegionException { private static final long serialVersionUID = 1L << 11 - 1L; diff --git src/main/java/org/apache/hadoop/hbase/client/Put.java src/main/java/org/apache/hadoop/hbase/client/Put.java index c09b339..67085b5 100644 --- src/main/java/org/apache/hadoop/hbase/client/Put.java +++ src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -43,6 +43,8 @@ import java.util.TreeMap; * for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or * {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Put extends Mutation implements HeapSize, Writable, Row, Comparable { private static final byte PUT_VERSION = (byte)2; diff --git src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java index d223860..756803c 100644 --- src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java +++ src/main/java/org/apache/hadoop/hbase/client/RegionOfflineException.java @@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.RegionException; /** Thrown when a table can not be located */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class RegionOfflineException extends RegionException { private static final long serialVersionUID = 466008402L; /** default constructor */ diff --git src/main/java/org/apache/hadoop/hbase/client/Result.java src/main/java/org/apache/hadoop/hbase/client/Result.java index 57f5ecb..70a9462 100644 --- src/main/java/org/apache/hadoop/hbase/client/Result.java +++ src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -65,6 +65,8 @@ import org.apache.hadoop.io.Writable; * through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()}, * {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Result implements Writable, WritableWithSize { private static final byte RESULT_VERSION = (byte)1; diff --git src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index b9042f6..0889708 100644 --- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -23,6 +23,8 @@ import java.util.List; * Exception thrown by HTable methods when an attempt to do something (like * commit changes) fails after a bunch of retries. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class RetriesExhaustedException extends IOException { private static final long serialVersionUID = 1876775844L; @@ -102,4 +104,4 @@ public class RetriesExhaustedException extends IOException { } return buffer.toString(); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java index 4a98c5e..a525617 100644 --- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java +++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java @@ -42,6 +42,8 @@ import java.util.Set; * {@link #getCause(int)}, {@link #getRow(int)} and {@link #getAddress(int)}. 
*/ @SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable public class RetriesExhaustedWithDetailsException extends RetriesExhaustedException { List exceptions; diff --git src/main/java/org/apache/hadoop/hbase/client/RowLock.java src/main/java/org/apache/hadoop/hbase/client/RowLock.java index 56b0787..f20c7e2 100644 --- src/main/java/org/apache/hadoop/hbase/client/RowLock.java +++ src/main/java/org/apache/hadoop/hbase/client/RowLock.java @@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.client; /** * Holds row name and lock id. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class RowLock { private byte [] row = null; private long lockId = -1L; diff --git src/main/java/org/apache/hadoop/hbase/client/Scan.java src/main/java/org/apache/hadoop/hbase/client/Scan.java index 0fa6b02..9eebaf3 100644 --- src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -81,6 +81,8 @@ import java.util.TreeSet; * Expert: To explicitly disable server-side block caching for this scan, * execute {@link #setCacheBlocks(boolean)}. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Scan extends OperationWithAttributes implements Writable { private static final String RAW_ATTR = "_raw_"; diff --git src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 4a4679c..08cb08a 100644 --- src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -36,6 +36,8 @@ import org.apache.hadoop.net.DNS; * Retries scanner operations such as create, next, etc. * Used by {@link ResultScanner}s made by {@link HTable}. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ScannerCallable extends ServerCallable { private static final Log LOG = LogFactory.getLog(ScannerCallable.class); private long scannerId = -1L; diff --git src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java index 5a10b0e..0939f1b 100644 --- src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java +++ src/main/java/org/apache/hadoop/hbase/client/ScannerTimeoutException.java @@ -25,6 +25,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; /** * Thrown when a scanner has timed out. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ScannerTimeoutException extends DoNotRetryIOException { private static final long serialVersionUID = 8788838690290688313L; diff --git src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java index d99f02a..4cc97a9 100644 --- src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.io.hfile.Compression; /** * Immutable HColumnDescriptor */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { /** @@ -90,4 +92,4 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { public void setCompressionType(Compression.Algorithm type) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index 27d1faa..2a8f6cc 100644 --- 
src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; /** * Read-only table descriptor. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class UnmodifyableHTableDescriptor extends HTableDescriptor { /** Default constructor */ public UnmodifyableHTableDescriptor() { diff --git src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 0d4a9e4..99e4b23 100644 --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -55,6 +55,8 @@ import org.apache.hadoop.hbase.util.Pair; * parameter type. For average and std, it returns a double value. For row * count, it returns a long value. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class AggregationClient { private static final Log log = LogFactory.getLog(AggregationClient.class); diff --git src/main/java/org/apache/hadoop/hbase/client/coprocessor/Exec.java src/main/java/org/apache/hadoop/hbase/client/coprocessor/Exec.java index 504bd77..6336d3f 100644 --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/Exec.java +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/Exec.java @@ -51,6 +51,8 @@ import java.lang.reflect.Method; * @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) * @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class Exec extends Invocation implements Row { private Configuration 
conf = HBaseConfiguration.create(); /** Row key used as a reference for any region lookups */ diff --git src/main/java/org/apache/hadoop/hbase/client/coprocessor/ExecResult.java src/main/java/org/apache/hadoop/hbase/client/coprocessor/ExecResult.java index 065b3f2..e246dee 100644 --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/ExecResult.java +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/ExecResult.java @@ -46,6 +46,8 @@ import java.io.Serializable; * @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call) * @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback) */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ExecResult implements Writable { private byte[] regionName; private Object value; diff --git src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java index c37b5fd..6c8dee6 100644 --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.util.Bytes; * TestAggregateProtocol methods for its sample usage. * Its methods handle null arguments gracefully. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class LongColumnInterpreter implements ColumnInterpreter { public Long getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv) @@ -103,4 +105,4 @@ public class LongColumnInterpreter implements ColumnInterpreter { return o; } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java index c5faafb..d0ab35f 100644 --- src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java +++ src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java @@ -45,6 +45,8 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong; * However, there is no need for this. So they are defined under scan operation * for now. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ScanMetrics implements Writable { private static final byte SCANMETRICS_VERSION = (byte)1; diff --git src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 61374d9..77cd608 100644 --- src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -65,6 +65,8 @@ import org.apache.zookeeper.KeeperException; * replication. *

*/ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ReplicationAdmin implements Closeable { private final ReplicationZookeeper replicationZk; diff --git src/main/java/org/apache/hadoop/hbase/io/DoubleOutputStream.java src/main/java/org/apache/hadoop/hbase/io/DoubleOutputStream.java index cf33cd3..913a1f7 100644 --- src/main/java/org/apache/hadoop/hbase/io/DoubleOutputStream.java +++ src/main/java/org/apache/hadoop/hbase/io/DoubleOutputStream.java @@ -28,6 +28,7 @@ import java.io.OutputStream; * {@link #close()} fails on the first stream, it is not called on the second * stream. */ +@InterfaceAudience.Private public class DoubleOutputStream extends OutputStream { private OutputStream out1; private OutputStream out2; diff --git src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 23bbc2b..d373ec6 100644 --- src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes; * *

This file is not splitable. Calls to {@link #midkey()} return null. */ +@InterfaceAudience.Private public class HalfStoreFileReader extends StoreFile.Reader { final Log LOG = LogFactory.getLog(HalfStoreFileReader.class); final boolean top; diff --git src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java index 45eb495..383575d 100644 --- src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java +++ src/main/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java @@ -45,6 +45,7 @@ import org.apache.hadoop.util.ReflectionUtils; * @param key TODO: Parameter K is never used, could be removed. * @param value Expects a Writable or byte []. */ +// TODO: interface audience? public class HbaseMapWritable implements SortedMap, Configurable, Writable, CodeToClassAndBack{ private AtomicReference conf = null; @@ -218,4 +219,4 @@ implements SortedMap, Configurable, Writable, CodeToClassAndBack{ this.instance.put(key, value); } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java index 9002a0f..b93d253 100644 --- src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java +++ src/main/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java @@ -108,6 +108,7 @@ import org.apache.hadoop.io.WritableUtils; * name and reflection to instantiate class was costing in excess of the cell * handling). */ +@InterfaceAudience.Private // TODO really? 
public class HbaseObjectWritable implements Writable, WritableWithSize, Configurable { protected final static Log LOG = LogFactory.getLog(HbaseObjectWritable.class); diff --git src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java index 0cd5213..016e5bd 100644 --- src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java +++ src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java @@ -39,6 +39,8 @@ import org.apache.hadoop.io.WritableComparator; * the underlying byte [] is not copied, just referenced. The backing * buffer is accessed when we go to serialize. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ImmutableBytesWritable implements WritableComparable { private byte[] bytes; diff --git src/main/java/org/apache/hadoop/hbase/io/Reference.java src/main/java/org/apache/hadoop/hbase/io/Reference.java index 219203c..750a63e 100644 --- src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -48,6 +48,7 @@ import org.apache.hadoop.io.Writable; * Note, a region is itself not splitable if it has instances of store file * references. References are cleaned up by compactions. */ +@InterfaceAudience.Private public class Reference implements Writable { private byte [] splitkey; private Range region; @@ -153,4 +154,4 @@ public class Reference implements Writable { in.close(); } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/io/TimeRange.java src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index 12a9b68..5be3676 100644 --- src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.util.Bytes; * or [minStamp,maxStamp) in interval notation. *

* Only used internally; should not be accessed directly by clients. */ +@InterfaceAudience.Public public class TimeRange implements Writable { private long minStamp = 0L; diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java index 34513f1..ce4eafb 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java @@ -34,6 +34,7 @@ import org.apache.hadoop.io.Writable; * the owning Table is included in the summarization. * */ +// TODO: stability? public class BlockCacheColumnFamilySummary implements Writable, Comparable { private String table = ""; diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 6e5c289..a476823 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -31,6 +31,7 @@ import org.apache.hadoop.util.StringUtils; /** * Stores all of the cache objects and configuration for a single HFile. */ +@InterfaceAudience.Private public class CacheConfig { private static final Log LOG = LogFactory.getLog(CacheConfig.class.getName()); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 5380810..0b7a7fd 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; /** * Class that implements cache metrics.
*/ +@InterfaceAudience.Private public class CacheStats { /** The number of getBlock requests that were cache hits */ private final AtomicLong hitCount = new AtomicLong(0); @@ -115,4 +116,4 @@ public class CacheStats { public double evictedPerEviction() { return ((float)getEvictedCount()/(float)getEvictionCount()); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 75259fb..a8773f1 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.ClassSize; * either instantiating as in-memory or handling the transition from single * to multiple access. */ +@InterfaceAudience.Private public class CachedBlock implements HeapSize, Comparable { public final static long PER_BLOCK_OVERHEAD = ClassSize.align( @@ -112,4 +113,4 @@ public class CachedBlock implements HeapSize, Comparable { public BlockPriority getPriority() { return this.priority; } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java index 1637fbf..62a0db1 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.io.HeapSize; *

Object used in this queue must implement {@link HeapSize} as well as * {@link Comparable}. */ +@InterfaceAudience.Private public class CachedBlockQueue implements HeapSize { private MinMaxPriorityQueue queue; diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java index 9966008..49812f1 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java @@ -37,6 +37,7 @@ import org.apache.hadoop.util.StringUtils; * combined size and hits and misses of both caches. * **/ +@InterfaceAudience.Private public class DoubleBlockCache implements BlockCache, HeapSize { static final Log LOG = LogFactory.getLog(DoubleBlockCache.class.getName()); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index a3a3b91..366ba32 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -44,6 +44,7 @@ import com.google.common.io.NullOutputStream; * trailer size is fixed within a given {@link HFile} format version only, but * we always store the version number as the last four-byte integer of the file. */ +@InterfaceAudience.Private public class FixedFileTrailer { private static final Log LOG = LogFactory.getLog(FixedFileTrailer.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 5882af9..63ae81e 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -113,6 +113,7 @@ import org.apache.hadoop.io.Writable; * that points at its file say for the case where an index lives apart from * an HFile instance? 
*/ +@InterfaceAudience.Private public class HFile { static final Log LOG = LogFactory.getLog(HFile.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 7fff570..cddecee 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -75,6 +75,7 @@ import static org.apache.hadoop.hbase.io.hfile.Compression.Algorithm.NONE; * The version 2 block representation in the block cache is the same as above, * except that the data section is always uncompressed in the cache. */ +@InterfaceAudience.Private public class HFileBlock extends SchemaConfigured implements Cacheable { /** The size of a version 2 {@link HFile} block header */ diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 3f6ccb6..216b064 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -56,6 +56,7 @@ import org.apache.hadoop.util.StringUtils; * to use the reader can be found in {@link HFileReaderV2} and * TestHFileBlockIndex. */ +@InterfaceAudience.Private public class HFileBlockIndex { private static final Log LOG = LogFactory.getLog(HFileBlockIndex.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 85f9096..eaa53de 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.util.Writables; /** * Implements pretty-printing functionality for {@link HFile}s. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class HFilePrettyPrinter { private static final Log LOG = LogFactory.getLog(HFilePrettyPrinter.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java index 4aa8680..6e620ca 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java @@ -43,6 +43,7 @@ import com.google.common.base.Preconditions; /** * {@link HFile} reader for version 1. */ +@InterfaceAudience.Private public class HFileReaderV1 extends AbstractHFileReader { private static final Log LOG = LogFactory.getLog(HFileReaderV1.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index c1f304e..bc2eb79 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -41,6 +41,7 @@ import org.apache.hadoop.io.WritableUtils; /** * {@link HFile} reader for version 2. */ +@InterfaceAudience.Private public class HFileReaderV2 extends AbstractHFileReader { private static final Log LOG = LogFactory.getLog(HFileReaderV2.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java index e58d0cc..d322ec9 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java @@ -46,6 +46,7 @@ import org.apache.hadoop.io.compress.Compressor; /** * Writes version 1 HFiles. Mainly used for testing backwards-compatibilty. */ +@InterfaceAudience.Private public class HFileWriterV1 extends AbstractHFileWriter { /** Meta data block name for bloom filter parameters. 
*/ diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index bc61a3e..9862ec3 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -46,6 +46,7 @@ import org.apache.hadoop.io.WritableUtils; /** * Writes HFile format version 2. */ +@InterfaceAudience.Private public class HFileWriterV2 extends AbstractHFileWriter { static final Log LOG = LogFactory.getLog(HFileWriterV2.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index dd244c9..d1458ed 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -87,6 +87,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * to free). It then uses the priority chunk sizes to evict fairly according * to the relative sizes and usage. */ +@InterfaceAudience.Private public class LruBlockCache implements BlockCache, HeapSize { static final Log LOG = LogFactory.getLog(LruBlockCache.class); diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java index dc236ed..055dae3 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; /** * Simple one RFile soft reference cache. 
*/ +@InterfaceAudience.Private public class SimpleBlockCache implements BlockCache { private static class Ref extends SoftReference { public String blockId; diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java index c0b1c89..5028df3 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java @@ -51,6 +51,7 @@ import com.google.common.collect.MapMaker; * ConcurrentLinkedHashMap. * **/ +@InterfaceAudience.Private public class SingleSizeCache implements BlockCache, HeapSize { private final Slab backingStore; private final ConcurrentMap backingMap; diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java index 50c33eb..d8b9c1e 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java @@ -51,6 +51,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * correct SingleSizeCache. * **/ +@InterfaceAudience.Private public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize { private final ConcurrentHashMap backingStore; diff --git src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java index b233d10..7cc0dc8 100644 --- src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java +++ src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo; * Implementing as future callable we are able to act on the timeout * asynchronously. 
*/ +@InterfaceAudience.Private public class AssignCallable implements Callable { private AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 111e91c..1c95ddb 100644 --- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -94,6 +94,7 @@ import org.apache.zookeeper.data.Stat; *

* Handles existing regions in transition during master failover. */ +@InterfaceAudience.Private public class AssignmentManager extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(AssignmentManager.class); diff --git src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java index 7dbec87..5507f39 100644 --- src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java +++ src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName; /** * Performs bulk reopen of the list of regions provided to it. */ +@InterfaceAudience.Private public class BulkReOpen extends BulkAssigner { private final Map> rsToRegions; private final AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/DeadServer.java src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index 26e5714..5d0c811 100644 --- src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName; /** * Class to hold dead servers list and utility querying dead server list. */ +@InterfaceAudience.Private public class DeadServer implements Set { /** * Set of known dead servers. On znode expiration, servers are added here. @@ -169,4 +170,4 @@ public class DeadServer implements Set { public synchronized String toString() { return this.deadServers.toString(); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java index d64e147..b6daba5 100644 --- src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java +++ src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java @@ -68,6 +68,7 @@ import com.google.common.collect.Sets; * *

This classes produces plans for the {@link AssignmentManager} to execute. */ +@InterfaceAudience.Private public class DefaultLoadBalancer implements LoadBalancer { private static final Log LOG = LogFactory.getLog(LoadBalancer.class); private static final Random RANDOM = new Random(System.currentTimeMillis()); diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 20d00cb..c79bc4b 100644 --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -115,6 +115,7 @@ import org.apache.zookeeper.Watcher; * @see HMasterRegionInterface * @see Watcher */ +@InterfaceAudience.Private public class HMaster extends HasThread implements HMasterInterface, HMasterRegionInterface, MasterServices, Server { private static final Log LOG = LogFactory.getLog(HMaster.class.getName()); diff --git src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index 629d4fc..8ad9b6b 100644 --- src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.util.ServerCommandLine; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.zookeeper.KeeperException; +@InterfaceAudience.Private public class HMasterCommandLine extends ServerCommandLine { private static final Log LOG = LogFactory.getLog(HMasterCommandLine.class); diff --git src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java index 89685bb..7bc045e 100644 --- src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java +++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java @@ -26,6 +26,7 @@ import org.apache.hadoop.util.ReflectionUtils; /** * The class 
that creates a load balancer from a conf. */ +@InterfaceAudience.Private public class LoadBalancerFactory { /** diff --git src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java index 5a5d9a5..e25da61 100644 --- src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java +++ src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java @@ -41,6 +41,7 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS * This Chore, everytime it runs, will clear the HLogs in the old logs folder * that are deletable for each log cleaner in the chain. */ +@InterfaceAudience.Private public class LogCleaner extends Chore { static final Log LOG = LogFactory.getLog(LogCleaner.class.getName()); diff --git src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 4beafb2..e135c1d 100644 --- src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -33,6 +33,7 @@ import java.io.IOException; * operations. {@link HMaster} interacts with the loaded coprocessors * through this class. 
*/ +@InterfaceAudience.Private public class MasterCoprocessorHost extends CoprocessorHost { diff --git src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java index 876eda4..f03d529 100644 --- src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java +++ src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.util.ReflectionUtils; +@InterfaceAudience.Private public class MasterDumpServlet extends StateDumpServlet { private static final long serialVersionUID = 1L; private static final String LINE = diff --git src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 3938fa7..5891116 100644 --- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.util.FSUtils; * the underlying file system, including splitting log files, checking file * system status, etc. */ +@InterfaceAudience.Private public class MasterFileSystem { private static final Log LOG = LogFactory.getLog(MasterFileSystem.class.getName()); // HBase configuration diff --git src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java index ef3e28b..e94e6a5 100644 --- src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java +++ src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java @@ -41,6 +41,7 @@ import org.apache.hbase.tmpl.master.MasterStatusTmpl; * The servlet responsible for rendering the index page of the * master. 
*/ +@InterfaceAudience.Private public class MasterStatusServlet extends HttpServlet { private static final Log LOG = LogFactory.getLog(MasterStatusServlet.class); private static final long serialVersionUID = 1L; diff --git src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java index 3c1fe83..9dbaa5f 100644 --- src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java +++ src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName; * The comparable implementation of this class compares only the region * information and not the source/dest server info. */ +@InterfaceAudience.Private public class RegionPlan implements Comparable { private final HRegionInfo hri; private final ServerName source; diff --git src/main/java/org/apache/hadoop/hbase/master/ServerManager.java src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index f76a56a..51ff178 100644 --- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; * was started). This is used to differentiate a restarted instance of a given * server from the original instance. */ +@InterfaceAudience.Private public class ServerManager { private static final Log LOG = LogFactory.getLog(ServerManager.class); diff --git src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index b39d997..8d75cad 100644 --- src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -93,6 +93,7 @@ import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.* * again. If a task is resubmitted then there is a risk that old "delete task" * can delete the re-submission. 
*/ +@InterfaceAudience.Private public class SplitLogManager extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(SplitLogManager.class); diff --git src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java index dde8207..f7c5c82 100644 --- src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java +++ src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory; * Log cleaner that uses the timestamp of the hlog to determine if it should * be deleted. By default they are allowed to live for 10 minutes. */ +@InterfaceAudience.Private public class TimeToLiveLogCleaner implements LogCleanerDelegate { static final Log LOG = LogFactory.getLog(TimeToLiveLogCleaner.class.getName()); private Configuration conf; @@ -79,4 +80,4 @@ public class TimeToLiveLogCleaner implements LogCleanerDelegate { public boolean isStopped() { return this.stopped; } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java index 2cbe7e0..0ebbad5 100644 --- src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java +++ src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HRegionInfo; * taken for unassignment of a region in transition. Implementing as future * callable we are able to act on the timeout asynchronously. 
*/ +@InterfaceAudience.Private public class UnAssignCallable implements Callable { private AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java index 2dfc3e7..1f5b39b 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.master.AssignmentManager; *

* Otherwise, assigns the region to another server. */ +@InterfaceAudience.Private public class ClosedRegionHandler extends EventHandler implements TotesHRegionInfo { private static final Log LOG = LogFactory.getLog(ClosedRegionHandler.class); private final AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 4600991..b7d74c0 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -48,6 +48,7 @@ import org.apache.zookeeper.KeeperException; /** * Handler to create a table. */ +@InterfaceAudience.Private public class CreateTableHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(CreateTableHandler.class); private MasterFileSystem fileSystemManager; @@ -187,4 +188,4 @@ public class CreateTableHandler extends EventHandler { " enabled because of a ZooKeeper issue", e); } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 17dd2f9..aa56e8c 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; +@InterfaceAudience.Private public class DeleteTableHandler extends TableEventHandler { private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class); diff --git src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 5af0690..576af8f 100644 --- 
src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -40,6 +40,7 @@ import org.apache.zookeeper.KeeperException; /** * Handler to run disable of a table. */ +@InterfaceAudience.Private public class DisableTableHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(DisableTableHandler.class); private final byte [] tableName; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index baaf279..d4fcc8e 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -40,6 +40,7 @@ import org.apache.zookeeper.KeeperException; /** * Handler to run enable of a table. */ +@InterfaceAudience.Private public class EnableTableHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(EnableTableHandler.class); private final byte [] tableName; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java index 99d77e4..9965da9 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.master.MasterServices; * Shutdown handler for the server hosting -ROOT-, * .META., or both. 
*/ +@InterfaceAudience.Private public class MetaServerShutdownHandler extends ServerShutdownHandler { private final boolean carryingRoot; private final boolean carryingMeta; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index c091816..a2d4b3f 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ipc.HMasterInterface; import org.apache.hadoop.hbase.master.MasterServices; +@InterfaceAudience.Private public class ModifyTableHandler extends TableEventHandler { private final HTableDescriptor htd; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java index f171a5a..ba8e21b 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java @@ -34,6 +34,7 @@ import org.apache.zookeeper.KeeperException; /** * Handles OPENED region event on Master. 
*/ +@InterfaceAudience.Private public class OpenedRegionHandler extends EventHandler implements TotesHRegionInfo { private static final Log LOG = LogFactory.getLog(OpenedRegionHandler.class); private final AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 2dd497b..ab1dc17 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -48,6 +48,7 @@ import org.apache.zookeeper.KeeperException; * Server-to-handle must be already in the deadservers lists. See * {@link ServerManager#expireServer(ServerName)} */ +@InterfaceAudience.Private public class ServerShutdownHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(ServerShutdownHandler.class); private final ServerName serverName; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java index 2d544dd..805e085 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java @@ -36,6 +36,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException; /** * Handles SPLIT region event on Master. 
*/ +@InterfaceAudience.Private public class SplitRegionHandler extends EventHandler implements TotesHRegionInfo { private static final Log LOG = LogFactory.getLog(SplitRegionHandler.class); private final AssignmentManager assignmentManager; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java index d993367..99da1fe 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.master.MasterServices; /** * Handles adding a new family to an existing table. */ +@InterfaceAudience.Private public class TableAddFamilyHandler extends TableEventHandler { private final HColumnDescriptor familyDesc; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index 07f67dd..4a3f9af 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Handles deleting a family from an existing table.
*/ +@InterfaceAudience.Private public class TableDeleteFamilyHandler extends TableEventHandler { private final byte [] familyName; diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java index c6e292e..239f1e5 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Handles modifying a family of an existing table. */ +@InterfaceAudience.Private public class TableModifyFamilyHandler extends TableEventHandler { private final HColumnDescriptor familyDesc; diff --git src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java index 9e4cf73..dac4485 100644 --- src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java +++ src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java @@ -41,6 +41,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; * This class has a number of metrics variables that are publicly accessible; * these variables (objects) have methods to update their values. */ +@InterfaceAudience.Private public class MasterMetrics implements Updater { private final Log LOG = LogFactory.getLog(this.getClass()); private final MetricsRecord metricsRecord; diff --git src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java index d885348..5251280 100644 --- src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java +++ src/main/java/org/apache/hadoop/hbase/master/metrics/MasterStatistics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry; * Exports the {@link MasterMetrics} statistics as an MBean * for JMX.
*/ +@InterfaceAudience.Private public class MasterStatistics extends MetricsMBeanBase { private final ObjectName mbeanName; diff --git src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java index eeb18e8..f2d7238 100644 --- src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java +++ src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java @@ -42,6 +42,7 @@ import org.apache.hadoop.io.WritableComparable; * Contains HRegion id, start and end keys, a reference to this * HRegions' table descriptor, etc. */ +@InterfaceAudience.Private public class HRegionInfo090x extends VersionedWritable implements WritableComparable{ private static final byte VERSION = 0; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java index a617d68..6efccd4 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java @@ -24,6 +24,7 @@ package org.apache.hadoop.hbase.regionserver; *

* NOT thread-safe because it is not used in a multi-threaded context, yet. */ +@InterfaceAudience.Private public class ColumnCount { private final byte [] bytes; private final int offset; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 6e8dfa3..88b3e07 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -38,6 +38,7 @@ import com.google.common.base.Preconditions; /** * Compact region on request and then run split if appropriate */ +@InterfaceAudience.Private public class CompactSplitThread implements CompactionRequestor { static final Log LOG = LogFactory.getLog(CompactSplitThread.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java index e1d69c7..b5150ae 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/DebugPrint.java @@ -23,6 +23,7 @@ import java.io.FileWriter; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +@InterfaceAudience.Private public class DebugPrint { private static final AtomicBoolean enabled = new AtomicBoolean(false); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index 8e25796..9b277e0 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

* This class is NOT thread-safe as queries are never multi-threaded */ +@InterfaceAudience.Private public class ExplicitColumnTracker implements ColumnTracker { private final int maxVersions; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index e26e213..0399ed5 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -152,6 +152,7 @@ import com.google.common.collect.MutableClassToInstanceMap; * regionName is a unique identifier for this HRegion. (startKey, endKey] * defines the keyspace for this HRegion. */ +@InterfaceAudience.Private public class HRegion implements HeapSize { // , Writable{ public static final Log LOG = LogFactory.getLog(HRegion.class); static final String MERGEDIR = "merges"; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index a7266d3..15f0402 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -52,6 +52,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.lang.mutable.MutableDouble; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -161,6 +162,7 @@ import com.google.common.collect.Lists; * HRegionServer makes a set of HRegions available to clients. It checks in with * the HMaster. There are many HRegionServers in a single HBase deployment. 
*/ +@InterfaceAudience.Private public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, Runnable, RegionServerServices { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java index 71b9985..0ccd8e4 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.ServerCommandLine; * Class responsible for parsing the command line and starting the * RegionServer. */ +@InterfaceAudience.Private public class HRegionServerCommandLine extends ServerCommandLine { private static final Log LOG = LogFactory.getLog(HRegionServerCommandLine.class); @@ -84,4 +85,4 @@ public class HRegionServerCommandLine extends ServerCommandLine { return -1; } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java index 18b6c13..4985d48 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java @@ -62,6 +62,7 @@ import org.apache.thrift.transport.TTransportFactory; *

* This can be enabled with hbase.regionserver.export.thrift set to true. */ +@InterfaceAudience.Private public class HRegionThriftServer extends Thread { public static final Log LOG = LogFactory.getLog(HRegionThriftServer.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index c840e7c..cf8432a 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -40,6 +40,7 @@ import java.util.PriorityQueue; * also implements InternalScanner. WARNING: As is, if you try to use this * as an InternalScanner at the Store level, you will get runtime exceptions. */ +@InterfaceAudience.Private public class KeyValueHeap extends NonLazyKeyValueScanner implements KeyValueScanner, InternalScanner { private PriorityQueue heap = null; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java index cafbb28..8be2178 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; /** * Reports a problem with a lease */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class LeaseException extends DoNotRetryIOException { private static final long serialVersionUID = 8179703995292418650L; @@ -39,4 +41,4 @@ public class LeaseException extends DoNotRetryIOException { public LeaseException(String message) { super(message); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index c518521..bc5e288 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ 
src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -51,6 +51,7 @@ import java.io.IOException; * can be interrupted when there is something to do, rather than the Chore * sleep time which is invariant. */ +@InterfaceAudience.Private public class Leases extends HasThread { private static final Log LOG = LogFactory.getLog(Leases.class.getName()); private final int leasePeriod; @@ -297,4 +298,4 @@ public class Leases extends HasThread { this.expirationTime = expirationTime; } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java index 161ae18..1abc563 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java @@ -48,6 +48,7 @@ import java.util.Set; *

* This class contains internal synchronization and is thread-safe. */ +@InterfaceAudience.Private public class LruHashMap implements HeapSize, Map { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 5c2d72c..b6f4d71 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.util.ClassSize; * TODO: With new KVSLS, need to make sure we update HeapSize with difference * in KV size. */ +@InterfaceAudience.Private public class MemStore implements HeapSize { private static final Log LOG = LogFactory.getLog(MemStore.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index cbb76e8..6449e59 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -46,6 +46,7 @@ import com.google.common.base.Preconditions; * Bytes.toLong/Bytes.toInt calls in KeyValue, but some of those are cached * anyway */ +@InterfaceAudience.Private public class MemStoreLAB { private AtomicReference curChunk = new AtomicReference(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java index 6b28f03..76cf410 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConsistencyControl.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.Log; * a mechanism for writers to obtain new write numbers, then "commit" * the new writes for readers to read (thus forming atomic transactions). 
*/ +@InterfaceAudience.Private public class MultiVersionConsistencyControl { private volatile long memstoreRead = 0; private volatile long memstoreWrite = 0; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java index 4881fc0..7255f02 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/NoSuchColumnFamilyException.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; /** * Thrown if request for nonexistent column family. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class NoSuchColumnFamilyException extends DoNotRetryIOException { private static final long serialVersionUID = -6569952730832331274L; @@ -38,4 +40,4 @@ public class NoSuchColumnFamilyException extends DoNotRetryIOException { public NoSuchColumnFamilyException(String message) { super(message); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java index b6f7456..fb3e074 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/OperationStatus.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hbase.HConstants.OperationStatusCode; * the operation status in future. * */ +@InterfaceAudience.Public // TODO? 
+@InterfaceStability.Evolving public class OperationStatus { private OperationStatusCode code; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java index df33d82..0fbc399 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.monitoring.StateDumpServlet; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.util.ReflectionUtils; +@InterfaceAudience.Private public class RSDumpServlet extends StateDumpServlet { private static final long serialVersionUID = 1L; private static final String LINE = diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java index 0f1fd04..c9e8920 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java @@ -28,6 +28,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.hbase.tmpl.regionserver.RSStatusTmpl; +@InterfaceAudience.Private public class RSStatusServlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionAlreadyInTransitionException.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionAlreadyInTransitionException.java index 1c21825..5bffd4a 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionAlreadyInTransitionException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionAlreadyInTransitionException.java @@ -25,6 +25,7 @@ import java.io.IOException; * This exception is thrown when a region server is asked to open or close * a region but it's already processing it */ +@InterfaceAudience.Private public class 
RegionAlreadyInTransitionException extends IOException { public RegionAlreadyInTransitionException(String s) { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 7cee17c..29c84e6 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -52,6 +52,8 @@ import java.util.regex.Matcher; * Implements the coprocessor environment and runtime support for coprocessors * loaded within a {@link HRegion}. */ +@InterfaceAudience.Public +@InterfaceStability.Unstable public class RegionCoprocessorHost extends CoprocessorHost { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java index 05c842e..43c550b 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java @@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicLong; * RegionServerAccounting keeps record of some basic real time information about * the Region Server. Currently, it only keeps record the global memstore size. 
*/ +@InterfaceAudience.Private public class RegionServerAccounting { private final AtomicLong atomicGlobalMemstoreSize = new AtomicLong(0); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java index ed36ed7..07d595c 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java @@ -25,6 +25,7 @@ import java.io.IOException; * Thrown if the region server log directory exists (which indicates another * region server is running at the same address) */ +@InterfaceAudience.Private public class RegionServerRunningException extends IOException { private static final long serialVersionUID = 1L << 31 - 1L; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java index 45acb17..7948764 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerStoppedException.java @@ -25,9 +25,11 @@ import java.io.IOException; * Thrown by the region server when it is in shutting down state. 
*/ @SuppressWarnings("serial") +@InterfaceAudience.Public +@InterfaceStability.Stable public class RegionServerStoppedException extends IOException { public RegionServerStoppedException(String s) { super(s); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java index b828669..9b3230d 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

* This class is NOT thread-safe as queries are never multi-threaded */ +@InterfaceAudience.Private public class ScanDeleteTracker implements DeleteTracker { private long familyStamp = -1L; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 8992a56..294d682 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.regionserver.StoreScanner.ScanType; /** * A query matcher that is specifically designed for the scan case. */ +@InterfaceAudience.Private public class ScanQueryMatcher { // Optimization so we can skip lots of compares when we decide to skip // to the next row. diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java index f365512..5841c6e 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Keeps track of the columns for a scan if they are not explicitly specified */ +@InterfaceAudience.Private public class ScanWildcardColumnTracker implements ColumnTracker { private byte [] columnBuffer = null; private int columnOffset = 0; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java index 4ccf68e..f0e0016 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/ShutdownHook.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.util.Threads; * Manage regionserver shutdown hooks. 
* @see #install(Configuration, FileSystem, Stoppable, Thread) */ +@InterfaceAudience.Private public class ShutdownHook { private static final Log LOG = LogFactory.getLog(ShutdownHook.class); private static final String CLIENT_FINALIZER_DATA_METHOD = "clientFinalizer"; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 0f119e5..38fe9ff 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -65,6 +65,7 @@ import org.apache.zookeeper.data.Stat; * is stripped of its ownership. Here we rely on the idempotency of the log * splitting task for correctness */ +@InterfaceAudience.Private public class SplitLogWorker extends ZooKeeperListener implements Runnable { private static final Log LOG = LogFactory.getLog(SplitLogWorker.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 08b7de3..7891675 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -82,6 +82,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; *

This class is not thread safe. Caller needs ensure split is run by * one thread only. */ +@InterfaceAudience.Private public class SplitTransaction { private static final Log LOG = LogFactory.getLog(SplitTransaction.class); private static final String SPLITDIR = "splits"; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index ade34dd..46edfe6 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -95,6 +95,7 @@ import com.google.common.collect.Lists; *

Locking and transactions are handled at a higher level. This API should * not be called directly but by an HRegion manager. */ +@InterfaceAudience.Private public class Store extends SchemaConfigured implements HeapSize { static final Log LOG = LogFactory.getLog(Store.class); protected final MemStore memstore; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 6c0dd69..5a921e0 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -82,6 +82,7 @@ import com.google.common.collect.Ordering; * The reason for this weird pattern where you use a different instance for the * writer and a reader is that we write once but read a lot more. */ +@InterfaceAudience.Private public class StoreFile { static final Log LOG = LogFactory.getLog(StoreFile.class.getName()); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index d3f1c65..8021ab2 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -36,6 +36,7 @@ import org.apache.hadoop.io.Writable; * When writing StoreFiles, this information is stored in meta blocks and used * at read time to match against the required TimeRange */ +@InterfaceAudience.Private public class TimeRangeTracker implements Writable { long minimumTimestamp = -1; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java index 52b9a6c..459125c 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/WrongRegionException.java @@ -24,6 +24,7 @@ import java.io.IOException; 
/** * Thrown when a request contains a key which is not part of this region */ +@InterfaceAudience.Private public class WrongRegionException extends IOException { private static final long serialVersionUID = 993179627856392526L; @@ -39,4 +40,4 @@ public class WrongRegionException extends IOException { public WrongRegionException(String s) { super(s); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java index 9bc66e1..784aa57 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionProgress.java @@ -29,6 +29,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; * total amount scheduled to be compacted. * */ +@InterfaceAudience.Private public class CompactionProgress { /** the total compacting key values in currently running compaction */ diff --git src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 7544b71..8d9c145 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -43,6 +43,7 @@ import com.google.common.collect.Collections2; /** * This class holds all details necessary to run a compaction. 
*/ +@InterfaceAudience.Private public class CompactionRequest implements Comparable, Runnable { static final Log LOG = LogFactory.getLog(CompactionRequest.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java index e8e95ed..31d304b 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; /** * Handles closing of the root region on a region server. */ +@InterfaceAudience.Private public class CloseMetaHandler extends CloseRegionHandler { // Called when master tells us shutdown a region via close rpc public CloseMetaHandler(final Server server, @@ -40,4 +41,4 @@ public class CloseMetaHandler extends CloseRegionHandler { final boolean abort, final boolean zk) { super(server, rsServices, regionInfo, abort, zk, EventType.M_RS_CLOSE_META); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index a25ca32..63413f7 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -34,6 +34,7 @@ import org.apache.zookeeper.KeeperException; /** * Handles closing of a region on a region server. */ +@InterfaceAudience.Private public class CloseRegionHandler extends EventHandler { // NOTE on priorities shutting down. There are none for close. There are some // for open. I think that is right. 
On shutdown, we want the meta to close diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java index fa38ad6..d86c9e7 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRootHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; /** * Handles closing of the root region on a region server. */ +@InterfaceAudience.Private public class CloseRootHandler extends CloseRegionHandler { // This is executed after receiving an CLOSE RPC from the master for root. public CloseRootHandler(final Server server, @@ -40,4 +41,4 @@ public class CloseRootHandler extends CloseRegionHandler { final boolean abort, final boolean zk) { super(server, rsServices, regionInfo, abort, zk, EventType.M_RS_CLOSE_ROOT); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java index 66e5706..5114b39 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; *

* This is executed after receiving an OPEN RPC from the master for meta. */ +@InterfaceAudience.Private public class OpenMetaHandler extends OpenRegionHandler { public OpenMetaHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, @@ -41,4 +42,4 @@ public class OpenMetaHandler extends OpenRegionHandler { super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_META, versionOfOfflineNode); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index 3ef364e..0322b03 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -40,6 +40,7 @@ import org.apache.zookeeper.KeeperException; *

* This is executed after receiving an OPEN RPC from the master or client. */ +@InterfaceAudience.Private public class OpenRegionHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(OpenRegionHandler.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java index 9a4f01a..910caeb 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; *

* This is executed after receiving an OPEN RPC from the master for root. */ +@InterfaceAudience.Private public class OpenRootHandler extends OpenRegionHandler { public OpenRootHandler(final Server server, final RegionServerServices rsServices, HRegionInfo regionInfo, diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java index c34874c..4ef854f 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java @@ -49,6 +49,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate; * for example: rpcQueueTime.inc(time) * */ +@InterfaceAudience.Private public class RegionServerDynamicMetrics implements Updater { private MetricsRecord metricsRecord; private MetricsContext context; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java index c250d81..8795281 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicStatistics.java @@ -31,6 +31,7 @@ import javax.management.ObjectName; * {@link RegionServerDynamicMetrics} as an MBean * for JMX monitoring. 
*/ +@InterfaceAudience.Private public class RegionServerDynamicStatistics extends MetricsDynamicMBeanBase { private final ObjectName mbeanName; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index d1db857..0934812 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -52,6 +52,7 @@ import java.util.List; * This class has a number of metrics variables that are publicly accessible; * these variables (objects) have methods to update their values. */ +@InterfaceAudience.Private public class RegionServerMetrics implements Updater { @SuppressWarnings({"FieldCanBeLocal"}) private final Log LOG = LogFactory.getLog(this.getClass()); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java index 04fe7b1..ac8eeae 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerStatistics.java @@ -29,6 +29,7 @@ import javax.management.ObjectName; * Exports metrics recorded by {@link RegionServerMetrics} as an MBean * for JMX monitoring. 
*/ +@InterfaceAudience.Private public class RegionServerStatistics extends MetricsMBeanBase { private final ObjectName mbeanName; @@ -44,4 +45,4 @@ public class RegionServerStatistics extends MetricsMBeanBase { MBeanUtil.unregisterMBean(mbeanName); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java index d8ac453..d733b1c 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaConfigured.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.util.ClassSize; * from another similar object. For example, an HFile reader configures HFile * blocks it reads with its own table/CF name. */ +@InterfaceAudience.Private public class SchemaConfigured implements HeapSize, SchemaAware { private static final Log LOG = LogFactory.getLog(SchemaConfigured.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java index cef50d4..54dfd5f 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/SchemaMetrics.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.util.Pair; * * */ +@InterfaceAudience.Private public class SchemaMetrics { public interface SchemaAware { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java index 393b1d2..bd31bd3 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java @@ -25,6 +25,7 @@ import java.io.IOException; * Thrown when we fail close of the 
write-ahead-log file. * Package private. Only used inside this package. */ +@InterfaceAudience.Private public class FailedLogCloseException extends IOException { private static final long serialVersionUID = 1759152841462990925L; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 1592f0f..5b812fa 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -110,6 +110,7 @@ import org.apache.hadoop.util.StringUtils; * org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration)}. * */ +@InterfaceAudience.Private public class HLog implements Syncable { static final Log LOG = LogFactory.getLog(HLog.class); public static final byte [] METAFAMILY = Bytes.toBytes("METAFAMILY"); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java index f067221..5848d5b 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java @@ -42,6 +42,7 @@ import org.apache.hadoop.io.WritableUtils; *

Some Transactional edits (START, COMMIT, ABORT) will not have an * associated row. */ +@InterfaceAudience.Private public class HLogKey implements WritableComparable { // should be < 0 (@see #readFields(DataInput)) private static final int VERSION = -1; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java index d0e85f3..c728530 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java @@ -54,6 +54,8 @@ import org.codehaus.jackson.map.ObjectMapper; * It can also toggle output of values. * */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class HLogPrettyPrinter { private boolean outputValues; private boolean outputJSON; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 45426ca..3d5784e 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -67,6 +67,7 @@ import com.google.common.collect.Lists; * files that are no longer being written to, into new files, one per region for * region to replay on startup. Delete the old log files when finished. 
*/ +@InterfaceAudience.Private public class HLogSplitter { private static final String LOG_SPLITTER_IMPL = "hbase.hlog.splitter.impl"; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java index 1c93def..8441a56 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/OrphanHLogAfterSplitException.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import java.io.IOException; +@InterfaceAudience.Private public class OrphanHLogAfterSplitException extends IOException { /** diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java index 497c5d0..2334122 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.SequenceFile; +@InterfaceAudience.Private public class SequenceFileLogReader implements HLog.Reader { private static final Log LOG = LogFactory.getLog(SequenceFileLogReader.class); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index a3eb32f..0afa569 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -42,6 +42,7 @@ import org.apache.hadoop.io.compress.DefaultCodec; * Implementation of {@link HLog.Writer} that delegates to * SequenceFile.Writer. 
*/ +@InterfaceAudience.Private public class SequenceFileLogWriter implements HLog.Writer { private final Log LOG = LogFactory.getLog(this.getClass()); // The sequence file we delegate to. diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java index b14e190..dec22d3 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.java @@ -32,6 +32,8 @@ import org.apache.hadoop.conf.Configuration; * Implements the coprocessor environment and runtime support for coprocessors * loaded within a {@link HLog}. */ +@InterfaceAudience.Public +@InterfaceStability.Unstable public class WALCoprocessorHost extends CoprocessorHost { diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index e1117ef..43d9c82 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -67,6 +67,7 @@ import org.apache.hadoop.io.Writable; * is an old style KeyValue or the new style WALEdit. * */ +@InterfaceAudience.Private public class WALEdit implements Writable, HeapSize { private final int VERSION_2 = -1; diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java index 0b1b647..e8d73ac 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterId.java @@ -30,6 +30,7 @@ import org.apache.zookeeper.KeeperException; * master on startup, and is subsequently available to all watchers (including * clients). */ +@InterfaceAudience.Private // TODO is it? 
public class ClusterId { private ZooKeeperWatcher watcher; private Abortable abortable; diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java index 7e1a952..d80f2dc 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java @@ -33,6 +33,7 @@ import org.apache.zookeeper.KeeperException; * cluster attributes up in zookeeper. * */ +@InterfaceAudience.Private public class ClusterStatusTracker extends ZooKeeperNodeTracker { private static final Log LOG = LogFactory.getLog(ClusterStatusTracker.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java index e90edf9..e199eb7 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java @@ -44,6 +44,7 @@ import org.apache.zookeeper.KeeperException; * {@link ServerManager#addServerToDrainList(ServerName)} * */ +@InterfaceAudience.Private public class DrainingServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(DrainingServerTracker.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index d551c6f..5090ec3 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -47,6 +47,7 @@ import org.apache.zookeeper.server.quorum.QuorumPeerMain; * control over the process. This class uses {@link ZKConfig} to parse the * zoo.cfg and inject variables from HBase's site.xml configuration in. 
*/ +@InterfaceAudience.Private public class HQuorumPeer { /** diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/MasterSchemaChangeTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/MasterSchemaChangeTracker.java index 7d0ae63..64ab340 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/MasterSchemaChangeTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/MasterSchemaChangeTracker.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; import org.apache.zookeeper.KeeperException; +@InterfaceAudience.Private public class MasterSchemaChangeTracker extends ZooKeeperNodeTracker { public static final Log LOG = LogFactory.getLog(MasterSchemaChangeTracker.class); private final MasterServices masterServices; diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java index 4da6f96..7c307b9 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo; * receive any notification for that assignment. You will receive a * notification after META has been successfully assigned to a new location. */ +@InterfaceAudience.Private public class MetaNodeTracker extends ZooKeeperNodeTracker { /** * Creates a meta node tracker. 
@@ -44,4 +45,4 @@ public class MetaNodeTracker extends ZooKeeperNodeTracker { public void nodeDeleted(String path) { super.nodeDeleted(path); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java index 786ae21..2fd9998 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java @@ -46,6 +46,7 @@ import org.apache.zookeeper.server.persistence.FileTxnLog; * of redoing it, we should contribute updates to their code which let us more * easily access testing helper objects. */ +@InterfaceAudience.Private public class MiniZooKeeperCluster { private static final Log LOG = LogFactory.getLog(MiniZooKeeperCluster.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index a097ec0..0441874 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -61,6 +61,7 @@ import org.apache.zookeeper.data.Stat; * succeeded an the znode it created is "x-352-109". * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling" */ +@InterfaceAudience.Private public class RecoverableZooKeeper { private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class); // the actual ZooKeeper client instance diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java index 225df66..a20d324 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java @@ -42,6 +42,7 @@ import org.apache.zookeeper.KeeperException; *

If an RS node gets deleted, this automatically handles calling of * {@link ServerManager#expireServer(ServerName)} */ +@InterfaceAudience.Private public class RegionServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); private NavigableSet regionServers = new TreeSet(); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java index 48a8b3d..f4424cf 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RootRegionTracker.java @@ -32,6 +32,7 @@ import org.apache.zookeeper.KeeperException; * out of RegionServerServices. * This class has a watcher on the root location and notices changes. */ +@InterfaceAudience.Private public class RootRegionTracker extends ZooKeeperNodeTracker { /** * Creates a root region location tracker. diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java index 9233eee..0cc104e 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/SchemaChangeTracker.java @@ -40,6 +40,7 @@ import java.util.List; * alter schema requests from master and updates the status once the schema change * is complete. 
*/ +@InterfaceAudience.Private public class SchemaChangeTracker extends ZooKeeperNodeTracker { public static final Log LOG = LogFactory.getLog(SchemaChangeTracker.class); private RegionServerServices regionServer = null; diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java index f14b026..06da537 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java @@ -88,6 +88,7 @@ import org.apache.zookeeper.data.Stat; * * */ +@InterfaceAudience.Private public class ZKAssign { private static final Log LOG = LogFactory.getLog(ZKAssign.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index bf5de8d..c408040 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -37,6 +37,7 @@ import org.apache.hadoop.util.StringUtils; /** * Utility methods for reading, parsing, and building zookeeper configuration. */ +@InterfaceAudience.Private public class ZKConfig { private static final Log LOG = LogFactory.getLog(ZKConfig.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java index f201799..6c824d1 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java @@ -36,6 +36,7 @@ import org.apache.zookeeper.KeeperException; * be removed, and all waiting instances will be notified, with the race * to claim the leader znode beginning all over again. 
*/ +@InterfaceAudience.Private public class ZKLeaderManager extends ZooKeeperListener { private static Log LOG = LogFactory.getLog(ZKLeaderManager.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java index 500bd3c..ae5eaa5 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration; * Tool for reading ZooKeeper servers from HBase XML configuration and producing * a line-by-line list for use by bash scripts. */ +@InterfaceAudience.Public +@InterfaceStability.Stable public class ZKServerTool { /** * Run the tool. @@ -51,4 +53,4 @@ public class ZKServerTool { } } } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java index 9b83840..7580899 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes; * Common methods and attributes used by {@link SplitLogManager} and * {@link SplitLogWorker} */ +@InterfaceAudience.Private public class ZKSplitLog { private static final Log LOG = LogFactory.getLog(ZKSplitLog.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java index 4930f66..449a377 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java @@ -42,6 +42,7 @@ import org.apache.zookeeper.KeeperException; *

To save on trips to the zookeeper ensemble, internally we cache table * state. */ +@InterfaceAudience.Private public class ZKTable { // A znode will exist under the table directory if it is in any of the // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 132d1c2..0bf1f42 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -54,6 +54,7 @@ import org.apache.zookeeper.data.Stat; * zookeeper exception, so callers of these methods must handle appropriately. * If ZK is required for the operation, the server will need to be aborted. */ +@InterfaceAudience.Private public class ZKUtil { private static final Log LOG = LogFactory.getLog(ZKUtil.class); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServerArg.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServerArg.java index c662a5b..5b58b83 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServerArg.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServerArg.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; * members plus zk client port OR it emits '' if no zk servers found (Yes, * it emits '-server' too). */ +@InterfaceAudience.Private public class ZooKeeperMainServerArg { public String parse(final Configuration c) { // Note that we do not simply grab the property @@ -65,4 +66,4 @@ public class ZooKeeperMainServerArg { System.out.println((hostport == null || hostport.length() == 0)? 
"": "-server " + hostport); } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 3e3d131..1c6e15f 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -51,6 +51,7 @@ import org.apache.zookeeper.data.ACL; *

This class also holds and manages the connection to ZooKeeper. Code to * deal with connection related events and exceptions are handled here. */ +@InterfaceAudience.Private public class ZooKeeperWatcher implements Watcher, Abortable { private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class);