commit 35103fa2b6c5f91f034eaaa78c3c61369570da12 Author: 绝顶 Date: Tue Feb 2 14:41:10 2016 +0800 HBASE-15185 Fix jdk8 javadoc warnings for branch-1.1 diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index b7bfa75..647bf6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -60,6 +60,7 @@ public interface CoordinatedStateManager { * Returns implementation of TableStateManager. * @throws InterruptedException if operation is interrupted * @throws CoordinatedStateException if error happens in underlying coordination mechanism + * @return instance of TableStateManager */ TableStateManager getTableStateManager() throws InterruptedException, CoordinatedStateException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java index 0631958..fd7d2f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java @@ -225,7 +225,7 @@ public class HDFSBlocksDistribution { } /** - * return the sorted list of hosts in terms of their weights + * @return the sorted list of hosts in terms of their weights */ public List getTopHosts() { HostAndWeight[] hostAndWeights = getTopHostsWithWeights(); @@ -237,7 +237,7 @@ public class HDFSBlocksDistribution { } /** - * return the sorted list of hosts in terms of their weights + * @return the sorted list of hosts in terms of their weights */ public HostAndWeight[] getTopHostsWithWeights() { NavigableSet orderedHosts = new TreeSet( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java index 23d26dc..c1563bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java @@ -67,6 +67,7 @@ public interface InterProcessLock { * to the given timeout. Lock reaping is different than coordinated lock revocation * in that, there is no coordination, and the behavior is undefined if the * lock holder is still alive. + * @param expireTimeoutMs time out in milliseconds * @throws IOException If there is an unrecoverable error reaping the locks */ void reapExpiredLocks(long expireTimeoutMs) throws IOException; @@ -99,7 +100,8 @@ public interface InterProcessLock { /** * Visits the locks (both held and attempted) of this type with the given * {@link MetadataHandler}. - * @throws InterruptedException If there is an unrecoverable error + * @param handler the metadata handler to call + * @throws IOException If there is an unrecoverable error */ void visitLocks(MetadataHandler handler) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 7934a74..cb2227a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -75,8 +75,8 @@ public class LocalHBaseCluster { /** * Constructor. 
- * @param conf - * @throws IOException + * @param conf the Configuration instance + * @throws IOException if an IO error occurs */ public LocalHBaseCluster(final Configuration conf) throws IOException { @@ -88,7 +88,7 @@ public class LocalHBaseCluster { * @param conf Configuration to use. Post construction has the master's * address. * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @throws IOException if an IO error occurs */ public LocalHBaseCluster(final Configuration conf, final int noRegionServers) throws IOException { @@ -102,7 +102,7 @@ * address. * @param noMasters Count of masters to start. * @param noRegionServers Count of regionservers to start. - * @throws IOException + * @throws IOException if an IO error occurs */ public LocalHBaseCluster(final Configuration conf, final int noMasters, final int noRegionServers) @@ -129,9 +129,9 @@ * address. * @param noMasters Count of masters to start. * @param noRegionServers Count of regionservers to start. - * @param masterClass - * @param regionServerClass - * @throws IOException + * @param masterClass class of the master + * @param regionServerClass class of the regionserver + * @throws IOException if an IO error occurs */ @SuppressWarnings("unchecked") public LocalHBaseCluster(final Configuration conf, final int noMasters, @@ -234,7 +234,7 @@ } /** - * @param serverNumber + * @param serverNumber index of regionserver * @return region server */ public HRegionServer getRegionServer(int serverNumber) { @@ -274,7 +274,7 @@ /** * Wait for the specified region server to stop * Removes this thread from list of running threads. - * @param serverNumber + * @param serverNumber index of regionserver * @return Name of region server that just went down. */ public String waitOnRegionServer(int serverNumber) { @@ -295,7 +295,7 @@ /** * Wait for the specified region server to stop * Removes this thread from list of running threads. - * @param rst + * @param rst the RegionServerThread to check against * @return Name of region server that just went down. */ public String waitOnRegionServer(JVMClusterUtil.RegionServerThread rst) { @@ -318,7 +318,7 @@ } /** - * @param serverNumber + * @param serverNumber index of master * @return the HMaster thread */ public HMaster getMaster(int serverNumber) { @@ -370,7 +370,7 @@ /** * Wait for the specified master to stop * Removes this thread from list of running threads. - * @param serverNumber + * @param serverNumber index of master * @return Name of master that just went down. */ public String waitOnMaster(int serverNumber) { @@ -389,7 +389,7 @@ /** * Wait for the specified master to stop * Removes this thread from list of running threads. - * @param masterThread + * @param masterThread the MasterThread to wait for * @return Name of master that just went down. */ public String waitOnMaster(JVMClusterUtil.MasterThread masterThread) { @@ -442,6 +442,7 @@ /** * Start the cluster. + * @throws IOException if an IO error occurs */ public void startup() throws IOException { JVMClusterUtil.startup(this.masterThreads, this.regionThreads); @@ -465,8 +466,8 @@ /** * Test things basically work. 
- * @param args - * @throws IOException + * @param args arguments to run with + * @throws IOException if an IO error occurs */ public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 365c0b8..a7eb80c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -34,11 +34,13 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; public interface Server extends Abortable, Stoppable { /** * Gets the configuration object for this server. + * @return the configuration instance */ Configuration getConfiguration(); /** * Gets the ZooKeeper instance for this server. + * @return the Zookeeper instance */ ZooKeeperWatcher getZooKeeper(); @@ -47,6 +49,7 @@ public interface Server extends Abortable, Stoppable { * * Important note: this method returns a reference to Connection which is managed * by Server itself, so callers must NOT attempt to close connection obtained. + * @return the ClusterConnection reference */ ClusterConnection getConnection(); @@ -65,6 +68,7 @@ public interface Server extends Abortable, Stoppable { /** * Get CoordinatedStateManager instance for this server. + * @return the CoordinatedStateManager instance */ CoordinatedStateManager getCoordinatedStateManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index 1feb417..53d7f19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -153,7 +153,7 @@ public class SplitLogTask { /** * @param data Serialized date to parse. * @return An SplitLogTaskState instance made of the passed data - * @throws DeserializationException + * @throws DeserializationException if failed to deserialize the given data * @see #toByteArray() */ public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index 33ae1d5..2862827 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -29,17 +29,18 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public interface TableDescriptors { /** - * @param tableName + * @param tableName the fully qualified table name instance of the table. * @return HTableDescriptor for tablename - * @throws IOException + * @throws IOException if an IO error occurs */ HTableDescriptor get(final TableName tableName) throws IOException; /** * Get Map of all NamespaceDescriptors for a given namespace. + * @param name the name of the namespace * @return Map of all descriptors. - * @throws IOException + * @throws IOException if an IO error occurs */ Map getByNamespace(String name) throws IOException; @@ -48,7 +49,7 @@ public interface TableDescriptors { * Get Map of all HTableDescriptors. Populates the descriptor cache as a * side effect. * @return Map of all descriptors. 
- * @throws IOException + * @throws IOException if an IO error occurs */ Map getAll() throws IOException; @@ -56,26 +57,28 @@ public interface TableDescriptors { /** * Add or update descriptor * @param htd Descriptor to set into TableDescriptors - * @throws IOException + * @throws IOException if an IO error occurs */ void add(final HTableDescriptor htd) throws IOException; /** - * @param tablename + * @param tablename the fully qualified table name instance of the table. * @return Instance of table descriptor or null if none found. - * @throws IOException + * @throws IOException if an IO error occurs */ HTableDescriptor remove(final TableName tablename) throws IOException; /** * Enables the tabledescriptor cache + * @throws IOException if an IO error occurs */ void setCacheOn() throws IOException; /** * Disables the tabledescriptor cache + * @throws IOException if an IO error occurs */ void setCacheOff() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java index 21c09b8..3ff1629 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java @@ -73,11 +73,16 @@ public interface TableStateManager { throws CoordinatedStateException; /** + * @param tableName the fully qualified table name instance of the table + * @param states the list of states * @return true if the table is in any one of the listed states, false otherwise. */ boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states); /** + * @param tableName the fully qualified table name instance of the table + * @param checkSource whether to check source on zookeeper + * @param states the list of states * @return true if the table is in any one of the listed states, false otherwise. */ boolean isTableState(TableName tableName, boolean checkSource, @@ -99,7 +104,10 @@ public interface TableStateManager { boolean isTablePresent(TableName tableName); /** + * @param states the list of states * @return set of tables which are in any one of the listed states, empty Set if none + * @throws InterruptedIOException if interrupted during operation + * @throws CoordinatedStateException if error happened while performing operation */ Set getTablesInStates(ZooKeeperProtos.Table.State... states) throws InterruptedIOException, CoordinatedStateException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index ce32aab..bc5747c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -51,6 +51,7 @@ public class ZNodeClearer { /** * Logs the errors without failing on exception. + * @param fileContent content to write to znode file */ public static void writeMyEphemeralNodeOnDisk(String fileContent) { String fileName = ZNodeClearer.getMyEphemeralNodeFileName(); @@ -87,6 +88,8 @@ public class ZNodeClearer { /** * read the content of znode file, expects a single line. 
+ * @return the ephemeral node + * @throws IOException if an IO error occurs */ public static String readMyEphemeralNodeOnDisk() throws IOException { String fileName = getMyEphemeralNodeFileName(); @@ -106,6 +109,7 @@ public class ZNodeClearer { /** * Get the name of the file used to store the znode contents + * @return the znode file name */ public static String getMyEphemeralNodeFileName() { return System.getenv().get("HBASE_ZNODE_FILE"); @@ -125,6 +129,7 @@ public class ZNodeClearer { /** * Delete the master znode if its content (ServerName string) is the same * as the one in the znode file. (env: HBASE_ZNODE_FILE). + * @param conf the Configuration instance * @return true on successful deletion, false otherwise. */ public static boolean clear(Configuration conf) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java index 76edbf4..0372911 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationManager.java @@ -85,7 +85,7 @@ public class ConfigurationManager { /** * Register an observer class - * @param observer + * @param observer the observer to register */ public void registerObserver(ConfigurationObserver observer) { synchronized (configurationObservers) { @@ -98,7 +98,7 @@ public class ConfigurationManager { /** * Deregister an observer class - * @param observer + * @param observer the observer to de-register */ public void deregisterObserver(ConfigurationObserver observer) { synchronized (configurationObservers) { @@ -112,6 +112,7 @@ public class ConfigurationManager { /** * The conf object has been repopulated from disk, and we have to notify * all the observers that are expressed interest to do that. + * @param conf the Configuration object to update */ public void notifyAllObservers(Configuration conf) { synchronized (configurationObservers) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java index 211e371..3795579 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/conf/ConfigurationObserver.java @@ -33,6 +33,7 @@ public interface ConfigurationObserver { /** * This method would be called by the {@link ConfigurationManager} * object when the {@link Configuration} object is reloaded from disk. + * @param conf the {@link Configuration} object to reload */ void onConfigurationChange(Configuration conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java index 9bffc5c..d4104a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java @@ -19,7 +19,6 @@ /** * Restrict the domain of a data attribute, often times to fulfill business rules/requirements. * -

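Editor's aside on the ConfigurationManager/ConfigurationObserver hunks above: a minimal, hypothetical sketch of an observer that re-reads its settings when the Configuration is reloaded from disk. The class name ReloadAwareComponent and the property key are invented for illustration; only ConfigurationObserver, onConfigurationChange and ConfigurationManager.registerObserver come from the patched sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;

public class ReloadAwareComponent implements ConfigurationObserver {
  private volatile int someLimit = 10;

  @Override
  public void onConfigurationChange(Configuration conf) {
    // Called by ConfigurationManager.notifyAllObservers() after a reload;
    // re-read whatever settings this component cares about.
    someLimit = conf.getInt("hbase.example.some.limit", 10); // hypothetical key
  }
}

// Registration, e.g. from the owning server component (manager is a ConfigurationManager):
//   manager.registerObserver(new ReloadAwareComponent());
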
Table of Contents

  • Overview
@@ -49,7 +48,6 @@

    By default, constraints are disabled on a table. This means you will not see any slow down on a table if constraints are not enabled. -

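Editor's aside (illustrative, not part of the patch): a minimal sketch of enabling a constraint on a table, assuming the Constraints helper and BaseConstraint class this package provides; MyConstraint, the table name and the column family are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.constraint.Constraints;

public class ConstraintExample {

  /** Hypothetical constraint: accepts every Put, shown only to illustrate the wiring. */
  public static class MyConstraint extends BaseConstraint {
    @Override
    public void check(Put p) throws ConstraintException {
      // validate the Put here; throw ConstraintException to reject it
    }
  }

  public static HTableDescriptor withConstraint() throws IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
    desc.addFamily(new HColumnDescriptor("cf"));
    // Constraints are off by default; adding one enables checking for this table.
    Constraints.add(desc, MyConstraint.class);
    return desc; // pass to Admin.createTable(...) as usual
  }
}
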
    Concurrency and Atomicity

    Currently, no attempts at enforcing correctness in a multi-threaded scenario when modifying a constraint, via diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index f79e5d8..da397f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -54,29 +54,35 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan CoordinatedStateException; /** * Method to retrieve coordination for split log worker + * @return the coordination for split log worker */ public abstract SplitLogWorkerCoordination getSplitLogWorkerCoordination(); /** * Method to retrieve coordination for split log manager + * @return the coordination for split log manager */ public abstract SplitLogManagerCoordination getSplitLogManagerCoordination(); /** * Method to retrieve coordination for split transaction. + * @return the coordination for split transaction */ abstract public SplitTransactionCoordination getSplitTransactionCoordination(); /** * Method to retrieve coordination for closing region operations. + * @return the coordination for closing region operations */ public abstract CloseRegionCoordination getCloseRegionCoordination(); /** * Method to retrieve coordination for opening region operations. + * @return the coordination for opening region operations */ public abstract OpenRegionCoordination getOpenRegionCoordination(); /** * Method to retrieve coordination for region merge transaction + * @return the coordination for region merge transaction */ public abstract RegionMergeCoordination getRegionMergeCoordination(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java index 037d886..7037d9a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/CloseRegionCoordination.java @@ -49,6 +49,7 @@ public interface CloseRegionCoordination { /** * Construct CloseRegionDetails instance from CloseRegionRequest. + * @param request the protobuf request * @return instance of CloseRegionDetails */ CloseRegionDetails parseFromProtoRequest(AdminProtos.CloseRegionRequest request); @@ -57,6 +58,7 @@ public interface CloseRegionCoordination { * Get details object with params for case when we're closing on * regionserver side internally (not because of RPC call from master), * so we don't parse details from protobuf request. + * @return the {@link CloseRegionDetails} object */ CloseRegionDetails getDetaultDetails(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java index 25b743c..5c3710b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/OpenRegionCoordination.java @@ -42,7 +42,7 @@ public interface OpenRegionCoordination { * @param r Region we're working on. 
* @param ord details about region opening task * @return whether transition was successful or not - * @throws java.io.IOException + * @throws java.io.IOException if an IO error occurs */ boolean transitionToOpened(HRegion r, OpenRegionDetails ord) throws IOException; @@ -87,6 +87,7 @@ public interface OpenRegionCoordination { /** * Construct OpenRegionDetails instance from part of protobuf request. + * @param regionOpenInfo the RegionOpenInfo part of the protobuf OpenRegionRequest * @return instance of OpenRegionDetails. */ OpenRegionDetails parseFromProtoRequest(AdminProtos.OpenRegionRequest.RegionOpenInfo @@ -95,6 +96,7 @@ public interface OpenRegionCoordination { /** * Get details object with params for case when we're opening on * regionserver side with all "default" properties. + * @return the {@link OpenRegionDetails} object */ OpenRegionDetails getDetailsForNonCoordinatedOpening(); @@ -105,6 +107,9 @@ public interface OpenRegionCoordination { /** * Commits opening operation on HM side (steps required for "commit" * are determined by coordination implementation). + * @param assignmentManager the {@link AssignmentManager} object + * @param regionInfo {@link HRegionInfo} for the region to open + * @param ord the {@link OpenRegionDetails} object * @return true if committed successfully, false otherwise. */ public boolean commitOpenOnMasterSide(AssignmentManager assignmentManager, @@ -118,6 +123,7 @@ public interface OpenRegionCoordination { static interface OpenRegionDetails { /** * Sets server name on which opening operation is running. + * @param serverName server name to set */ void setServerName(ServerName serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java index 8015f4c..da08688 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/RegionMergeCoordination.java @@ -45,14 +45,21 @@ public interface RegionMergeCoordination { * Start the region merge transaction * @param region region to be created as offline * @param serverName server event originates from - * @throws IOException + * @param a one region to be merged + * @param b the other region to be merged + * @throws IOException if an IO error occurs */ void startRegionMergeTransaction(HRegionInfo region, ServerName serverName, HRegionInfo a, HRegionInfo b) throws IOException; /** * Get everything ready for region merge - * @throws IOException + * @param services Hosting service instance + * @param mergedRegionInfo the merged region info + * @param region_a one region to be merged + * @param region_b the other region to be merged + * @param details region merge transaction details + * @throws IOException if an IO error occurs */ void waitForRegionMergeTransaction(RegionServerServices services, HRegionInfo mergedRegionInfo, HRegion region_a, HRegion region_b, RegionMergeDetails details) throws IOException; @@ -75,7 +82,7 @@ public interface RegionMergeCoordination { * @param b merging region B * @param serverName server event originates from * @param rmd region merge details - * @throws IOException + * @throws IOException if an IO error occurs */ void processRegionMergeRequest(HRegionInfo merged, HRegionInfo a, HRegionInfo b, ServerName serverName, RegionMergeDetails rmd) throws IOException; @@ -87,7 +94,7 @@ public interface RegionMergeCoordination { * 
@param region_a merging region A * @param region_b merging region B * @param rmd region merge details - * @param mergedRegion + * @param mergedRegion the merged region info * @throws IOException If thrown, transaction failed. Call * {@link org.apache.hadoop.hbase.regionserver.RegionMergeTransaction#rollback( * Server, RegionServerServices)} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 917df5b..25d6261 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -103,11 +103,13 @@ public interface SplitLogManagerCoordination { /** * Provide the configuration from the SplitLogManager + * @param details {@link SplitLogManagerDetails} to set */ void setDetails(SplitLogManagerDetails details); /** * Returns the configuration that was provided previously + * @return the {@link SplitLogManagerDetails} */ SplitLogManagerDetails getDetails(); @@ -123,7 +125,7 @@ public interface SplitLogManagerCoordination { * @param serverName server name * @param userRegions set of regions to be marked * @throws IOException in case of failure - * @throws InterruptedIOException + * @throws InterruptedIOException if interrupted */ void markRegionsRecovering(final ServerName serverName, Set userRegions) throws IOException, InterruptedIOException; @@ -137,12 +139,14 @@ public interface SplitLogManagerCoordination { * It removes recovering regions from Coordination * @param serverNames servers which are just recovered * @param isMetaRecovery whether current recovery is for the meta region on - * serverNames + * the given server + * @throws IOException if an IO error occurs */ void removeRecoveringRegions(Set serverNames, Boolean isMetaRecovery) throws IOException; /** * Return the number of remaining tasks + * @return remaining task number */ int remainingTasksInCoordination(); @@ -155,7 +159,7 @@ public interface SplitLogManagerCoordination { /** * Change the recovery mode. * @param b the recovery mode state - * @throws InterruptedIOException + * @throws InterruptedIOException if interrupted * @throws IOException in case of failure */ void setRecoveryMode(boolean b) throws InterruptedIOException, IOException; @@ -164,7 +168,7 @@ public interface SplitLogManagerCoordination { * Removes known stale servers * @param knownServers set of previously failed servers * @throws IOException in case of failure - * @throws InterruptedIOException + * @throws InterruptedIOException if interrupted */ void removeStaleRecoveringRegions(Set knownServers) throws IOException, InterruptedIOException; @@ -213,7 +217,7 @@ public interface SplitLogManagerCoordination { /** * Support method to init constants such as timeout. Mostly required for UTs. 
- * @throws IOException + * @throws IOException if an IO error occurs */ @VisibleForTesting void init() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java index 3466d04..39c5d7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitTransactionCoordination.java @@ -55,7 +55,7 @@ public interface SplitTransactionCoordination { * @param serverName server event originates from * @param hri_a daughter region * @param hri_b daughter region - * @throws IOException + * @throws IOException if an IO error occurs */ void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a, HRegionInfo hri_b) throws IOException; @@ -67,7 +67,7 @@ public interface SplitTransactionCoordination { * @param hri_a daughter region * @param hri_b daughter region * @param std split transaction details - * @throws IOException + * @throws IOException if an IO error occurs */ void waitForSplitTransaction(final RegionServerServices services, Region parent, HRegionInfo hri_a, HRegionInfo hri_b, SplitTransactionDetails std) @@ -79,7 +79,7 @@ public interface SplitTransactionCoordination { * @param first daughter region * @param second daughter region * @param std split transaction details - * @param parent + * @param parent the parent region of the split * @throws IOException If thrown, transaction failed. Call * {@link org.apache.hadoop.hbase.regionserver. * SplitTransaction#rollback(Server, RegionServerServices)} @@ -95,6 +95,13 @@ public interface SplitTransactionCoordination { /** * Required by AssignmentManager + * @param p the parent region + * @param hri_a first half of the region split + * @param hri_b second half of the region split + * @param sn {@link ServerName} of the server where the region transition is happening + * @param std details of the split transaction + * @return version of the transitioned znode if success, or -1 if fail + * @throws IOException if an IO error occurs */ int processTransition(HRegionInfo p, HRegionInfo hri_a, HRegionInfo hri_b, ServerName sn, SplitTransactionDetails std) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index acdcf60..0566992 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -290,7 +290,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements * region server hosting the region can allow reads to the recovered region * @param recoveredServerNameSet servers which are just recovered * @param isMetaRecovery whether current recovery is for the meta region on - * serverNames + * the given servers + * @throws IOException if an IO error occurs */ @Override public void removeRecoveringRegions(final Set recoveredServerNameSet, @@ -782,8 +783,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements /** * This function is to set recovery mode from outstanding split log tasks from before or current * configuration setting - * @param isForInitialization - * 
@throws IOException + * @param isForInitialization whether we're in process of initialization + * @throws IOException if an IO error occurs */ @Override public void setRecoveryMode(boolean isForInitialization) throws IOException { @@ -946,8 +947,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements * partially done tasks are present. taskname is the name of the task that was put up in * zookeeper. *

    - * @param workerName - * @param taskname + * @param workerName name of the server which holds the task + * @param taskname name of the task * @return DONE if task completed successfully, ERR otherwise */ Status finish(ServerName workerName, String taskname); @@ -1133,6 +1134,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements /** * Temporary function that is used by unit tests only + * @param b the boolean to set */ public void setIgnoreDeleteForTesting(boolean b) { ignoreZKDeleteForTesting = b; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java index 5b831ee..b88a2ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitTransactionCoordination.java @@ -58,7 +58,7 @@ public class ZKSplitTransactionCoordination implements SplitTransactionCoordinat * @param serverName server event originates from * @param hri_a daughter region * @param hri_b daughter region - * @throws IOException + * @throws IOException if an IO error occurs */ @Override @@ -208,7 +208,7 @@ public class ZKSplitTransactionCoordination implements SplitTransactionCoordinat * @param a daughter region * @param b daughter region * @param std split transaction details - * @param parent + * @param parent the parent region * @throws IOException If thrown, transaction failed. Call * {@link org.apache.hadoop.hbase.regionserver.SplitTransaction#rollback( * Server, RegionServerServices)} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java index 812bbe2..33f1446 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkOpenRegionCoordination.java @@ -60,7 +60,7 @@ public class ZkOpenRegionCoordination implements OpenRegionCoordination { /** * @param r Region we're working on. * @return whether znode is successfully transitioned to OPENED state. - * @throws java.io.IOException + * @throws java.io.IOException if an IO error occurs */ @Override public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord) throws IOException { @@ -178,7 +178,7 @@ public class ZkOpenRegionCoordination implements OpenRegionCoordination { * * This is not guaranteed to succeed, we just do our best. * - * @param rsServices + * @param rsServices the server we're working on * @param hri Region we're working on. * @param ord Details about region open task * @return whether znode is successfully transitioned to FAILED_OPEN state. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java index 1d26cba..8641366 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkRegionMergeCoordination.java @@ -88,7 +88,7 @@ public class ZkRegionMergeCoordination implements RegionMergeCoordination { * we periodically transition the node so that master gets the callback. 
* If the node is removed or is not in pending_merge state any more, * we abort the merge. - * @throws IOException + * @throws IOException if an IO error occurs */ @Override @@ -164,7 +164,7 @@ public class ZkRegionMergeCoordination implements RegionMergeCoordination { * * @param region region to be created as offline * @param serverName server event originates from - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void startRegionMergeTransaction(final HRegionInfo region, final ServerName serverName, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 637920b..5f69215 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -359,6 +359,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements * @param zkw zk wathcer * @param server name * @param task to own + * @param mode the recovery mode * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server otherwise -1 */ @@ -402,7 +403,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements *

    * Synchronization using {@link #taskReadyLock} ensures that it will try to grab every task that * has been put up - * @throws InterruptedException + * @throws InterruptedException if interrupted */ @Override public void taskLoop() throws InterruptedException { @@ -583,8 +584,9 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements /** * endTask() can fail and the only way to recover out of it is for the * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. - * @param slt - * @param ctr + * @param slt the {@link SplitLogTask} to finish + * @param ctr the counter to change + * @param details the details of the split task */ @Override public void endTask(SplitLogTask slt, AtomicLong ctr, SplitTaskDetails details) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 81c933b..0941d47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -229,7 +229,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { /** * Gives the row count for the given column family and column qualifier, in * the given row range as defined in the Scan object. - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void getRowNum(RpcController controller, AggregateRequest request, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java index c7f0b90..9814292 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java @@ -38,7 +38,8 @@ public interface BulkLoadObserver extends Coprocessor { * Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master - * @throws IOException + * @param request the request + * @throws IOException if an IO error occurs */ void prePrepareBulkLoad(ObserverContext ctx, PrepareBulkLoadRequest request) throws IOException; @@ -47,7 +48,8 @@ public interface BulkLoadObserver extends Coprocessor { * Called as part of SecureBulkLoadEndpoint.cleanupBulkLoad() RPC call. * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master - * @throws IOException + * @param request the request + * @throws IOException if an IO error occurs */ void preCleanupBulkLoad(ObserverContext ctx, CleanupBulkLoadRequest request) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index b9c29fa..52f3aca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -118,6 +118,7 @@ public abstract class CoprocessorHost { * regionserver. * (HBASE-4070: Improve region server metrics to report loaded coprocessors * to master). 
+ * @return set of simple names of the coprocessors */ public Set getCoprocessors() { Set returnValue = new TreeSet(); @@ -130,6 +131,8 @@ public abstract class CoprocessorHost { /** * Load system coprocessors once only. Read the class names from configuration. * Called by constructor. + * @param conf the {@link Configuration} instance + * @param confKey key of the default system coprocessors */ protected void loadSystemCoprocessors(Configuration conf, String confKey) { boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, @@ -176,6 +179,7 @@ public abstract class CoprocessorHost { * @param priority chaining priority * @param conf configuration for coprocessor * @throws java.io.IOException Exception + * @return the coprocessor implementation */ public E load(Path path, String className, int priority, Configuration conf) throws IOException { @@ -231,6 +235,7 @@ public abstract class CoprocessorHost { * @param priority priority * @param conf configuration * @throws java.io.IOException Exception + * @return the coprocessor implementation */ public E loadInstance(Class implClass, int priority, Configuration conf) throws IOException { @@ -263,6 +268,12 @@ public abstract class CoprocessorHost { /** * Called when a new Coprocessor class is loaded + * @param implClass Implementation class + * @param instance the instance of {@link Coprocessor} implementation + * @param priority priority + * @param sequence sequence id + * @param conf configuration + * @return the coprocessor environment extension */ public abstract E createEnvironment(Class implClass, Coprocessor instance, int priority, int sequence, Configuration conf); @@ -297,6 +308,7 @@ public abstract class CoprocessorHost { /** * Find list of coprocessors that extend/implement the given class/interface * @param cls the class/interface to look for + * @param class type of the coprocessor * @return the list of coprocessors, or null if not found */ public List findCoprocessors(Class cls) { @@ -392,6 +404,8 @@ public abstract class CoprocessorHost { * Constructor * @param impl the coprocessor instance * @param priority chaining priority + * @param seq sequence id + * @param conf configuration */ public Environment(final Coprocessor impl, final int priority, final int seq, final Configuration conf) { @@ -403,7 +417,10 @@ public abstract class CoprocessorHost { this.conf = conf; } - /** Initialize the environment */ + /** + * Initialize the environment + * @throws IOException if an IO error occurs + */ public void startup() throws IOException { if (state == Coprocessor.State.INSTALLED || state == Coprocessor.State.STOPPED) { @@ -594,6 +611,7 @@ public abstract class CoprocessorHost { * @param methodName the name of the non-deprecated method version * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are * declared. + * @return true if it's OK to use the legacy method */ @InterfaceAudience.Private protected static boolean useLegacyMethod(final Class clazz, @@ -638,6 +656,8 @@ public abstract class CoprocessorHost { * Used in concert with {@link #useLegacyMethod(Class, String, Class[])} when a runtime issue * prevents properly supporting the legacy version of a coprocessor API. * Since coprocessors can be in tight loops this serves to limit the amount of log spam we create. 
+ * @param clazz the class of coprocessor + * @param message the message to log */ @InterfaceAudience.Private protected void legacyWarning(final Class clazz, final String message) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java index 1076437..49a82d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/EndpointObserver.java @@ -48,7 +48,7 @@ public interface EndpointObserver extends Coprocessor { * @param methodName the invoked service method * @param request the request message * @return the possibly modified message - * @throws IOException + * @throws IOException if an IO error occurs */ Message preEndpointInvocation(ObserverContext ctx, Service service, String methodName, Message request) throws IOException; @@ -61,7 +61,7 @@ public interface EndpointObserver extends Coprocessor { * @param methodName the invoked service method * @param request the request message * @param responseBuilder the response message builder - * @throws IOException + * @throws IOException if an IO error occurs */ void postEndpointInvocation(ObserverContext ctx, Service service, String methodName, Message request, Message.Builder responseBuilder) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 4e373e7..986c8cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -55,7 +55,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param desc the HTableDescriptor for the table * @param regions the initial regions created for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void preCreateTable(final ObserverContext ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException; @@ -66,7 +66,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param desc the HTableDescriptor for the table * @param regions the initial regions created for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void postCreateTable(final ObserverContext ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException; @@ -78,7 +78,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param desc the HTableDescriptor for the table * @param regions the initial regions created for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void preCreateTableHandler(final ObserverContext ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException; @@ -90,7 +90,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param desc the HTableDescriptor for the table * @param regions the initial regions created for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void postCreateTableHandler(final ObserverContext ctx, HTableDescriptor desc, HRegionInfo[] regions) throws 
IOException; @@ -101,6 +101,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preDeleteTable(final ObserverContext ctx, TableName tableName) throws IOException; @@ -110,6 +111,7 @@ public interface MasterObserver extends Coprocessor { * of delete table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postDeleteTable(final ObserverContext ctx, TableName tableName) throws IOException; @@ -121,6 +123,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preDeleteTableHandler( final ObserverContext ctx, TableName tableName) @@ -133,6 +136,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postDeleteTableHandler( final ObserverContext ctx, TableName tableName) @@ -145,6 +149,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preTruncateTable(final ObserverContext ctx, TableName tableName) throws IOException; @@ -156,6 +161,7 @@ public interface MasterObserver extends Coprocessor { * truncate operation is terminated. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postTruncateTable(final ObserverContext ctx, TableName tableName) throws IOException; @@ -167,6 +173,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preTruncateTableHandler( final ObserverContext ctx, TableName tableName) @@ -179,6 +186,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. 
* @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postTruncateTableHandler( final ObserverContext ctx, TableName tableName) @@ -191,6 +199,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param htd the HTableDescriptor + * @throws IOException if an IO error occurs */ void preModifyTable(final ObserverContext ctx, final TableName tableName, HTableDescriptor htd) throws IOException; @@ -201,6 +210,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param htd the HTableDescriptor + * @throws IOException if an IO error occurs */ void postModifyTable(final ObserverContext ctx, final TableName tableName, HTableDescriptor htd) throws IOException; @@ -212,6 +222,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param htd the HTableDescriptor + * @throws IOException if an IO error occurs */ void preModifyTableHandler( final ObserverContext ctx, @@ -224,6 +235,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param htd the HTableDescriptor + * @throws IOException if an IO error occurs */ void postModifyTableHandler( final ObserverContext ctx, @@ -235,6 +247,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param column the HColumnDescriptor + * @throws IOException if an IO error occurs */ void preAddColumn(final ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException; @@ -245,6 +258,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param column the HColumnDescriptor + * @throws IOException if an IO error occurs */ void postAddColumn(final ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException; @@ -255,6 +269,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param column the HColumnDescriptor + * @throws IOException if an IO error occurs */ void preAddColumnHandler( final ObserverContext ctx, @@ -266,6 +281,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param column the HColumnDescriptor + * @throws IOException if an IO error occurs */ void postAddColumnHandler( final ObserverContext ctx, @@ -277,6 +293,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param descriptor the HColumnDescriptor + * @throws IOException if an IO error occurs */ void preModifyColumn(final ObserverContext ctx, TableName tableName, HColumnDescriptor descriptor) throws IOException; @@ -287,6 +304,7 @@ public interface MasterObserver extends Coprocessor { 
* @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param descriptor the HColumnDescriptor + * @throws IOException if an IO error occurs */ void postModifyColumn(final ObserverContext ctx, TableName tableName, HColumnDescriptor descriptor) throws IOException; @@ -297,6 +315,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param descriptor the HColumnDescriptor + * @throws IOException if an IO error occurs */ void preModifyColumnHandler( final ObserverContext ctx, @@ -308,6 +327,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param descriptor the HColumnDescriptor + * @throws IOException if an IO error occurs */ void postModifyColumnHandler( final ObserverContext ctx, @@ -320,6 +340,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param c the column + * @throws IOException if an IO error occurs */ void preDeleteColumn(final ObserverContext ctx, final TableName tableName, final byte[] c) throws IOException; @@ -330,6 +351,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param c the column + * @throws IOException if an IO error occurs */ void postDeleteColumn(final ObserverContext ctx, final TableName tableName, final byte[] c) throws IOException; @@ -340,6 +362,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param c the column + * @throws IOException if an IO error occurs */ void preDeleteColumnHandler( final ObserverContext ctx, @@ -351,6 +374,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param c the column + * @throws IOException if an IO error occurs */ void postDeleteColumnHandler( final ObserverContext ctx, @@ -361,6 +385,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preEnableTable(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -370,6 +395,7 @@ public interface MasterObserver extends Coprocessor { * of enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postEnableTable(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -380,6 +406,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. 
* @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preEnableTableHandler( final ObserverContext ctx, @@ -390,6 +417,7 @@ public interface MasterObserver extends Coprocessor { * of enable table handler and it is async to the enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postEnableTableHandler( final ObserverContext ctx, @@ -401,6 +429,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preDisableTable(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -410,6 +439,7 @@ public interface MasterObserver extends Coprocessor { * of disable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postDisableTable(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -420,6 +450,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void preDisableTableHandler( final ObserverContext ctx, @@ -430,6 +461,7 @@ public interface MasterObserver extends Coprocessor { * of disable table handler and it is asyn to the disable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table + * @throws IOException if an IO error occurs */ void postDisableTableHandler( final ObserverContext ctx, @@ -441,6 +473,7 @@ public interface MasterObserver extends Coprocessor { * @param region the HRegionInfo * @param srcServer the source ServerName * @param destServer the destination ServerName + * @throws IOException if an IO error occurs */ void preMove(final ObserverContext ctx, final HRegionInfo region, final ServerName srcServer, @@ -453,6 +486,7 @@ public interface MasterObserver extends Coprocessor { * @param region the HRegionInfo * @param srcServer the source ServerName * @param destServer the destination ServerName + * @throws IOException if an IO error occurs */ void postMove(final ObserverContext ctx, final HRegionInfo region, final ServerName srcServer, @@ -462,7 +496,9 @@ public interface MasterObserver extends Coprocessor { /** * Called before a abortProcedure request has been processed. * @param ctx the environment to interact with the framework and master - * @throws IOException + * @param procEnv the procedure executor + * @param procId the procedure id + * @throws IOException if an IO error occurs */ public void preAbortProcedure( ObserverContext ctx, @@ -472,6 +508,7 @@ public interface MasterObserver extends Coprocessor { /** * Called after a abortProcedure request has been processed. 
* @param ctx the environment to interact with the framework and master + * @throws IOException if an IO error occurs */ public void postAbortProcedure(ObserverContext ctx) throws IOException; @@ -479,7 +516,7 @@ public interface MasterObserver extends Coprocessor { /** * Called before a listProcedures request has been processed. * @param ctx the environment to interact with the framework and master - * @throws IOException + * @throws IOException if an IO error occurs */ void preListProcedures(ObserverContext ctx) throws IOException; @@ -488,6 +525,7 @@ public interface MasterObserver extends Coprocessor { * Called after a listProcedures request has been processed. * @param ctx the environment to interact with the framework and master * @param procInfoList the list of procedures about to be returned + * @throws IOException if an IO error occurs */ void postListProcedures( ObserverContext ctx, @@ -497,6 +535,7 @@ public interface MasterObserver extends Coprocessor { * Called prior to assigning a specific region. * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region + * @throws IOException if an IO error occurs */ void preAssign(final ObserverContext ctx, final HRegionInfo regionInfo) throws IOException; @@ -505,6 +544,7 @@ public interface MasterObserver extends Coprocessor { * Called after the region assignment has been requested. * @param ctx the environment to interact with the framework and master * @param regionInfo the regionInfo of the region + * @throws IOException if an IO error occurs */ void postAssign(final ObserverContext ctx, final HRegionInfo regionInfo) throws IOException; @@ -512,8 +552,9 @@ public interface MasterObserver extends Coprocessor { /** * Called prior to unassigning a given region. * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param regionInfo the {@link HRegionInfo} for region * @param force whether to force unassignment or not + * @throws IOException if an IO error occurs */ void preUnassign(final ObserverContext ctx, final HRegionInfo regionInfo, final boolean force) throws IOException; @@ -521,8 +562,9 @@ public interface MasterObserver extends Coprocessor { /** * Called after the region unassignment has been requested. * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param regionInfo the {@link HRegionInfo} for region * @param force whether to force unassignment or not + * @throws IOException if an IO error occurs */ void postUnassign(final ObserverContext ctx, final HRegionInfo regionInfo, final boolean force) throws IOException; @@ -531,7 +573,8 @@ public interface MasterObserver extends Coprocessor { * Called prior to marking a given region as offline. ctx.bypass() will not have any * impact on this hook. * @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param regionInfo the {@link HRegionInfo} for region + * @throws IOException if an IO error occurs */ void preRegionOffline(final ObserverContext ctx, final HRegionInfo regionInfo) throws IOException; @@ -539,7 +582,8 @@ public interface MasterObserver extends Coprocessor { /** * Called after the region has been marked offline. 
* @param ctx the environment to interact with the framework and master - * @param regionInfo + * @param regionInfo the {@link HRegionInfo} for region + * @throws IOException if an IO error occurs */ void postRegionOffline(final ObserverContext ctx, final HRegionInfo regionInfo) throws IOException; @@ -548,6 +592,7 @@ public interface MasterObserver extends Coprocessor { * Called prior to requesting rebalancing of the cluster regions, though after * the initial checks for regions in transition and the balance switch flag. * @param ctx the environment to interact with the framework and master + * @throws IOException if an IO error occurs */ void preBalance(final ObserverContext ctx) throws IOException; @@ -558,6 +603,7 @@ public interface MasterObserver extends Coprocessor { * @param plans the RegionPlans which master has executed. RegionPlan serves as hint * as for the final destination for the underlying region but may not represent the * final state of assignment + * @throws IOException if an IO error occurs */ void postBalance(final ObserverContext ctx, List plans) throws IOException; @@ -566,6 +612,8 @@ public interface MasterObserver extends Coprocessor { * Called prior to modifying the flag used to enable/disable region balancing. * @param ctx the coprocessor instance's environment * @param newValue the new flag value submitted in the call + * @return the balance switch value to be used by the master, possibly different from newValue + * @throws IOException if an IO error occurs */ boolean preBalanceSwitch(final ObserverContext ctx, final boolean newValue) throws IOException; @@ -575,6 +623,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the coprocessor instance's environment * @param oldValue the previously set balanceSwitch value * @param newValue the newly set balanceSwitch value + * @throws IOException if an IO error occurs */ void postBalanceSwitch(final ObserverContext ctx, final boolean oldValue, final boolean newValue) throws IOException; @@ -582,6 +631,8 @@ public interface MasterObserver extends Coprocessor { /** * Called prior to shutting down the full HBase cluster, including this * {@link org.apache.hadoop.hbase.master.HMaster} process. + * @param ctx the coprocessor instance's environment + * @throws IOException if an IO error occurs */ void preShutdown(final ObserverContext ctx) throws IOException; @@ -590,6 +641,8 @@ public interface MasterObserver extends Coprocessor { /** * Called immediately prior to stopping this * {@link org.apache.hadoop.hbase.master.HMaster} process. + * @param ctx the coprocessor instance's environment + * @throws IOException if an IO error occurs */ void preStopMaster(final ObserverContext ctx) throws IOException; @@ -598,6 +651,8 @@ public interface MasterObserver extends Coprocessor { * Called immediately after an active master instance has completed * initialization. Will not be called on standby master instances unless * they take over the active role. + * @param ctx the coprocessor instance's environment + * @throws IOException if an IO error occurs */ void postStartMaster(final ObserverContext ctx) throws IOException; @@ -605,6 +660,8 @@ public interface MasterObserver extends Coprocessor { /** * Call before the master initialization is set to true. * {@link org.apache.hadoop.hbase.master.HMaster} process.
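A rough sketch of how a deployer-side coprocessor could hook the balancer and master lifecycle callbacks documented above. It assumes the no-op BaseMasterObserver base class from this package and commons-logging; the class name AuditingMasterObserver is invented for the example.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class AuditingMasterObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(AuditingMasterObserver.class);

  @Override
  public void preBalance(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    // Runs after the regions-in-transition and balance switch checks, before plans are built.
    LOG.info("Balancer run requested");
  }

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    // Runs only on the active master, once initialization has completed.
    LOG.info("Active master finished initialization");
  }
}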
+ * @param ctx the coprocessor instance's environment + * @throws IOException if an IO error occurs */ void preMasterInitialization(final ObserverContext ctx) throws IOException; @@ -616,7 +673,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to snapshot - * @throws IOException + * @throws IOException if an IO error occurs */ void preSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -628,7 +685,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to snapshot - * @throws IOException + * @throws IOException if an IO error occurs */ void postSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -639,7 +696,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list - * @throws IOException + * @throws IOException if an IO error occurs */ void preListSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot) throws IOException; @@ -649,7 +706,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to list - * @throws IOException + * @throws IOException if an IO error occurs */ void postListSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot) throws IOException; @@ -661,7 +718,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to create - * @throws IOException + * @throws IOException if an IO error occurs */ void preCloneSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -673,7 +730,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to create - * @throws IOException + * @throws IOException if an IO error occurs */ void postCloneSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -686,7 +743,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to restore - * @throws IOException + * @throws IOException if an IO error occurs */ void preRestoreSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -698,7 +755,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx 
the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param hTableDescriptor the hTableDescriptor of the table to restore - * @throws IOException + * @throws IOException if an IO error occurs */ void postRestoreSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) @@ -710,7 +767,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete - * @throws IOException + * @throws IOException if an IO error occurs */ void preDeleteSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot) throws IOException; @@ -720,7 +777,7 @@ public interface MasterObserver extends Coprocessor { * Called as part of deleteSnapshot RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor of the snapshot to delete - * @throws IOException + * @throws IOException if an IO error occurs */ void postDeleteSnapshot(final ObserverContext ctx, final SnapshotDescription snapshot) throws IOException; @@ -730,7 +787,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableNamesList the list of table names, or null if querying for all * @param descriptors an empty list, can be filled with what to return if bypassing - * @throws IOException + * @throws IOException if an IO error occurs * @deprecated Use preGetTableDescriptors with regex instead. */ @Deprecated @@ -741,7 +798,7 @@ public interface MasterObserver extends Coprocessor { * Called after a getTableDescriptors request has been processed. * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned - * @throws IOException + * @throws IOException if an IO error occurs * @deprecated Use postGetTableDescriptors with regex instead. 
*/ @Deprecated @@ -754,7 +811,7 @@ public interface MasterObserver extends Coprocessor { * @param tableNamesList the list of table names, or null if querying for all * @param descriptors an empty list, can be filled with what to return if bypassing * @param regex regular expression used for filtering the table names - * @throws IOException + * @throws IOException if an IO error occurs */ void preGetTableDescriptors(ObserverContext ctx, List tableNamesList, List descriptors, @@ -766,7 +823,7 @@ public interface MasterObserver extends Coprocessor { * @param tableNamesList the list of table names, or null if querying for all * @param descriptors the list of descriptors about to be returned * @param regex regular expression used for filtering the table names - * @throws IOException + * @throws IOException if an IO error occurs */ void postGetTableDescriptors(ObserverContext ctx, List tableNamesList, List descriptors, @@ -777,7 +834,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return if bypassing * @param regex regular expression used for filtering the table names - * @throws IOException + * @throws IOException if an IO error occurs */ void preGetTableNames(ObserverContext ctx, List descriptors, String regex) throws IOException; @@ -787,7 +844,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned * @param regex regular expression used for filtering the table names - * @throws IOException + * @throws IOException if an IO error occurs */ void postGetTableNames(ObserverContext ctx, List descriptors, String regex) throws IOException; @@ -798,7 +855,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void preCreateNamespace(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException; @@ -806,7 +863,7 @@ public interface MasterObserver extends Coprocessor { * Called after the createNamespace operation has been requested. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor for the table - * @throws IOException + * @throws IOException if an IO error occurs */ void postCreateNamespace(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException; @@ -817,6 +874,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace + * @throws IOException if an IO error occurs */ void preDeleteNamespace(final ObserverContext ctx, String namespace) throws IOException; @@ -825,6 +883,7 @@ public interface MasterObserver extends Coprocessor { * Called after the deleteNamespace operation has been requested. 
* @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace + * @throws IOException if an IO error occurs */ void postDeleteNamespace(final ObserverContext ctx, String namespace) throws IOException; @@ -834,6 +893,7 @@ public interface MasterObserver extends Coprocessor { * It can't bypass the default action, e.g., ctx.bypass() won't have effect. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor + * @throws IOException if an IO error occurs */ void preModifyNamespace(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException; @@ -842,6 +902,7 @@ public interface MasterObserver extends Coprocessor { * Called after the modifyNamespace operation has been requested. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor + * @throws IOException if an IO error occurs */ void postModifyNamespace(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException; @@ -850,7 +911,7 @@ public interface MasterObserver extends Coprocessor { * Called before a getNamespaceDescriptor request has been processed. * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace - * @throws IOException + * @throws IOException if an IO error occurs */ void preGetNamespaceDescriptor(ObserverContext ctx, String namespace) throws IOException; @@ -859,7 +920,7 @@ public interface MasterObserver extends Coprocessor { * Called after a getNamespaceDescriptor request has been processed. * @param ctx the environment to interact with the framework and master * @param ns the NamespaceDescriptor - * @throws IOException + * @throws IOException if an IO error occurs */ void postGetNamespaceDescriptor(ObserverContext ctx, NamespaceDescriptor ns) throws IOException; @@ -868,7 +929,7 @@ public interface MasterObserver extends Coprocessor { * Called before a listNamespaceDescriptors request has been processed. * @param ctx the environment to interact with the framework and master * @param descriptors an empty list, can be filled with what to return if bypassing - * @throws IOException + * @throws IOException if an IO error occurs */ void preListNamespaceDescriptors(ObserverContext ctx, List descriptors) throws IOException; @@ -877,7 +938,7 @@ public interface MasterObserver extends Coprocessor { * Called after a listNamespaceDescriptors request has been processed. * @param ctx the environment to interact with the framework and master * @param descriptors the list of descriptors about to be returned - * @throws IOException + * @throws IOException if an IO error occurs */ void postListNamespaceDescriptors(ObserverContext ctx, List descriptors) throws IOException; @@ -887,7 +948,7 @@ public interface MasterObserver extends Coprocessor { * Called before the table memstore is flushed to disk. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @throws IOException + * @throws IOException if an IO error occurs */ void preTableFlush(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -896,7 +957,7 @@ public interface MasterObserver extends Coprocessor { * Called after the table memstore is flushed to disk. 
* @param ctx the environment to interact with the framework and master * @param tableName the name of the table - * @throws IOException + * @throws IOException if an IO error occurs */ void postTableFlush(final ObserverContext ctx, final TableName tableName) throws IOException; @@ -906,7 +967,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param userName the name of user * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void preSetUserQuota(final ObserverContext ctx, final String userName, final Quotas quotas) throws IOException; @@ -916,7 +977,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param userName the name of user * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void postSetUserQuota(final ObserverContext ctx, final String userName, final Quotas quotas) throws IOException; @@ -927,7 +988,7 @@ public interface MasterObserver extends Coprocessor { * @param userName the name of user * @param tableName the name of the table * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void preSetUserQuota(final ObserverContext ctx, final String userName, final TableName tableName, final Quotas quotas) throws IOException; @@ -938,7 +999,7 @@ public interface MasterObserver extends Coprocessor { * @param userName the name of user * @param tableName the name of the table * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void postSetUserQuota(final ObserverContext ctx, final String userName, final TableName tableName, final Quotas quotas) throws IOException; @@ -949,7 +1010,7 @@ public interface MasterObserver extends Coprocessor { * @param userName the name of user * @param namespace the name of the namespace * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void preSetUserQuota(final ObserverContext ctx, final String userName, final String namespace, final Quotas quotas) throws IOException; @@ -960,7 +1021,7 @@ public interface MasterObserver extends Coprocessor { * @param userName the name of user * @param namespace the name of the namespace * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void postSetUserQuota(final ObserverContext ctx, final String userName, final String namespace, final Quotas quotas) throws IOException; @@ -970,7 +1031,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void preSetTableQuota(final ObserverContext ctx, final TableName tableName, final Quotas quotas) throws IOException; @@ -980,7 +1041,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void postSetTableQuota(final ObserverContext ctx, final TableName tableName, final Quotas quotas) throws IOException; @@ -990,7 +1051,7 @@ public interface MasterObserver 
extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void preSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException; @@ -1000,7 +1061,7 @@ public interface MasterObserver extends Coprocessor { * @param ctx the environment to interact with the framework and master * @param namespace the name of the namespace * @param quotas the quota settings - * @throws IOException + * @throws IOException if an IO error occurs */ void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 2bdaa16..1c528a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -354,6 +354,7 @@ public interface RegionObserver extends Coprocessor { * Called before the region is split. * @param c the environment provided by the region server * (e.getRegion() returns the parent region) + * @param splitRow the row as split point * @throws IOException if an error occurred on the coprocessor */ void preSplit(final ObserverContext c, byte[] splitRow) @@ -374,10 +375,10 @@ public interface RegionObserver extends Coprocessor { /** * This will be called before PONR step as part of split transaction. Calling * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rollback the split - * @param ctx - * @param splitKey - * @param metaEntries - * @throws IOException + * @param ctx the environment provided by the region server + * @param splitKey the row as split point + * @param metaEntries actions to operate on meta during split + * @throws IOException if an error occurred on the coprocessor */ void preSplitBeforePONR(final ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException; @@ -387,22 +388,22 @@ public interface RegionObserver extends Coprocessor { * This will be called after PONR step as part of split transaction * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. - * @param ctx - * @throws IOException + * @param ctx the environment provided by the region server + * @throws IOException if an error occurred on the coprocessor */ void preSplitAfterPONR(final ObserverContext ctx) throws IOException; /** * This will be called before the roll back of the split region is completed - * @param ctx - * @throws IOException + * @param ctx the environment provided by the region server + * @throws IOException if an error occurred on the coprocessor */ void preRollBackSplit(final ObserverContext ctx) throws IOException; /** * This will be called after the roll back of the split region is completed - * @param ctx - * @throws IOException + * @param ctx the environment provided by the region server + * @throws IOException if an error occurred on the coprocessor */ void postRollBackSplit(final ObserverContext ctx) throws IOException; @@ -410,8 +411,8 @@ public interface RegionObserver extends Coprocessor { /** * Called after any split request is processed. This will be called irrespective of success or * failure of the split. 
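The split rollback and completion hooks above can be observed with a small RegionObserver. A hedged sketch, assuming the no-op BaseRegionObserver base class; the class name is made up.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class SplitTracingObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(SplitTracingObserver.class);

  @Override
  public void preRollBackSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
      throws IOException {
    // Called before the roll back of the split is completed.
    LOG.warn("Rolling back split of "
        + ctx.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString());
  }

  @Override
  public void postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
      throws IOException {
    // Called once per split request, whether the split succeeded or failed.
    LOG.info("Split request processed for "
        + ctx.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString());
  }
}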
- * @param ctx - * @throws IOException + * @param ctx the environment provided by the region server + * @throws IOException if an error occurred on the coprocessor */ void postCompleteSplit(final ObserverContext ctx) throws IOException; @@ -419,7 +420,7 @@ public interface RegionObserver extends Coprocessor { * Called before the region is reported as closed to the master. * @param c the environment provided by the region server * @param abortRequested true if the region server is aborting - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ void preClose(final ObserverContext c, boolean abortRequested) throws IOException; @@ -507,7 +508,7 @@ public interface RegionObserver extends Coprocessor { * coprocessors * @param c the environment provided by the region server * @param get the Get request - * @param exists + * @param exists the default result * @return the value to return to the client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -591,7 +592,7 @@ public interface RegionObserver extends Coprocessor { * @param byteNow - timestamp bytes * @param get - the get formed using the current cell's row. * Note that the get does not specify the family and qualifier - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ void prePrepareTimeStampForDeleteVersion(final ObserverContext c, final Mutation mutation, final Cell cell, final byte[] byteNow, @@ -638,18 +639,18 @@ public interface RegionObserver extends Coprocessor { /** * This will be called for region operations where read lock is acquired in * {@link Region#startRegionOperation()}. - * @param ctx + * @param ctx the environment provided by the region server * @param operation The operation is about to be taken on the region - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ void postStartRegionOperation(final ObserverContext ctx, Operation operation) throws IOException; /** * Called after releasing read lock in {@link Region#closeRegionOperation()}. - * @param ctx - * @param operation - * @throws IOException + * @param ctx the environment provided by the region server + * @param operation The operation is about to be taken on the region + * @throws IOException if an error occurred on the coprocessor */ void postCloseRegionOperation(final ObserverContext ctx, Operation operation) throws IOException; @@ -657,10 +658,10 @@ public interface RegionObserver extends Coprocessor { /** * Called after the completion of batch put/delete and will be called even if the batch operation * fails - * @param ctx - * @param miniBatchOp + * @param ctx the environment provided by the region server + * @param miniBatchOp batch of Mutations applied to region * @param success true if batch operation is successful otherwise false. 
- * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ void postBatchMutateIndispensably(final ObserverContext ctx, MiniBatchOperationInProgress miniBatchOp, final boolean success) throws IOException; @@ -679,7 +680,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result the default result to return * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor @@ -708,7 +709,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param put data to put if check succeeds - * @param result + * @param result the default result to return * @return the return value to return to client if bypassing default * processing * @throws IOException if an error occurred on the coprocessor @@ -754,7 +755,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result the default result to return * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -782,7 +783,7 @@ public interface RegionObserver extends Coprocessor { * @param compareOp the comparison operation * @param comparator the comparator * @param delete delete to commit if check succeeds - * @param result + * @param result the default result to return * @return the value to return to client if bypassing default processing * @throws IOException if an error occurred on the coprocessor */ @@ -1077,7 +1078,7 @@ public interface RegionObserver extends Coprocessor { * @param length length of rowkey * @param hasMore the 'has more' indication * @return whether more rows are available for the scanner or not - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ boolean postScannerFilterRow(final ObserverContext c, final InternalScanner s, final byte[] currentRow, final int offset, final short length, @@ -1114,6 +1115,11 @@ public interface RegionObserver extends Coprocessor { /** * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit} * replayed for this region. + * @param ctx the environment provided by the region server + * @param info which region the coprocessor is operating on + * @param logKey key of the wal entry + * @param logEdit content of the wal entry + * @throws IOException if an error occurred on the coprocessor */ void preWALRestore(final ObserverContext ctx, HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException; @@ -1136,6 +1142,11 @@ public interface RegionObserver extends Coprocessor { * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per * classloader. 
* + * @param ctx the environment provided by the region server + * @param info which region the coprocessor is operating on + * @param logKey key of the wal entry + * @param logEdit content of the wal entry + * @throws IOException if an error occurred on the coprocessor * @deprecated use {@link #preWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} */ @Deprecated @@ -1145,6 +1156,11 @@ public interface RegionObserver extends Coprocessor { /** * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit} * replayed for this region. + * @param ctx the environment provided by the region server + * @param info which region the coprocessor is operating on + * @param logKey key of the wal entry + * @param logEdit content of the wal entry + * @throws IOException if an error occurred on the coprocessor */ void postWALRestore(final ObserverContext ctx, HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException; @@ -1167,6 +1183,11 @@ public interface RegionObserver extends Coprocessor { * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per * classloader. * + * @param ctx the environment provided by the region server + * @param info which region the coprocessor is operating on + * @param logKey key of the wal entry + * @param logEdit content of the wal entry + * @throws IOException if an error occurred on the coprocessor * @deprecated use {@link #postWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} */ @Deprecated @@ -1177,10 +1198,10 @@ public interface RegionObserver extends Coprocessor { * Called before bulkLoadHFile. Users can create a StoreFile instance to * access the contents of a HFile. * - * @param ctx + * @param ctx the environment provided by the region server * @param familyPaths pairs of { CF, HFile path } submitted for bulk load. Adding * or removing from this list will add or remove HFiles to be bulk loaded. - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ void preBulkLoadHFile(final ObserverContext ctx, List> familyPaths) throws IOException; @@ -1188,11 +1209,11 @@ public interface RegionObserver extends Coprocessor { /** * Called after bulkLoadHFile. * - * @param ctx + * @param ctx the environment provided by the region server * @param familyPaths pairs of { CF, HFile path } submitted for bulk load * @param hasLoaded whether the bulkLoad was successful * @return the new value of hasLoaded - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ boolean postBulkLoadHFile(final ObserverContext ctx, List> familyPaths, boolean hasLoaded) throws IOException; @@ -1207,12 +1228,12 @@ public interface RegionObserver extends Coprocessor { * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf + * @param cacheConf The cache configuration and block cache reference. * @param r original reference file. This will be not null only when reading a split file. 
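A short sketch of the bulk load hook documented above: familyPaths can be inspected, and entries may be added or removed to change what gets loaded. BaseRegionObserver and the class name are assumptions of the example.

import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class BulkLoadLoggingObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(BulkLoadLoggingObserver.class);

  @Override
  public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
      List<Pair<byte[], String>> familyPaths) throws IOException {
    // Each pair is { column family, HFile path }; editing the list changes the bulk load.
    for (Pair<byte[], String> cfAndPath : familyPaths) {
      LOG.info("Bulk loading " + cfAndPath.getSecond()
          + " into family " + Bytes.toString(cfAndPath.getFirst()));
    }
  }
}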
* @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain * @return a Reader instance to use instead of the base reader if overriding * default behavior, null otherwise - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ StoreFile.Reader preStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, @@ -1226,11 +1247,11 @@ public interface RegionObserver extends Coprocessor { * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf + * @param cacheConf The cache configuration and block cache reference. * @param r original reference file. This will be not null only when reading a split file. * @param reader the base reader instance * @return The reader to use - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ StoreFile.Reader postStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, @@ -1247,7 +1268,7 @@ public interface RegionObserver extends Coprocessor { * @param oldCell old cell containing previous value * @param newCell the new cell containing the computed value * @return the new cell, possibly changed - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ Cell postMutationBeforeWAL(ObserverContext ctx, MutationType opType, Mutation mutation, Cell oldCell, Cell newCell) throws IOException; @@ -1260,7 +1281,7 @@ public interface RegionObserver extends Coprocessor { * @param ctx the environment provided by the region server * @param delTracker the deleteTracker that is created by the QueryMatcher * @return the Delete Tracker - * @throws IOException + * @throws IOException if an error occurred on the coprocessor */ DeleteTracker postInstantiateDeleteTracker( final ObserverContext ctx, DeleteTracker delTracker) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index b1b94ff..a25947a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -43,22 +43,21 @@ public interface RegionServerObserver extends Coprocessor { /** * Called before the regions merge. * Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} to skip the merge. + * @param ctx an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge * @throws IOException if an error occurred on the coprocessor - * @param ctx - * @param regionA - * @param regionB - * @throws IOException */ void preMerge(final ObserverContext ctx, final Region regionA, final Region regionB) throws IOException; /** * called after the regions merge. 
- * @param c - * @param regionA - * @param regionB - * @param mergedRegion - * @throws IOException + * @param c an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge + * @param mergedRegion the merged region + * @throws IOException if an IO error occurs */ void postMerge(final ObserverContext c, final Region regionA, final Region regionB, final Region mergedRegion) throws IOException; @@ -66,12 +65,12 @@ public interface RegionServerObserver extends Coprocessor { /** * This will be called before PONR step as part of regions merge transaction. Calling * {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} rollback the merge - * @param ctx - * @param regionA - * @param regionB + * @param ctx an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge * @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates. * Any puts or deletes to execute on hbase:meta can be added to the mutations. - * @throws IOException + * @throws IOException if an IO error occurs */ void preMergeCommit(final ObserverContext ctx, final Region regionA, final Region regionB, @@ -79,31 +78,31 @@ public interface RegionServerObserver extends Coprocessor { /** * This will be called after PONR step as part of regions merge transaction. - * @param ctx - * @param regionA - * @param regionB - * @param mergedRegion - * @throws IOException + * @param ctx an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge + * @param mergedRegion the merged region + * @throws IOException if an IO error occurs */ void postMergeCommit(final ObserverContext ctx, final Region regionA, final Region regionB, final Region mergedRegion) throws IOException; /** * This will be called before the roll back of the regions merge. - * @param ctx - * @param regionA - * @param regionB - * @throws IOException + * @param ctx an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge + * @throws IOException if an IO error occurs */ void preRollBackMerge(final ObserverContext ctx, final Region regionA, final Region regionB) throws IOException; /** * This will be called after the roll back of the regions merge. - * @param ctx - * @param regionA - * @param regionB - * @throws IOException + * @param ctx an instance of {@link ObserverContext} + * @param regionA region to merge + * @param regionB region to merge + * @throws IOException if an IO error occurs */ void postRollBackMerge(final ObserverContext ctx, final Region regionA, final Region regionB) throws IOException; @@ -126,7 +125,7 @@ public interface RegionServerObserver extends Coprocessor { /** * This will be called after the replication endpoint is instantiated. - * @param ctx + * @param ctx an instance of {@link ObserverContext} * @param endpoint - the base endpoint for replication * @return the endpoint to use during replication. 
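A minimal sketch of intercepting the region merge hooks above, assuming the no-op BaseRegionServerObserver base class; the class name is invented.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

public class MergeAuditObserver extends BaseRegionServerObserver {
  private static final Log LOG = LogFactory.getLog(MergeAuditObserver.class);

  @Override
  public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
      Region regionA, Region regionB) throws IOException {
    // Calling ctx.bypass() here would skip the merge, as described in the javadoc above.
    LOG.info("About to merge " + regionA.getRegionInfo().getRegionNameAsString()
        + " and " + regionB.getRegionInfo().getRegionNameAsString());
  }
}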
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java index bba83cc..73307ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java @@ -51,7 +51,12 @@ public interface WALObserver extends Coprocessor { * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit} * is writen to WAL. * + * @param ctx an instance of {@link ObserverContext} + * @param info the {@link HRegionInfo} for region + * @param logKey key of the wal entry + * @param logEdit edit of the wal entry * @return true if default behavior should be bypassed, false otherwise + * @throws IOException if an IO error occurs */ // TODO: return value is not used boolean preWALWrite(ObserverContext ctx, @@ -75,7 +80,12 @@ public interface WALObserver extends Coprocessor { * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per * classloader. * + * @param ctx an instance of {@link ObserverContext} + * @param info the {@link HRegionInfo} for region + * @param logKey key of the wal entry + * @param logEdit edit of the wal entry * @return true if default behavior should be bypassed, false otherwise + * @throws IOException if an IO error occurs * @deprecated use {@link #preWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} */ @Deprecated @@ -85,6 +95,11 @@ public interface WALObserver extends Coprocessor { /** * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit} * is writen to WAL. + * @param ctx an instance of {@link ObserverContext} + * @param info the {@link HRegionInfo} for region + * @param logKey key of the wal entry + * @param logEdit edit of the wal entry + * @throws IOException if an IO error occurs */ void postWALWrite(ObserverContext ctx, HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException; @@ -107,6 +122,11 @@ public interface WALObserver extends Coprocessor { * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per * classloader. * + * @param ctx an instance of {@link ObserverContext} + * @param info the {@link HRegionInfo} for region + * @param logKey key of the wal entry + * @param logEdit edit of the wal entry + * @throws IOException if an IO error occurs * @deprecated use {@link #postWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java index 73fd7f2..37a743e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java @@ -96,6 +96,8 @@ public abstract class EventHandler implements Runnable, Comparable { /** * Default base class constructor. + * @param server instance of {@link Server} implementation + * @param eventType the event type */ public EventHandler(Server server, EventType eventType) { this.parent = Trace.currentSpan(); @@ -138,7 +140,7 @@ public abstract class EventHandler implements Runnable, Comparable { /** * This method is the main processing loop to be implemented by the various * subclasses. 
- * @throws IOException + * @throws IOException if an IO error occurs */ public abstract void process() throws IOException; @@ -214,6 +216,7 @@ public abstract class EventHandler implements Runnable, Comparable { * informative name about what event they are handling. For example, * event-specific information such as which region or server is * being processed should be included if possible. + * @return name of this class */ public String getInformativeName() { return this.getClass().toString(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java index 42cca2b..3719299 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorService.java @@ -315,6 +315,7 @@ public class ExecutorService { * * @param out the stream to write to * @param indent a string prefix for each line, used for indentation + * @throws IOException if an IO error occurs */ public void dumpTo(Writer out, String indent) throws IOException { out.write(indent + "Status for executor: " + executor + "\n"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index fb58360..02112cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -70,6 +70,7 @@ public class HFileSystem extends FilterFileSystem { * @param useHBaseChecksum if true, then use * checksum verfication in hbase, otherwise * delegate checksum verification to the FileSystem. + * @throws IOException if an IO error occurs */ public HFileSystem(Configuration conf, boolean useHBaseChecksum) throws IOException { @@ -134,6 +135,7 @@ public class HFileSystem extends FilterFileSystem { /** * Returns the underlying filesystem * @return The underlying FileSystem for this FilterFileSystem object. + * @throws IOException if an IO error occurs */ public FileSystem getBackingFs() throws IOException { return fs; @@ -367,6 +369,9 @@ public class HFileSystem extends FilterFileSystem { * This returns a filesystem object that avoids checksum * verification in the filesystem for hfileblock-reads. * For these blocks, checksum verification is done by HBase. + * @param conf the Configuration instance + * @return the {@link HFileSystem} instance + * @throws IOException if an IO error occurs */ static public FileSystem get(Configuration conf) throws IOException { return new HFileSystem(conf, true); @@ -374,6 +379,9 @@ public class HFileSystem extends FilterFileSystem { /** * Wrap a LocalFileSystem within a HFileSystem. + * @param conf the Configuration instance + * @return the {@link HFileSystem} instance wrapping a {@link LocalFileSystem} + * @throws IOException if an IO error occurs */ static public FileSystem getLocalFs(Configuration conf) throws IOException { return new HFileSystem(FileSystem.getLocal(conf)); @@ -383,6 +391,7 @@ public class HFileSystem extends FilterFileSystem { * The org.apache.hadoop.fs.FilterFileSystem does not yet support * createNonRecursive. This is a hadoop bug and when it is fixed in Hadoop, * this definition will go away. 
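A usage sketch for the two HFileSystem factory methods documented above, assuming only a standard client Configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // get() returns a filesystem that skips FS-level checksum verification for
    // hfile block reads; HBase verifies its own checksums for those blocks.
    FileSystem fs = HFileSystem.get(conf);
    // The wrapped, checksum-verifying filesystem is still reachable.
    FileSystem backing = ((HFileSystem) fs).getBackingFs();
    System.out.println("Using " + fs.getUri() + " backed by " + backing.getUri());
  }
}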
+ * @return the {@link FSDataOutputStream} instance */ @SuppressWarnings("deprecation") public FSDataOutputStream createNonRecursive(Path f, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java index 5950585..f48665e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java @@ -103,6 +103,7 @@ public class FSDataInputStreamWrapper { * reads finish and before any other reads start (what happens in reality is we read the * tail, then call this based on what's in the tail, then read blocks). * @param forceNoHBaseChecksum Force not using HBase checksum. + * @throws IOException if an IO error occurs */ public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException { if (hfs == null) return; @@ -120,13 +121,20 @@ public class FSDataInputStreamWrapper { } } - /** For use in tests. */ + /** + * For use in tests. + * @param fsdis the input stream + */ @VisibleForTesting public FSDataInputStreamWrapper(FSDataInputStream fsdis) { this(fsdis, fsdis); } - /** For use in tests. */ + /** + * For use in tests. + * @param fsdis the input stream + * @param noChecksum the input stream w/o checksum + */ @VisibleForTesting public FSDataInputStreamWrapper(FSDataInputStream fsdis, FSDataInputStream noChecksum) { doCloseStreams = false; @@ -149,6 +157,7 @@ public class FSDataInputStreamWrapper { * Get the stream to use. Thread-safe. * @param useHBaseChecksum must be the value that shouldUseHBaseChecksum has returned * at some point in the past, otherwise the result is undefined. + * @return the stream to use */ public FSDataInputStream getStream(boolean useHBaseChecksum) { return useHBaseChecksum ? this.streamNoFsChecksum : this.stream; @@ -157,6 +166,8 @@ public class FSDataInputStreamWrapper { /** * Read from non-checksum stream failed, fall back to FS checksum. Thread-safe. * @param offCount For how many checksumOk calls to turn off the HBase checksum. + * @return the stream to use + * @throws IOException if an IO error occurs */ public FSDataInputStream fallbackToFsChecksum(int offCount) throws IOException { // checksumOffCount is speculative, but let's try to reset it less. @@ -186,7 +197,10 @@ public class FSDataInputStreamWrapper { } } - /** Close stream(s) if necessary. */ + /** + * Close stream(s) if necessary. + * @throws IOException if an IO error occurs + */ public void close() throws IOException { if (!doCloseStreams) return; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index 1c5a593..2bcf767 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -349,7 +349,9 @@ public class FileLink { } /** + * @param fs the file system instance * @return true if the file pointed by the link exists + * @throws IOException if an IO error occurs */ public boolean exists(final FileSystem fs) throws IOException { for (int i = 0; i < locations.length; ++i) { @@ -361,7 +363,9 @@ public class FileLink { } /** + * @param fs the file system instance * @return the path of the first available link. 
+ * @throws IOException if an IO error occurs */ public Path getAvailablePath(FileSystem fs) throws IOException { for (int i = 0; i < locations.length; ++i) { @@ -422,6 +426,8 @@ public class FileLink { /** * NOTE: This method must be used only in the constructor! * It creates a List with the specified locations for the link. + * @param originPath the original path + * @param alternativePaths the alternative paths */ protected void setLocations(Path originPath, Path... alternativePaths) { assert this.locations == null : "Link locations already set"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index 6d21ace..7f3e244 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -93,6 +93,9 @@ public class HFileLink extends FileLink { /** * Dead simple hfile link constructor + * @param originPath the original path + * @param tempPath the temporary path + * @param archivePath the archive path */ public HFileLink(final Path originPath, final Path tempPath, final Path archivePath) { @@ -106,6 +109,7 @@ public class HFileLink extends FileLink { /** * @param conf {@link Configuration} from which to extract specific archive locations * @param hFileLinkPattern The path ending with a HFileLink pattern. (table=region-hfile) + * @return the {@link HFileLink} * @throws IOException on unexpected error. */ public static final HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) @@ -118,6 +122,7 @@ public class HFileLink extends FileLink { * @param rootDir Path to the root directory where hbase files are stored * @param archiveDir Path to the hbase archive directory * @param hFileLinkPattern The path of the HFile Link. + * @return the {@link HFileLink} */ public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 43bbab5..cb2ec36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -67,10 +67,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { * Creates a half file reader for a normal hfile. 
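A sketch of resolving an HFile link with the FileLink/HFileLink methods documented above; the link path here is a placeholder taken from the command line.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // A path ending with the table=region-hfile link pattern, resolved against the
    // configured root and archive directories.
    Path linkPath = new Path(args[0]);
    HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
    if (link.exists(fs)) {
      // The first of the link's candidate locations that actually exists.
      Path resolved = link.getAvailablePath(fs);
      System.out.println(linkPath + " -> " + resolved);
    }
  }
}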
* @param fs fileystem to read from * @param p path to hfile - * @param cacheConf + * @param cacheConf the {@link CacheConfig} instance * @param r original reference file (contains top or bottom) * @param conf Configuration - * @throws IOException + * @throws IOException if an IO error occurs */ public HalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf, final Reference r, final Configuration conf) @@ -93,10 +93,10 @@ public class HalfStoreFileReader extends StoreFile.Reader { * @param p path to hfile * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the hfile file - * @param cacheConf + * @param cacheConf the {@link CacheConfig} instance * @param r original reference file (contains top or bottom) * @param conf Configuration - * @throws IOException + * @throws IOException if an IO error occurs */ public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf, final Reference r, final Configuration conf) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java index a38e3c1..c56a178 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -70,7 +70,7 @@ public class Reference { } /** - * @param splitRow + * @param splitRow the row to split around * @return A {@link Reference} that points at top half of a an hfile */ public static Reference createTopReference(final byte [] splitRow) { @@ -78,7 +78,7 @@ public class Reference { } /** - * @param splitRow + * @param splitRow the row to split around * @return A {@link Reference} that points at the bottom half of a an hfile */ public static Reference createBottomReference(final byte [] splitRow) { @@ -88,7 +88,7 @@ public class Reference { /** * Constructor * @param splitRow This is row we are splitting around. - * @param fr + * @param fr This identifies which part the file covers */ Reference(final byte [] splitRow, final Range fr) { this.splitkey = splitRow == null? null: KeyValueUtil.createFirstOnRow(splitRow).getKey(); @@ -134,6 +134,8 @@ public class Reference { } /** + * @param in the input to read fields from + * @throws IOException if an IO error occurs * @deprecated Writables are going away. Use the pb serialization methods instead. * Remove in a release after 0.96 goes out. This is here only to migrate * old Reference files written with Writables before 0.96. @@ -159,10 +161,10 @@ public class Reference { /** * Read a Reference from FileSystem. - * @param fs - * @param p + * @param fs the file system instance + * @param p path of the reference file * @return New Reference made from passed p - * @throws IOException + * @throws IOException if an IO error occurs */ public static Reference read(final FileSystem fs, final Path p) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java index 52491e6..6de4ee9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java @@ -130,6 +130,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer { /** * Add last bits of metadata to file info before it is written out. 
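A sketch of the Reference factory and read methods documented above; the split row and reference file path are placeholders.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class ReferenceExample {
  public static void show(FileSystem fs, Path referenceFile) throws IOException {
    // In-memory references describing the two halves of a split around one row.
    byte[] splitRow = Bytes.toBytes("rowkey-5000");
    Reference top = Reference.createTopReference(splitRow);
    Reference bottom = Reference.createBottomReference(splitRow);
    System.out.println("top=" + top + ", bottom=" + bottom);

    // A reference file written by a split can be read back from the filesystem.
    Reference fromDisk = Reference.read(fs, referenceFile);
    System.out.println("read " + referenceFile + ": " + fromDisk);
  }
}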
+ * @throws IOException if an IO error occurs */ protected void finishFileInfo() throws IOException { if (lastCell != null) { @@ -175,7 +176,7 @@ public abstract class AbstractHFileWriter implements HFile.Writer { * * @param trailer fixed file trailer * @param out the data output to write the file info to - * @throws IOException + * @throws IOException if an IO error occurs */ protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) throws IOException { @@ -210,7 +211,13 @@ public abstract class AbstractHFileWriter implements HFile.Writer { return isDuplicateKey; } - /** Checks the given value for validity. */ + /** + * Checks the given value for validity. + * @param value byte array containing the value + * @param offset the offset of the value + * @param length length of the value + * @throws IOException if an IO error occurs + */ protected void checkValue(final byte[] value, final int offset, final int length) throws IOException { if (value == null) { @@ -235,6 +242,8 @@ public abstract class AbstractHFileWriter implements HFile.Writer { /** * Sets remaining trailer fields, writes the trailer to disk, and optionally * closes the output stream. + * @param trailer the {@link FixedFileTrailer} + * @throws IOException if an IO error occurs */ protected void finishClose(FixedFileTrailer trailer) throws IOException { trailer.setMetaIndexCount(metaNames.size()); @@ -256,7 +265,15 @@ public abstract class AbstractHFileWriter implements HFile.Writer { return Compression.getCompressionAlgorithmByName(algoName); } - /** A helper method to create HFile output streams in constructors */ + /** + * A helper method to create HFile output streams in constructors + * @param conf the Configuration instance + * @param fs the file system instance + * @param path the path to create on file system + * @param favoredNodes address of the favored nodes + * @return the output stream to use + * @throws IOException if an IO error occurs + */ protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { FsPermission perms = FSUtils.getFilePermissions(fs, conf, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 57c4be9..46ceeea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -68,6 +68,7 @@ public interface BlockCache extends Iterable { /** * Evicts all blocks for the given HFile. * + * @param hfileName name of the HFile * @return the number of blocks evicted */ int evictBlocksByHfileName(String hfileName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index 94638da..69f5adf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -57,7 +57,8 @@ public class BlockCacheUtil { } /** - * @param cb + * @param cb the {@link CachedBlock} object + * @param now the current time in nano seconds * @return The block content as String. 
*/ public static String toString(final CachedBlock cb, final long now) { @@ -101,12 +102,12 @@ public class BlockCacheUtil { } /** - * @param filename - * @param blocks + * @param filename name of the file + * @param blocks the blocks * @return A JSON String of filename and counts of blocks - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException + * @throws JsonGenerationException if error occurred during json generation + * @throws JsonMappingException if error occurred during json mapping + * @throws IOException if an IO error occurs */ public static String toJSON(final String filename, final NavigableSet blocks) throws JsonGenerationException, JsonMappingException, IOException { @@ -124,11 +125,11 @@ public class BlockCacheUtil { } /** - * @param cbsbf + * @param cbsbf the count of cached blocks by file * @return JSON string of cbsf aggregated - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException + * @throws JsonGenerationException if json generate failed + * @throws JsonMappingException if json mapping failed + * @throws IOException if an IO error occurs */ public static String toJSON(final CachedBlocksByFile cbsbf) throws JsonGenerationException, JsonMappingException, IOException { @@ -136,11 +137,11 @@ public class BlockCacheUtil { } /** - * @param bc + * @param bc the {@link BlockCache} instance * @return JSON string of bc content. - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException + * @throws JsonGenerationException if json generate failed + * @throws JsonMappingException if json mapping failed + * @throws IOException if an IO error occurs */ public static String toJSON(final BlockCache bc) throws JsonGenerationException, JsonMappingException, IOException { @@ -148,7 +149,8 @@ public class BlockCacheUtil { } /** - * @param cb + * @param cb the cached block + * @param now the current time in nano seconds * @return The block content of bc as a String minus the filename. */ public static String toStringMinusFileName(final CachedBlock cb, final long now) { @@ -207,7 +209,7 @@ public class BlockCacheUtil { Histogram age = METRICS.newHistogram(CachedBlocksByFile.class, "age"); /** - * @param cb + * @param cb the cached block to update * @return True if full.... if we won't be adding any more. */ public boolean update(final CachedBlock cb) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 26eb1da..c8a16eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -280,7 +280,7 @@ public class CacheConfig { /** * Constructs a cache configuration copied from the specified configuration. - * @param cacheConf + * @param cacheConf the {@link CacheConfig} object to copy from */ public CacheConfig(CacheConfig cacheConf) { this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory, @@ -292,6 +292,7 @@ public class CacheConfig { /** * Checks whether the block cache is enabled. + * @return true if block cache enabled */ public boolean isBlockCacheEnabled() { return this.blockCache != null; @@ -318,6 +319,8 @@ public class CacheConfig { * Should we cache a block of a particular category? We always cache * important blocks such as index blocks, as long as the block cache is * available. 
+ * @param category category of the block + * @return a boolean decides whether to cache the block on read */ public boolean shouldCacheBlockOnRead(BlockCategory category) { return isBlockCacheEnabled() @@ -412,6 +415,7 @@ public class CacheConfig { } /** + * @param category category of the block * @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise */ public boolean shouldCacheCompressed(BlockCategory category) { @@ -438,6 +442,8 @@ public class CacheConfig { * consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in * the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled} * configuration. + * @param blockType type of the block + * @return true if should read the given block from cache */ public boolean shouldReadBlockFromCache(BlockType blockType) { if (!isBlockCacheEnabled()) { @@ -465,6 +471,8 @@ public class CacheConfig { /** * If we make sure the block could not be cached, we will not acquire the lock * otherwise we will acquire lock + * @param blockType type of the block + * @return true if should acquire the lock */ public boolean shouldLockOnCacheMiss(BlockType blockType) { if (blockType == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java index f56a921..439f8a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializer.java @@ -31,17 +31,19 @@ public interface CacheableDeserializer { /** * Returns the deserialized object. * + * @param b the byte buffer to deserialize object from * @return T the deserialized object. + * @throws IOException if an IO error occurs */ T deserialize(ByteBuffer b) throws IOException; /** * - * @param b + * @param b the {@link ByteBuffer} to deserialize * @param reuse true if Cacheable object can use the given buffer as its * content * @return T the deserialized object. 
- * @throws IOException + * @throws IOException if an IO error occurs */ T deserialize(ByteBuffer b, boolean reuse) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java index 5d2d54a..2e66da9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheableDeserializerIdManager.java @@ -37,7 +37,7 @@ public class CacheableDeserializerIdManager { /** * Register the given cacheable deserializer and generate an unique identifier * id for it - * @param cd + * @param cd the cacheable deserializer * @return the identifier of given cacheable deserializer */ public static int registerDeserializer(CacheableDeserializer cd) { @@ -50,7 +50,7 @@ public class CacheableDeserializerIdManager { /** * Get the cacheable deserializer as the given identifier Id - * @param id + * @param id the id to get the cacheable deserializer * @return CacheableDeserializer */ public static CacheableDeserializer getDeserializer(int id) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 56510f0..7ed8ec7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -523,14 +523,14 @@ public class FixedFileTrailer { } /** - * Returns the major version of this HFile format + * @return the major version of this HFile format */ public int getMajorVersion() { return majorVersion; } /** - * Returns the minor version of this HFile format + * @return the minor version of this HFile format */ public int getMinorVersion() { return minorVersion; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index f014f17..af81b68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -189,6 +189,7 @@ public class HFile { /** * Number of checksum verification failures. It also * clears the counter. + * @return checksum failure count */ public static final long getChecksumFailuresCount() { return checksumFailures.getAndSet(0); @@ -197,7 +198,12 @@ public class HFile { /** API required to write an {@link HFile} */ public interface Writer extends Closeable { - /** Add an element to the file info map. */ + /** + * Add an element to the file info map. + * @param key the key to append + * @param value the value to append + * @throws IOException if an IO error occurs + */ void appendFileInfo(byte[] key, byte[] value) throws IOException; void append(Cell cell) throws IOException; @@ -208,6 +214,7 @@ public class HFile { /** * Adds an inline block writer such as a multi-level block index writer or * a compound Bloom filter writer. + * @param bloomWriter the inline block writer */ void addInlineBlockWriter(InlineBlockWriter bloomWriter); @@ -222,17 +229,20 @@ public class HFile { * Store general Bloom filter in the file. This does not deal with Bloom filter * internals but is necessary, since Bloom filters are stored differently * in HFile version 1 and version 2. 
+ * @param bfw the inline block writer */ void addGeneralBloomFilter(BloomFilterWriter bfw); /** * Store delete family Bloom filter in the file, which is only supported in * HFile V2. + * @param bfw the inline block writer + * @throws IOException if an IO error occurs */ void addDeleteFamilyBloomFilter(BloomFilterWriter bfw) throws IOException; /** - * Return the file context for the HFile this writer belongs to + * @return the file context for the HFile this writer belongs to */ HFileContext getFileContext(); } @@ -313,9 +323,10 @@ public class HFile { } /** - * Returns the factory to be used to create {@link HFile} writers. * Disables block cache access for all writers created through the * returned factory. + * @param conf the {@link Configuration} object + * @return the factory to be used to create {@link HFile} writers. */ public static final WriterFactory getWriterFactoryNoCache(Configuration conf) { @@ -325,7 +336,9 @@ public class HFile { } /** - * Returns the factory to be used to create {@link HFile} writers + * @param conf the {@link Configuration} object + * @param cacheConf the {@link CacheConfig} object + * @return the factory to be used to create {@link HFile} writers */ public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) { @@ -351,9 +364,10 @@ public class HFile { * Read in a file block. * @param offset offset to read. * @param onDiskBlockSize size of the block - * @param cacheBlock - * @param pread + * @param cacheBlock whether to cache the block + * @param pread whether to use positional read * @param isCompaction is this block being read as part of a compaction + * @param updateCacheMetrics whether to update cache metrics * @param expectedBlockType the block type we are expecting to read with this read operation, * or null to read whatever block type is available and avoid checking (that might reduce * caching efficiency of encoded data blocks) @@ -362,7 +376,7 @@ public class HFile { * encoding. This check only applies to data blocks and can be set to null when the caller is * expecting to read a non-data block and has set expectedBlockType accordingly. * @return Block wrapped in a ByteBuffer. - * @throws IOException + * @throws IOException if an IO error occurs */ HFileBlock readBlock(long offset, long onDiskBlockSize, boolean cacheBlock, final boolean pread, final boolean isCompaction, @@ -374,7 +388,7 @@ public class HFile { /** An interface used by clients to open and iterate an {@link HFile}. */ public interface Reader extends Closeable, CachingBlockReader { /** - * Returns this reader's "name". Usually the last component of the path. + * @return this reader's "name". Usually the last component of the path. * Needs to be constant as the file is being moved to support caching on * write. */ @@ -416,6 +430,8 @@ public class HFile { * Retrieves general Bloom filter metadata as appropriate for each * {@link HFile} version. * Knows nothing about how that metadata is structured. + * @return the general bloom filter metadata + * @throws IOException if an IO error occurs */ DataInput getGeneralBloomFilterMetadata() throws IOException; @@ -423,12 +439,18 @@ public class HFile { * Retrieves delete family Bloom filter metadata as appropriate for each * {@link HFile} version. * Knows nothing about how that metadata is structured. 
+ * @return the delete bloom filter metadata + * @throws IOException if an IO error occurs */ DataInput getDeleteBloomFilterMetadata() throws IOException; Path getPath(); - /** Close method with optional evictOnClose */ + /** + * Close method with optional evictOnClose + * @param evictOnClose whether to evict when close + * @throws IOException if an IO error occurs + */ void close(boolean evictOnClose) throws IOException; DataBlockEncoding getDataBlockEncoding(); @@ -436,7 +458,7 @@ public class HFile { boolean hasMVCCInfo(); /** - * Return the file context of the HFile this reader belongs to + * @return the file context of the HFile this reader belongs to */ HFileContext getFileContext(); } @@ -510,6 +532,7 @@ public class HFile { * @param fs filesystem * @param path Path to file to read * @param cacheConf This must not be null. @see {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)} + * @param conf the {@link Configuration} object * @return an active Reader instance * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile is corrupt/invalid. */ @@ -778,7 +801,10 @@ public class HFile { } } - /** Return true if the given file info key is reserved for internal use. */ + /** + * @param key the file info key + * @return true if the given file info key is reserved for internal use. + */ public static boolean isReservedFileInfoKey(byte[] key) { return Bytes.startsWith(key, FileInfo.RESERVED_PREFIX_BYTES); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 0a95888..a832e1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -583,7 +583,7 @@ public class HFileBlock implements Cacheable { } /** - * Return true when this block's buffer has been unpacked, false otherwise. Note this is a + * @return true when this block's buffer has been unpacked, false otherwise. Note this is a * calculated heuristic, not tracked attribute of the block. */ public boolean isUnpacked() { @@ -594,7 +594,10 @@ public class HFileBlock implements Cacheable { return bufCapacity == expectedCapacity || bufCapacity == expectedCapacity + headerSize; } - /** An additional sanity-check in case no compression or encryption is being used. */ + /** + * An additional sanity-check in case no compression or encryption is being used. + * @throws IOException if an IO error occurs + */ public void assumeUncompressed() throws IOException { if (onDiskSizeWithoutHeader != uncompressedSizeWithoutHeader + totalChecksumBytes()) { @@ -753,7 +756,6 @@ public class HFileBlock implements Cacheable { * store the serialized block into an external stream. *
<li>Repeat to write more blocks.
 * </ul>
- * <p>
    */ public static class Writer { @@ -842,6 +844,7 @@ public class HFileBlock implements Cacheable { /** * @param dataBlockEncoder data block encoding algorithm to use + * @param fileContext HFile meta data */ public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) { this.dataBlockEncoder = dataBlockEncoder != null @@ -869,8 +872,9 @@ public class HFileBlock implements Cacheable { /** * Starts writing into the block. The previous block's data is discarded. * + * @param newBlockType block type of the new block * @return the stream the user can write their data into - * @throws IOException + * @throws IOException if an IO error occurs */ public DataOutputStream startWriting(BlockType newBlockType) throws IOException { @@ -899,8 +903,8 @@ public class HFileBlock implements Cacheable { /** * Writes the Cell to this block - * @param cell - * @throws IOException + * @param cell the Cell to write + * @throws IOException if an IO error occurs */ public void write(Cell cell) throws IOException{ expectState(State.WRITING); @@ -1022,8 +1026,8 @@ public class HFileBlock implements Cacheable { * the offset of this block so that it can be referenced in the next block * of the same type. * - * @param out - * @throws IOException + * @param out the output stream to write + * @throws IOException if an IO error occurs */ public void writeHeaderAndData(FSDataOutputStream out) throws IOException { long offset = out.getPos(); @@ -1044,7 +1048,7 @@ public class HFileBlock implements Cacheable { * "writing" state, transitions the writer to the "block ready" state. * * @param out the output stream to write the - * @throws IOException + * @throws IOException if an IO error occurs */ protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException { @@ -1192,7 +1196,7 @@ public class HFileBlock implements Cacheable { * * @param bw the block-writable object to write as a block * @param out the file system output stream - * @throws IOException + * @throws IOException if an IO error occurs */ public void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { @@ -1206,6 +1210,8 @@ public class HFileBlock implements Cacheable { * block does not have checksum data even though the header minor * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a * 0 value in bytesPerChecksum. + * @param cacheConf the {@link CacheConfig} object + * @return the {@link HFileBlock} */ public HFileBlock getBlockForCaching(CacheConfig cacheConf) { HFileContext newContext = new HFileContextBuilder() @@ -1232,7 +1238,7 @@ public class HFileBlock implements Cacheable { /** Something that can be written into a block. */ public interface BlockWritable { - /** The type of block this data should use. */ + /** @return The type of block this data should use. */ BlockType getBlockType(); /** @@ -1240,6 +1246,7 @@ public class HFileBlock implements Cacheable { * records. * * @param out a stream to write uncompressed data into + * @throws IOException if an IO error occurs */ void writeToBlock(DataOutput out) throws IOException; } @@ -1250,13 +1257,17 @@ public class HFileBlock implements Cacheable { public interface BlockIterator { /** - * Get the next block, or null if there are no more blocks to iterate. + * @return the next block, or null if there are no more blocks to iterate. 
+ * @throws IOException if an IO error occurs */ HFileBlock nextBlock() throws IOException; /** * Similar to {@link #nextBlock()} but checks block type, throws an * exception if incorrect, and returns the HFile block + * @param blockType type of the block to get + * @return the next block + * @throws IOException if an IO error occurs */ HFileBlock nextBlockWithBlockType(BlockType blockType) throws IOException; } @@ -1268,12 +1279,14 @@ public class HFileBlock implements Cacheable { * Reads the block at the given offset in the file with the given on-disk * size and uncompressed size. * - * @param offset + * @param offset the given offset * @param onDiskSize the on-disk size of the entire block, including all * applicable headers, or -1 if unknown * @param uncompressedSize the uncompressed size of the compressed part of * the block, or -1 if unknown + * @param pread whether to use positional read * @return the newly read block + * @throws IOException if an IO error occurs */ HFileBlock readBlockData(long offset, long onDiskSize, int uncompressedSize, boolean pread) throws IOException; @@ -1289,13 +1302,22 @@ public class HFileBlock implements Cacheable { */ BlockIterator blockRange(long startOffset, long endOffset); - /** Closes the backing streams */ + /** + * Closes the backing streams + * @throws IOException if an IO error occurs + */ void closeStreams() throws IOException; - /** Get a decoder for {@link BlockType#ENCODED_DATA} blocks from this file. */ + /** + * Get a decoder for {@link BlockType#ENCODED_DATA} blocks from this file. + * @return the {@link HFileBlockDecodingContext} + */ HFileBlockDecodingContext getBlockDecodingContext(); - /** Get the default decoder for blocks from this file. */ + /** + * Get the default decoder for blocks from this file. + * @return the {@link HFileBlockDecodingContext} + */ HFileBlockDecodingContext getDefaultBlockDecodingContext(); } @@ -1862,7 +1884,7 @@ public class HFileBlock implements Cacheable { } /** - * Returns the size of this block header. + * @return the size of this block header. */ public int headerSize() { return headerSize(this.fileContext.isUseHBaseChecksum()); @@ -1870,6 +1892,8 @@ public class HFileBlock implements Cacheable { /** * Maps a minor version to the size of the header. 
+ * @param usesHBaseChecksum whether to count in checksum + * @return size of the header */ public static int headerSize(boolean usesHBaseChecksum) { if (usesHBaseChecksum) { @@ -1879,7 +1903,7 @@ public class HFileBlock implements Cacheable { } /** - * Return the appropriate DUMMY_HEADER for the minor version + * @return the appropriate DUMMY_HEADER for the minor version */ public byte[] getDummyHeaderForVersion() { return getDummyHeaderForVersion(this.fileContext.isUseHBaseChecksum()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index d656cae..8099a7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -167,14 +167,14 @@ public class HFileBlockIndex { * * @param key the key we are looking for * @param currentBlock the current block, to avoid re-reading the same block - * @param cacheBlocks - * @param pread - * @param isCompaction + * @param cacheBlocks whether to cache blocks + * @param pread whether to use positional read + * @param isCompaction is the block being read as part of a compaction * @param expectedDataBlockEncoding the data block encoding the caller is * expecting the data block to be in, or null to not perform this * check and return the block irrespective of the encoding * @return reader a basic way to load blocks - * @throws IOException + * @throws IOException if an IO error occurs */ public HFileBlock seekToDataBlock(final Cell key, HFileBlock currentBlock, boolean cacheBlocks, boolean pread, boolean isCompaction, DataBlockEncoding expectedDataBlockEncoding) @@ -198,15 +198,15 @@ public class HFileBlockIndex { * the key we are looking for * @param currentBlock * the current block, to avoid re-reading the same block - * @param cacheBlocks - * @param pread - * @param isCompaction + * @param cacheBlocks whether to cache blocks + * @param pread whether to use positional read + * @param isCompaction is the block being read as part of a compaction * @param expectedDataBlockEncoding the data block encoding the caller is * expecting the data block to be in, or null to not perform this * check and return the block irrespective of the encoding. * @return the BlockWithScanInfo which contains the DataBlock with other * scan info such as nextIndexedKey. - * @throws IOException + * @throws IOException if an IO error occurs */ public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock currentBlock, boolean cacheBlocks, @@ -318,6 +318,7 @@ public class HFileBlockIndex { * first key of the middle block of the file. 
* * @return the first key of the middle block + * @throws IOException if an IO error occurs */ public byte[] midkey() throws IOException { if (rootCount == 0) @@ -358,6 +359,7 @@ public class HFileBlockIndex { /** * @param i from 0 to {@link #getRootBlockCount() - 1} + * @return byte array of the root block key */ public byte[] getRootBlockKey(int i) { return blockKeys[i]; @@ -365,6 +367,7 @@ public class HFileBlockIndex { /** * @param i from 0 to {@link #getRootBlockCount() - 1} + * @return offset of the root block key */ public long getRootBlockOffset(int i) { return blockOffsets[i]; @@ -391,6 +394,8 @@ public class HFileBlockIndex { * * @param key * Key to find + * @param offset the offset of the given key + * @param length the length of the given key * @return Offset of block containing key (between 0 and the * number of blocks - 1) or -1 if this file does not contain the * request. @@ -421,6 +426,7 @@ public class HFileBlockIndex { * * @param key * Key to find + * @return the index */ public int rootBlockContainingKey(final Cell key) { int pos = Bytes.binarySearch(blockKeys, key, comparator); @@ -623,7 +629,7 @@ public class HFileBlockIndex { * * @param in the buffered input stream or wrapped byte input stream * @param numEntries the number of root-level index entries - * @throws IOException + * @throws IOException if an IO error occurs */ public void readRootIndex(DataInput in, final int numEntries) throws IOException { @@ -651,7 +657,7 @@ public class HFileBlockIndex { * @param blk the HFile block * @param numEntries the number of root-level index entries * @return the buffered input stream or wrapped byte input stream - * @throws IOException + * @throws IOException if an IO error occurs */ public DataInputStream readRootIndex(HFileBlock blk, final int numEntries) throws IOException { DataInputStream in = blk.getByteStream(); @@ -666,7 +672,7 @@ public class HFileBlockIndex { * * @param blk the HFile block * @param numEntries the number of root-level index entries - * @throws IOException + * @throws IOException if an IO error occurs */ public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException { @@ -813,6 +819,7 @@ public class HFileBlockIndex { * * @param blockWriter the block writer to use to write index blocks * @param cacheConf used to determine when and how a block should be cached-on-write. + * @param nameForCaching the name for caching */ public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf, String nameForCaching) { @@ -850,7 +857,7 @@ public class HFileBlockIndex { * * @param out FSDataOutputStream * @return position at which we entered the root-level index. - * @throws IOException + * @throws IOException if an IO error occurs */ public long writeIndexBlocks(FSDataOutputStream out) throws IOException { if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) { @@ -908,7 +915,7 @@ public class HFileBlockIndex { * stream writing into an {@link HFile} block. * @param description a short description of the index being written. Used * in a log message. - * @throws IOException + * @throws IOException if an IO error occurs */ public void writeSingleLevelIndex(DataOutput out, String description) throws IOException { @@ -1063,7 +1070,8 @@ public class HFileBlockIndex { * Write out the current inline index block. Inline blocks are non-root * blocks, so the non-root index format is used. 
* - * @param out + * @param out the output to write the block into + * @throws IOException if an IO error occurs */ @Override public void writeInlineBlock(DataOutput out) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java index 7ba74a2..1d8c4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileDataBlockEncoder.java @@ -40,20 +40,20 @@ public interface HFileDataBlockEncoder { * Starts encoding for a block of KeyValues. Call * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[], BlockType)} * to finish encoding of a block. - * @param encodingCtx - * @param out - * @throws IOException + * @param encodingCtx the {@link HFileBlockEncodingContext} + * @param out the output stream to write + * @throws IOException if an IO error occurs */ void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; /** * Encodes a KeyValue. - * @param cell - * @param encodingCtx - * @param out + * @param cell {@link Cell} to encode + * @param encodingCtx the {@link HFileBlockEncodingContext} + * @param out the output stream to write * @return unencoded kv size - * @throws IOException + * @throws IOException if an IO error occurs */ int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException; @@ -61,11 +61,11 @@ public interface HFileDataBlockEncoder { /** * Ends encoding for a block of KeyValues. Gives a chance for the encoder to do the finishing * stuff for the encoded block. It must be called at the end of block encoding. - * @param encodingCtx - * @param out - * @param uncompressedBytesWithHeader - * @param blockType - * @throws IOException + * @param encodingCtx the {@link HFileBlockEncodingContext} + * @param out the output stream to write + * @param uncompressedBytesWithHeader size before compression with header + * @param blockType type of the block + * @throws IOException if an IO error occurs */ void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, byte[] uncompressedBytesWithHeader, BlockType blockType) throws IOException; @@ -88,6 +88,7 @@ public interface HFileDataBlockEncoder { DataBlockEncoding getDataBlockEncoding(); /** + * @param isCompaction whether a compaction is in progress * @return the effective in-cache data block encoding, taking into account * whether we are doing a compaction. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index c3f864b..b4addde 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -116,8 +116,9 @@ public class HFileReaderV2 extends AbstractHFileReader { * @param fsdis input stream. * @param size Length of the stream. * @param cacheConf Cache configuration. 
- * @param hfs - * @param conf + * @param hfs the {@link HFileSystem} instance + * @param conf the {@link Configuration} instance + * @throws IOException if an IO error occurs */ public HFileReaderV2(final Path path, final FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf, @@ -313,10 +314,10 @@ public class HFileReaderV2 extends AbstractHFileReader { return null; } /** - * @param metaBlockName + * @param metaBlockName name of the meta block * @param cacheBlock Add block to cache, if found * @return block wrapped in a ByteBuffer, with header skipped - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public ByteBuffer getMetaBlock(String metaBlockName, boolean cacheBlock) @@ -505,7 +506,7 @@ public class HFileReaderV2 extends AbstractHFileReader { /** * @return Midkey for this file. We work with block boundaries only so * returned midkey is an approximation only. - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public byte[] midkey() throws IOException { @@ -627,7 +628,7 @@ public class HFileReaderV2 extends AbstractHFileReader { * 0 if we are at the given key, 1 if we are past the given key * -2 if the key is earlier than the first key of the file while * using a faked index key - * @throws IOException + * @throws IOException if an IO error occurs */ public int seekTo(Cell key, boolean rewind) throws IOException { HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); @@ -689,7 +690,7 @@ public class HFileReaderV2 extends AbstractHFileReader { * data block is found. * * @return the next block, or null if there are no more data blocks - * @throws IOException + * @throws IOException if an IO error occurs */ protected HFileBlock readNextDataBlock() throws IOException { long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); @@ -722,10 +723,10 @@ public class HFileReaderV2 extends AbstractHFileReader { } /** * Compare the given key against the current key - * @param comparator - * @param key - * @param offset - * @param length + * @param comparator the {@link KVComparator} + * @param key byte array of the given key + * @param offset offset of the given key + * @param length length of the given key * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater */ public abstract int compareKey(KVComparator comparator, byte[] key, int offset, @@ -819,7 +820,7 @@ public class HFileReaderV2 extends AbstractHFileReader { /** * Set our selves up for the next 'next' invocation, set up next block. * @return True is more to read else false if at the end. - * @throws IOException + * @throws IOException if an IO error occurs */ private boolean positionForNextBlock() throws IOException { // Methods are small so they get inlined because they are 'hot'. @@ -858,6 +859,7 @@ public class HFileReaderV2 extends AbstractHFileReader { * be called. * * @return true if successfully navigated to the next key/value + * @throws IOException if an IO error occurs */ @Override public boolean next() throws IOException { @@ -878,7 +880,7 @@ public class HFileReaderV2 extends AbstractHFileReader { * * @return false if empty file; i.e. a call to next would return false and * the current key and value are undefined. 
- * @throws IOException + * @throws IOException if an IO error occurs */ @Override public boolean seekTo() throws IOException { @@ -949,7 +951,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } /** - * @param v + * @param v int value to check * @return True if v < 0 or v > current block buffer limit. */ protected final boolean checkLen(final int v) { @@ -989,7 +991,7 @@ public class HFileReaderV2 extends AbstractHFileReader { /** * Read mvcc. Does checks to see if we even need to read the mvcc at all. - * @param position + * @param position the offset of mvcc in the block buffer */ protected void readMvccVersion(final int position) { // See if we even need to decode mvcc. @@ -1004,7 +1006,7 @@ public class HFileReaderV2 extends AbstractHFileReader { /** * Actually do the mvcc read. Does no checks. - * @param position + * @param position the offset of mvcc in the block buffer */ private void _readMvccVersion(final int position) { // This is Bytes#bytesToVint inlined so can save a few instructions in this hot method; i.e. @@ -1331,6 +1333,7 @@ public class HFileReaderV2 extends AbstractHFileReader { /** * Returns a buffer with the Bloom filter metadata. The caller takes * ownership of the buffer. + * @throws IOException if an IO error occurs */ @Override public DataInput getGeneralBloomFilterMetadata() throws IOException { @@ -1388,7 +1391,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } /** - * Returns false if block prefetching was requested for this file and has + * @return false if block prefetching was requested for this file and has * not completed, true otherwise */ @VisibleForTesting diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java index b5cadb1..4955b8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java @@ -69,6 +69,7 @@ public class HFileReaderV3 extends HFileReaderV2 { * The file system. * @param conf * Configuration + * @throws IOException if an IO error occurs */ public HFileReaderV3(final Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index deaa2c0..75f324d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -52,7 +52,7 @@ public interface HFileScanner { * If there is no key k[i+1] greater than or equal to the input key, then the * scanner will position itself at the end of the file and next() will return * false when it is called. - * @throws IOException + * @throws IOException if an IO error occurs */ @Deprecated int seekTo(byte[] key) throws IOException; @@ -78,7 +78,7 @@ public interface HFileScanner { * @return -1, if key < k[0], no position; * 0, such that k[i] = key and scanner is left in position i; and * 1, such that k[i] < key, and scanner is left in position i. - * @throws IOException + * @throws IOException if an IO error occurs */ @Deprecated int reseekTo(byte[] key) throws IOException; @@ -94,7 +94,7 @@ public interface HFileScanner { * that: k[i] < key. 
Furthermore: there may be a k[i+1], such that * k[i] < key <= k[i+1] but there may also NOT be a k[i+1], and next() will * return false (EOF). - * @throws IOException + * @throws IOException if an IO error occurs */ @Deprecated boolean seekBefore(byte[] key) throws IOException; @@ -106,13 +106,13 @@ public interface HFileScanner { * Positions this scanner at the start of the file. * @return False if empty file; i.e. a call to next would return false and * the current key and value are undefined. - * @throws IOException + * @throws IOException if an IO error occurs */ boolean seekTo() throws IOException; /** * Scans to the next entry in the file. * @return Returns false if you are at the end otherwise true if more in file. - * @throws IOException + * @throws IOException if an IO error occurs */ boolean next() throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java index edab0dc..cf23457 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java @@ -106,7 +106,17 @@ public class HFileWriterV2 extends AbstractHFileWriter { } } - /** Constructor that takes a path, creates and closes the output stream. */ + /** + * Constructor that takes a path, creates and closes the output stream. + * @param conf the {@link Configuration} instance + * @param cacheConf Cache configuration. + * @param fs the file system instance + * @param path the path to create on file system + * @param ostream the output stream + * @param comparator the {@link KVComparator} + * @param context the {@link HFileContext} + * @throws IOException if an IO error occurs + */ public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, FSDataOutputStream ostream, final KVComparator comparator, final HFileContext context) throws IOException { @@ -116,7 +126,10 @@ public class HFileWriterV2 extends AbstractHFileWriter { finishInit(conf); } - /** Additional initialization steps */ + /** + * Additional initialization steps + * @param conf the {@link Configuration} instance + */ protected void finishInit(final Configuration conf) { if (fsBlockWriter != null) throw new IllegalStateException("finishInit called twice"); @@ -140,7 +153,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { /** * At a block boundary, write all the inline blocks and opens new block. * - * @throws IOException + * @throws IOException if an IO error occurs */ protected void checkBlockBoundary() throws IOException { if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) @@ -209,7 +222,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { /** * Ready a new block for writing. * - * @throws IOException + * @throws IOException if an IO error occurs */ protected void newBlock() throws IOException { // This is where the next block begins. @@ -252,7 +265,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { * Comparator passed on construction. * * @param cell Cell to add. Cannot be empty nor null. 
- * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void append(final Cell cell) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java index 086395c..0533c68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java @@ -60,7 +60,17 @@ public class HFileWriterV3 extends HFileWriterV2 { } } - /** Constructor that takes a path, creates and closes the output stream. */ + /** + * Constructor that takes a path, creates and closes the output stream. + * @param conf the {@link Configuration} instance + * @param cacheConf Cache configuration. + * @param fs the file system instance + * @param path the path to create on file system + * @param ostream the output stream + * @param comparator the {@link KVComparator} + * @param fileContext the {@link HFileContext} + * @throws IOException if an IO error occurs + */ public HFileWriterV3(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, FSDataOutputStream ostream, final KVComparator comparator, final HFileContext fileContext) throws IOException { @@ -79,7 +89,7 @@ public class HFileWriterV3 extends HFileWriterV2 { * * @param cell * Cell to add. Cannot be empty nor null. - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void append(final Cell cell) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java index 470eb4a..130f1fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java @@ -37,6 +37,7 @@ public interface InlineBlockWriter { * @param closing * whether the file is being closed, in which case we need to write * out all available data and not wait to accumulate another block + * @return true if should write */ boolean shouldWriteBlock(boolean closing); @@ -46,6 +47,7 @@ public interface InlineBlockWriter { * * @param out * a stream (usually a compressing stream) to write the block to + * @throws IOException if an IO error occurs */ void writeInlineBlock(DataOutput out) throws IOException; @@ -62,7 +64,7 @@ public interface InlineBlockWriter { void blockWritten(long offset, int onDiskSize, int uncompressedSize); /** - * The type of blocks this block writer produces. + * @return the type of blocks this block writer produces. */ BlockType getInlineBlockType(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 48e4cad..a85ed64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -216,6 +216,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** * Constructor used for testing. Allows disabling of the eviction thread. 
+ * @param maxSize maximum size of this cache, in bytes + * @param blockSize expected average size of blocks, in bytes + * @param evictionThread whether to run evictions in a bg thread or not */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread) { this(maxSize, blockSize, evictionThread, @@ -260,6 +263,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param singleFactor percentage of total size for single-access blocks * @param multiFactor percentage of total size for multiple-access blocks * @param memoryFactor percentage of total size for in-memory blocks + * @param forceInMemory whether to keep the block in-memory forcibly */ public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel, @@ -321,7 +325,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory - * @param cacheDataInL1 + * @param cacheDataInL1 whether to cache the block in L1 tier */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, @@ -394,8 +398,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * per-cf or per-blocktype metrics it can discern from given * {@link LruCachedBlock} * - * @param cb - * @param evict + * @param cb the {@link LruCachedBlock} + * @param evict true if to evict + * @return current size of cache after updating */ protected long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); @@ -441,7 +446,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** * Whether the cache contains block with specified cacheKey - * @param cacheKey + * @param cacheKey key of the block * @return true if contains the block */ public boolean containsBlock(BlockCacheKey cacheKey) { @@ -484,7 +489,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** * Evict the block, and it will be cached by the victim handler if exists && * block may be read again later - * @param block + * @param block the block to evict * @param evictedByEvictionProcess true if the given block is evicted by * EvictionThread * @return the heap size of evicted block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index fb95007..5469810 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -282,6 +282,8 @@ public final class BucketAllocator { /** * Round up the given block size to bucket size, and get the corresponding * BucketSizeInfo + * @param blockSize the given block size + * @return the chosen {@link BucketSizeInfo} */ public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) { for (int i = 0; i < bucketSizes.length; ++i) @@ -410,8 +412,9 @@ public final class BucketAllocator { /** * Allocate a block with specified size. 
Return the offset * @param blockSize size of block - * @throws BucketAllocatorException,CacheFullException * @return the offset in the IOEngine + * @throws CacheFullException if cache is full for the requested size + * @throws BucketAllocatorException if fail to allocate a bucket */ public synchronized long allocateBlock(int blockSize) throws CacheFullException, BucketAllocatorException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 45c75e3..5ddb5cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -329,7 +329,7 @@ public class BucketCache implements BlockCache, HeapSize { * @param cacheKey block's cache key * @param cachedItem block buffer * @param inMemory if block is in-memory - * @param cacheDataInL1 + * @param cacheDataInL1 whether to cache block in the L1 tier */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java index de10667..dfc5bc1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java @@ -36,9 +36,9 @@ public class ByteBufferIOEngine implements IOEngine { /** * Construct the ByteBufferIOEngine with the given capacity - * @param capacity + * @param capacity the capacity of the engine * @param direct true if allocate direct buffer - * @throws IOException + * @throws IOException if an IO error occurs */ public ByteBufferIOEngine(long capacity, boolean direct) throws IOException { @@ -69,7 +69,7 @@ public class ByteBufferIOEngine implements IOEngine { * @param offset The offset in the ByteBufferArray of the first byte to be * read * @return number of bytes read - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public int read(ByteBuffer dstBuffer, long offset) throws IOException { @@ -83,7 +83,7 @@ public class ByteBufferIOEngine implements IOEngine { * @param srcBuffer the given byte buffer from which bytes are to be read * @param offset The offset in the ByteBufferArray of the first byte to be * written - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java index 7b6b25f..d5f1a50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java @@ -82,7 +82,7 @@ public class FileIOEngine implements IOEngine { * @param dstBuffer the given byte buffer into which bytes are to be written * @param offset The offset in the file where the first byte to be read * @return number of bytes read - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public int read(ByteBuffer dstBuffer, long offset) throws IOException { @@ -93,7 +93,7 @@ public class FileIOEngine 
implements IOEngine { * Transfers data from the given byte buffer to file * @param srcBuffer the given byte buffer from which bytes are to be read * @param offset The offset in the file where the first byte to be written - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void write(ByteBuffer srcBuffer, long offset) throws IOException { @@ -102,7 +102,7 @@ public class FileIOEngine implements IOEngine { /** * Sync the data to file after writing - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void sync() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java index 430c5af..f7685b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/IOEngine.java @@ -39,7 +39,7 @@ public interface IOEngine { * @param dstBuffer the given byte buffer into which bytes are to be written * @param offset The offset in the IO engine where the first byte to be read * @return number of bytes read - * @throws IOException + * @throws IOException if an IO error occurs */ int read(ByteBuffer dstBuffer, long offset) throws IOException; @@ -48,13 +48,13 @@ public interface IOEngine { * @param srcBuffer the given byte buffer from which bytes are to be read * @param offset The offset in the IO engine where the first byte to be * written - * @throws IOException + * @throws IOException if an IO error occurs */ void write(ByteBuffer srcBuffer, long offset) throws IOException; /** * Sync the data to IOEngine after writing - * @throws IOException + * @throws IOException if an IO error occurs */ void sync() throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index c23fc84..5d9b53e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -41,8 +41,8 @@ public class Driver { } /** - * @param args - * @throws Throwable + * @param args arguments to run with + * @throws Throwable if error occurs */ public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 6cd0602..60bee3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -88,11 +88,11 @@ implements TableMap { * * Pass the new key and value to reduce. * If any of the grouping columns are not found in the value, the record is skipped. - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * @param key the input key. + * @param value the input value. + * @param output collects mapped keys and values. + * @param reporter facility to report progress. + * @throws IOException if an IO error occurs */ public void map(ImmutableBytesWritable key, Result value, OutputCollector output, @@ -111,7 +111,7 @@ implements TableMap { * * Override this method if you want to deal with nulls differently. 
* - * @param r + * @param r the record to parse * @return array of byte values */ protected byte[][] extractKeyValues(Result r) { @@ -140,7 +140,7 @@ implements TableMap { * Create a key by concatenating multiple column values. * Override this function in order to produce different types of keys. * - * @param vals + * @param vals values of multiple column * @return key generated by concatenating multiple column values */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index be131e8..bf91c9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -39,9 +39,6 @@ import org.apache.hadoop.mapred.Partitioner; * This is used to partition the output keys into groups of keys. * Keys are grouped according to the regions that currently exist * so that each reducer fills a single region so load is distributed. - * - * @param - * @param */ @InterfaceAudience.Public @InterfaceStability.Stable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index 2f5a9b8..dca249d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -62,11 +62,11 @@ implements TableMap { /** * Pass the key, value to reduce - * @param key - * @param value - * @param output - * @param reporter - * @throws IOException + * @param key the input key. + * @param value the input value. + * @param output collects mapped keys and values. + * @param reporter facility to report progress. + * @throws IOException if an IO error occurs */ public void map(ImmutableBytesWritable key, Result value, OutputCollector output, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index 8d4d301..9827049 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -45,11 +45,11 @@ implements TableReduce { /** * No aggregation, output pairs of (key, record) - * @param key - * @param values - * @param output - * @param reporter - * @throws IOException + * @param key the key. + * @param values the list of values to reduce. + * @param output to collect keys and combined values. + * @param reporter facility to report progress. 
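The map(key, value, output, reporter) contract documented above is easiest to see in a minimal mapper. The sketch below uses the old org.apache.hadoop.hbase.mapred API with a hypothetical class name and simply passes each row through unchanged; it is illustrative only, not part of this patch.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

/** Hypothetical pass-through mapper for the old mapred API. */
public class PassThroughTableMap extends MapReduceBase
    implements TableMap<ImmutableBytesWritable, Result> {

  @Override
  public void map(ImmutableBytesWritable key, Result value,
      OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
      throws IOException {
    // key: input row key; value: row contents; output: collects mapped pairs;
    // reporter: progress reporting, exactly as the javadoc tags above describe.
    output.collect(key, value);
  }
}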
+ * @throws IOException if an IO error occurs */ public void reduce(ImmutableBytesWritable key, Iterator values, OutputCollector output, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 0ce64c3..ab9e331 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -71,9 +71,9 @@ public class RowCounter extends Configured implements Tool { } /** - * @param args + * @param args arguments to create the job * @return the JobConf - * @throws IOException + * @throws IOException if an IO error occurs */ public JobConf createSubmittableJob(String[] args) throws IOException { JobConf c = new JobConf(getConf(), getClass()); @@ -113,8 +113,8 @@ public class RowCounter extends Configured implements Tool { } /** - * @param args - * @throws Exception + * @param args arguments to run with + * @throws Exception if error occurs */ public static void main(String[] args) throws Exception { int errCode = ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), args); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index ddba8a1..62cf047 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -227,7 +227,7 @@ implements InputFormat { * * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @throws IOException if an IO error occurs */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { @@ -247,6 +247,7 @@ implements InputFormat { /** * Allows subclasses to get the {@link HTable}. + * @return an HTable instance * @deprecated use {@link #getTable()} */ @Deprecated @@ -256,6 +257,8 @@ implements InputFormat { /** * Allows subclasses to get the {@link Table}. + * @param an instance of Table implementation + * @return the instance of Table implementation */ protected Table getTable() { if (table == null) { @@ -288,7 +291,7 @@ implements InputFormat { /** * Allows subclasses to set the {@link Filter} to be used. * - * @param rowFilter + * @param rowFilter filter to set */ protected void setRowFilter(Filter rowFilter) { this.rowFilter = rowFilter; @@ -308,6 +311,8 @@ implements InputFormat { * it is critical that initializeTable not be called multiple times since this will leak * Connection instances. * + * @param job the JobConf instance + * @throws IOException if an IO error occurs */ protected void initialize(JobConf job) throws IOException { } @@ -316,7 +321,7 @@ implements InputFormat { * Close the Table and related objects that were initialized via * {@link #initializeTable(Connection, TableName)}. 
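The initialize(JobConf)/initializeTable(Connection, TableName) hooks documented above are meant to be called once per input format instance. A sketch of how a subclass might wire them together follows; the class name and table name are hypothetical, error handling is omitted, and it assumes the mapred TableInputFormatBase has no abstract methods to implement.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
import org.apache.hadoop.mapred.JobConf;

/** Hypothetical input format that defers table setup to initialize(JobConf). */
public class ExampleTableInputFormat extends TableInputFormatBase {

  @Override
  protected void initialize(JobConf job) throws IOException {
    // Create an unmanaged Connection and hand it to initializeTable exactly once;
    // calling initializeTable more than once would leak Connection instances.
    Connection connection = ConnectionFactory.createConnection(job);
    initializeTable(connection, TableName.valueOf("example_table"));
  }
}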
* - * @throws IOException + * @throws IOException if an IO error occurs */ protected void closeTable() throws IOException { close(table, connection); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index b5fefbb..1e9b3c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -97,6 +97,7 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. * @param addDependencyJars upload HBase jars and jars for any of the configured * job classes via the distributed cache (tmpjars). + * @param inputFormat the InputFormat to set */ public static void initTableMapJob(String table, String columns, Class mapper, @@ -330,6 +331,8 @@ public class TableMapReduceUtil { /** * @see org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job) + * @param job the JobConf instance + * @throws IOException if an IO error occurs */ public static void addDependencyJars(JobConf job) throws IOException { org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index 6e0d9e7..24a8ed6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -57,6 +57,8 @@ public class TableOutputFormat extends FileOutputFormat { /** * Restart from survivable exceptions by creating a new scanner. * - * @param firstRow - * @throws IOException + * @param firstRow the first row to start from + * @throws IOException if an IO error occurs */ public void restart(byte[] firstRow) throws IOException { this.recordReaderImpl.restart(firstRow); @@ -52,7 +52,7 @@ implements RecordReader { /** * Build the scanner. Not done in constructor to allow for extension. * - * @throws IOException + * @throws IOException if an IO error occurs */ public void init() throws IOException { this.recordReaderImpl.restart(this.recordReaderImpl.getStartRow()); @@ -132,7 +132,7 @@ implements RecordReader { * @param key HStoreKey as input key. * @param value MapWritable as input value * @return true if there was more data - * @throws IOException + * @throws IOException if an IO error occurs */ public boolean next(ImmutableBytesWritable key, Result value) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index 1536330..4c16ebb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -62,8 +62,8 @@ public class TableRecordReaderImpl { /** * Restart from survivable exceptions by creating a new scanner. * - * @param firstRow - * @throws IOException + * @param firstRow the first row to start from + * @throws IOException if an IO error occurs */ public void restart(byte[] firstRow) throws IOException { Scan currentScan; @@ -104,7 +104,7 @@ public class TableRecordReaderImpl { /** * Build the scanner. 
Not done in constructor to allow for extension. * - * @throws IOException + * @throws IOException if an IO error occurs */ public void init() throws IOException { restart(startRow); @@ -195,7 +195,7 @@ public class TableRecordReaderImpl { * @param key HStoreKey as input key. * @param value MapWritable as input value * @return true if there was more data - * @throws IOException + * @throws IOException if an IO error occurs */ public boolean next(ImmutableBytesWritable key, Result value) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java index 237fe47..2aa6556 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java @@ -49,10 +49,10 @@ public class TableSplit implements InputSplit, Comparable { /** * Constructor - * @param tableName - * @param startRow - * @param endRow - * @param location + * @param tableName the fully qualified table name instance of the table. + * @param startRow start row of the split + * @param endRow end row of the split + * @param location the region location of the start row */ public TableSplit(TableName tableName, byte [] startRow, byte [] endRow, final String location) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java index 001f64d..8558778 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java @@ -63,7 +63,7 @@ public class CellCreator { * @param voffset value offset * @param vlength value length * @return created Cell - * @throws IOException + * @throws IOException if an IO error occurs */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, @@ -88,7 +88,7 @@ public class CellCreator { * @param vlength value length * @param visExpression visibility expression to be associated with cell * @return created Cell - * @throws IOException + * @throws IOException if an IO error occurs */ @Deprecated public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, @@ -116,9 +116,9 @@ public class CellCreator { * @param value column value * @param voffset value offset * @param vlength value length - * @param tags + * @param tags list of tags * @return created Cell - * @throws IOException + * @throws IOException if an IO error occurs */ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index 9737b55..4f0dff4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -33,8 +33,8 @@ import org.apache.hadoop.util.ProgramDriver; @InterfaceStability.Stable public class Driver { /** - * @param args - * @throws Throwable + * @param args arguments + * @throws Throwable if an error occurs */ public static void main(String[] args) throws Throwable 
{ ProgramDriver pgd = new ProgramDriver(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index 6d6feb1..430d3f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -85,6 +85,9 @@ public class HFileOutputFormat extends FileOutputFormat * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. + * @param job the job for this incremental load + * @param table the table to operate on + * @throws IOException if an IO error occurs */ public static void configureIncrementalLoad(Job job, HTable table) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 1522fb9..dff9619 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -358,8 +358,11 @@ public class HFileOutputFormat2 *

* The user should be sure to set the map output value class to either KeyValue or Put before * running this function. - * + * @param job the job for this incremental load + * @param table the table to operate on + * * @deprecated Use {@link #configureIncrementalLoad(Job, Table, RegionLocator)} instead. + * @throws IOException if an IO error occurs */ @Deprecated public static void configureIncrementalLoad(Job job, HTable table) @@ -380,6 +383,10 @@ public class HFileOutputFormat2 * * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. + * @param job the job for this incremental load + * @param table the table to operate on + * @param regionLocator the RegionLocator instance to get region location information + * @throws IOException if an IO error occurs */ public static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator) throws IOException { @@ -399,6 +406,10 @@ public class HFileOutputFormat2 * * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. + * @param job the job for this incremental load + * @param tableDescriptor descriptor of the table to operate on + * @param regionLocator the RegionLocator instance to get region location information + * @throws IOException if an IO error occurs */ public static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor, RegionLocator regionLocator) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index 062219b..528eb1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -305,9 +305,11 @@ public class Import { /** * Attempt to filter out the keyvalue + * @param filter the filter to apply * @param kv {@link KeyValue} on which to apply the filter * @return null if the key should not be written, otherwise returns the original * {@link KeyValue} + * @throws IOException if an IO error occurs */ public static Cell filterKv(Filter filter, Cell kv) throws IOException { // apply the filter and skip this kv if the filter doesn't apply @@ -410,6 +412,7 @@ public class Import { * @param conf Configuration to update (will be passed to the job) * @param clazz {@link Filter} subclass to instantiate on the server. * @param filterArgs List of arguments to pass to the filter on instantiation + * @throws IOException if an IO error occurs */ public static void addFilterAndArguments(Configuration conf, Class clazz, List filterArgs) throws IOException { @@ -506,6 +509,9 @@ public class Import { * need to flush all the regions of the table as the data is held in memory and is also not * present in the Write Ahead Log to replay in scenarios of a crash. 
This method flushes all the * regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL} + * @param conf the configuration + * @throws IOException if an IO error occurs + * @throws InterruptedException if the thread is interrupted */ public static void flushRegionsIfNecessary(Configuration conf) throws IOException, InterruptedException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index 55139f1..865cf78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -143,7 +143,7 @@ public class ImportTsv extends Configured implements Tool { /** * @param columnsSpecification the list of columns to parser out, comma separated. * The row key should be the special token TsvParser.ROWKEY_COLUMN_SPEC - * @param separatorStr + * @param separatorStr the separator */ public TsvParser(String columnsSpecification, String separatorStr) { // Configure separator @@ -411,10 +411,10 @@ public class ImportTsv extends Configured implements Tool { /** * Return starting position and length of row key from the specified line bytes. - * @param lineBytes - * @param length + * @param lineBytes bytes of the line + * @param length length of the line * @return Pair of row key offset and length. - * @throws BadTsvLineException + * @throws BadTsvLineException if the given line is invalid */ public Pair parseRowKey(byte[] lineBytes, int length) throws BadTsvLineException { @@ -449,6 +449,7 @@ public class ImportTsv extends Configured implements Tool { * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. + * @throws ClassNotFoundException if class specified by MAPPER_CONF_KEY cannot be located */ public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException, ClassNotFoundException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 78e7c47..c2b0c17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -328,7 +328,9 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * * @param hfofDir the directory that was provided as the output path * of a job using HFileOutputFormat + * @param admin the {@link Admin} instance for operation * @param table the table to load into + * @param regionLocator the {@link RegionLocator} instance to locate region information * @throws TableNotFoundException if table does not yet exist */ @SuppressWarnings("deprecation") @@ -456,6 +458,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * This takes the LQI's grouped by likely regions and attempts to bulk load * them. Any failures are re-queued for another pass with the * groupOrSplitPhase. 
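Taken together, the configureIncrementalLoad and doBulkLoad signatures documented in this patch make up the usual HBase 1.x bulk-load flow. The driver below is a rough sketch: the table name, output path and job wiring are hypothetical, and the mapper setup is elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("example_table"); // hypothetical table
    Path hfileDir = new Path("/tmp/example-hfiles");          // hypothetical output dir

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName);
         Admin admin = conn.getAdmin()) {

      Job job = Job.getInstance(conf, "example-bulkload");
      // Mapper/reducer setup omitted; the map output value class must be KeyValue or Put.
      FileOutputFormat.setOutputPath(job, hfileDir);
      // Wires the partitioner, reducer and output format to match the table's regions.
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);

      if (job.waitForCompletion(true)) {
        // Move the generated HFiles into the table's regions.
        new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
      }
    }
  }
}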
+ * @param table the table for bulkload + * @param conn the connection + * @param pool the thread pool + * @param queue the queue to store LQIs to retry + * @param regionGroups map to store region group and LQIs mapping + * @throws IOException if an IO error occurs */ protected void bulkLoadPhase(final Table table, final Connection conn, ExecutorService pool, Deque queue, @@ -622,7 +630,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * LQI's corresponding to the resultant hfiles. * * protected for testing - * @throws IOException + * @param regionGroups map to store region group and LQIs mapping + * @param item the LQI to assign + * @param table the table to operate on + * @param startEndKeys boundary of the region + * @return the LQIs after split, or null if no split needed + * @throws IOException if an IO error occurs */ protected List groupOrSplit(Multimap regionGroups, final LoadQueueItem item, final Table table, @@ -706,6 +719,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * (HBASE-9508). * This will be removed in HBase 2.0.0. * Use {@link #tryAtomicRegionLoad(Connection, TableName, byte[], Collection)}. + * @param conn the connection instance + * @param tableName name of the table + * @param first first row of the region to load against + * @param lqis the LQIs to load + * @return the LQIs to retry if any recoverable failure + * @throws IOException if an IO error occurs */ @Deprecated protected List tryAtomicRegionLoad(final HConnection conn, @@ -724,6 +743,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * * Protected for testing. * + * @param conn the connection instance + * @param tableName name of the table + * @param first first row of the region to load against + * @param lqis the LQIs to load * @return empty list if success, list of items to retry on recoverable * failure */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index 9ba59f7..b985d6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -275,6 +275,7 @@ public abstract class MultiTableInputFormatBase extends /** * Allows subclasses to get the list of {@link Scan} objects. + * @return the list of scan objects */ protected List getScans() { return this.scans; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java index 755f7cd..81693b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java @@ -54,7 +54,6 @@ import org.apache.hadoop.util.ReflectionUtils; * The Map-Reduce job has to be configured with the mapper to use via * {@link #setMapperClass} and the number of thread the thread-pool can use with the * {@link #getNumberOfThreads} method. The default value is 10 threads. - *
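For the MultithreadedTableMapper described just above, per-job wiring is a couple of static calls. The sketch below wraps IdentityTableMapper; setNumberOfThreads is assumed to be the setter counterpart of the getNumberOfThreads method mentioned in the javadoc.

import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper;
import org.apache.hadoop.mapreduce.Job;

public class MultithreadedMapperSetup {
  /** Sketch: run IdentityTableMapper with 16 threads inside each map task. */
  public static void configure(Job job) {
    job.setMapperClass(MultithreadedTableMapper.class);
    MultithreadedTableMapper.setMapperClass(job, IdentityTableMapper.class);
    // Assumed setter mirroring getNumberOfThreads; the default is 10 threads.
    MultithreadedTableMapper.setNumberOfThreads(job, 16);
  }
}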

*/ public class MultithreadedTableMapper extends TableMapper { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java index 1c65068..6f1355d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java @@ -40,7 +40,7 @@ import org.apache.hadoop.mapreduce.Partitioner; * divided in ten will not make regions whose range is 0-10, 10-20, and so on. * Make your own partitioner if you need the region spacing to come out a * particular way. - * @param + * @param class of value * @see #START * @see #END */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index bc2537b..0b4a6d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -270,6 +270,8 @@ implements Configurable { /** * Sets split table in map-reduce job. + * @param job the map-reduce job + * @param tableName the name of the table */ public static void configureSplitTable(Job job, TableName tableName) { job.getConfiguration().set(SPLIT_TABLE, tableName.getNameAsString()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index ef35df6..8577e0a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -27,8 +27,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import javax.naming.NamingException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -283,12 +281,7 @@ extends InputFormat { } InetAddress regionAddress = isa.getAddress(); String regionLocation; - try { - regionLocation = reverseDNS(regionAddress); - } catch (NamingException e) { - LOG.warn("Cannot resolve the host name for " + regionAddress + " because of " + e); - regionLocation = location.getHostname(); - } + regionLocation = reverseDNS(regionAddress); byte[] startRow = scan.getStartRow(); byte[] stopRow = scan.getStopRow(); @@ -344,9 +337,12 @@ extends InputFormat { /** * @deprecated mistakenly made public in 0.98.7. scope will change to package-private + * @param ipAddress IP address to reverse to host name + * @return the relative host name + * @throws UnknownHostException if no host found for the given address */ @Deprecated - public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException { + public String reverseDNS(InetAddress ipAddress) throws UnknownHostException { String hostName = this.reverseDNSCacheMap.get(ipAddress); if (hostName == null) { String ipAddressString = null; @@ -561,6 +557,7 @@ extends InputFormat { * Allows subclasses to get the {@link HTable}. * * @deprecated use {@link #getTable()} + * @return the {@link HTable} instance */ @Deprecated protected HTable getHTable() { @@ -569,6 +566,7 @@ extends InputFormat { /** * Allows subclasses to get the {@link RegionLocator}. 
+ * @return the {@link RegionLocator} instance */ protected RegionLocator getRegionLocator() { if (regionLocator == null) { @@ -579,6 +577,7 @@ extends InputFormat { /** * Allows subclasses to get the {@link Table}. + * @return the {@link Table} instance */ protected Table getTable() { if (table == null) { @@ -589,6 +588,7 @@ extends InputFormat { /** * Allows subclasses to get the {@link Admin}. + * @return the {@link Admin} instance */ protected Admin getAdmin() { if (admin == null) { @@ -604,7 +604,7 @@ extends InputFormat { * retreiving an Admin interface to the HBase cluster. * * @param table The table to get the data from. - * @throws IOException + * @throws IOException if an IO error occurs * @deprecated Use {@link #initializeTable(Connection, TableName)} instead. */ @Deprecated @@ -636,7 +636,7 @@ extends InputFormat { * * @param connection The {@link Connection} to the HBase cluster. MUST be unmanaged. We will close. * @param tableName The {@link TableName} of the table to process. - * @throws IOException + * @throws IOException if an IO error occurs */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { @@ -691,7 +691,8 @@ extends InputFormat { * if an initialize call is needed, but this behavior may change in the future. In particular, * it is critical that initializeTable not be called multiple times since this will leak * Connection instances. - * + * @param context the {@link JobContext} for initialization + * @throws IOException if an IO error occurs */ protected void initialize(JobContext context) throws IOException { } @@ -700,7 +701,7 @@ extends InputFormat { * Close the Table and related objects that were initialized via * {@link #initializeTable(Connection, TableName)}. * - * @throws IOException + * @throws IOException if an IO error occurs */ protected void closeTable() throws IOException { close(admin, table, regionLocator, connection); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index ce273f5..9001d74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -157,6 +157,7 @@ public class TableMapReduceUtil { * carrying all necessary HBase configuration. * @param addDependencyJars upload HBase jars and jars for any of the configured * job classes via the distributed cache (tmpjars). + * @param inputFormatClass the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, @@ -297,6 +298,7 @@ public class TableMapReduceUtil { * direct memory will likely cause the map tasks to OOM when opening the region. This * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user * wants to override this behavior in their job. + * @param conf the {@link Configuration} instance to set */ public static void resetCacheConfig(Configuration conf) { conf.setFloat( @@ -712,6 +714,7 @@ public class TableMapReduceUtil { * @param conf The Configuration object to extend with dependencies. 
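The initTableMapperJob and dependency-jar helpers documented above are normally combined when setting up a scan-driven job. A sketch follows with a hypothetical table and job name; the six-argument initTableMapperJob overload used here also adds the HBase dependency jars to the job.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class ScanJobSetup {
  /** Sketch: map over "example_table" (hypothetical) and discard the output. */
  public static Job createJob() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-scan");

    Scan scan = new Scan();
    scan.setCaching(500);        // larger scanner caching for MR scans
    scan.setCacheBlocks(false);  // do not pollute the block cache with a full scan

    // Also ships the HBase jars via the distributed cache (tmpjars).
    TableMapReduceUtil.initTableMapperJob("example_table", scan,
        IdentityTableMapper.class, ImmutableBytesWritable.class, Result.class, job);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    return job;
  }
}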
* @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil * @see PIG-3285 + * @throws IOException if an IO error occurs */ public static void addHBaseDependencyJars(Configuration conf) throws IOException { @@ -749,6 +752,8 @@ public class TableMapReduceUtil { /** * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. * Also exposed to shell scripts via `bin/hbase mapredcp`. + * @param conf the configuration to read + * @return the classpath string */ public static String buildDependencyClasspath(Configuration conf) { if (conf == null) { @@ -773,6 +778,8 @@ public class TableMapReduceUtil { * Add the HBase dependency jars as well as jars for any of the configured * job classes to the job configuration, so that JobClient will ship them * to the cluster and add them to the DistributedCache. + * @param job The {@link Job} to set + * @throws IOException if an IO error occurs */ public static void addDependencyJars(Job job) throws IOException { addHBaseDependencyJars(job.getConfiguration()); @@ -797,6 +804,9 @@ public class TableMapReduceUtil { * Add the jars containing the given classes to the job's configuration * such that JobClient will ship them to the cluster and add them to * the DistributedCache. + * @param conf the configuration to read + * @param classes the classes to search for the jars + * @throws IOException if an IO error occurs */ public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 7b23075..3e31b97 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -89,8 +89,7 @@ implements Configurable { private BufferedMutator mutator; /** - * @throws IOException - * + * @throws IOException if an IO error occurs */ public TableRecordWriter() throws IOException { String tableName = conf.get(OUTPUT_TABLE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 21dc213..42f35fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -93,7 +93,7 @@ extends RecordReader { * Returns the current key. * * @return The current key. - * @throws IOException + * @throws IOException if an IO error occurs * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey() */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 4349537..83ee1c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -98,8 +98,9 @@ public class TableRecordReaderImpl { /** * In new mapreduce APIs, TaskAttemptContext has two getCounter methods * Check if getCounter(String, String) method is available. + * @param context the {@link TaskAttemptContext} instance * @return The getCounter method or null if not available. 
- * @throws IOException + * @throws IOException if an IO error occurs */ protected static Method retrieveGetCounterWithStringsParams(TaskAttemptContext context) throws IOException { @@ -140,11 +141,12 @@ public class TableRecordReaderImpl { /** * Build the scanner. Not done in constructor to allow for extension. * - * @throws IOException, InterruptedException + * @param inputsplit the input split + * @param context the task attempt context + * @throws IOException if an IO error occurs */ public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + TaskAttemptContext context) throws IOException { if (context != null) { this.context = context; getCounter = retrieveGetCounterWithStringsParams(context); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java index 8496868..ee9a0c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java @@ -294,6 +294,9 @@ public class TableSnapshotInputFormatImpl { * we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top * host with the best locality. + * @param conf the configuration to read from + * @param blockDistribution the block distribution + * @return locations to pass */ public static List getBestLocations( Configuration conf, HDFSBlocksDistribution blockDistribution) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java index 48d7708..ec616d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java @@ -42,6 +42,7 @@ public interface VisibilityExpressionResolver extends Configurable { * @param visExpression the label expression * @return The list of tags corresponds to the visibility expression. These tags will be stored * along with the Cells. + * @throws IOException if an IO error occurs */ List createVisibilityExpTags(String visExpression) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java index e5257e5..2728e7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java @@ -60,9 +60,10 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * Initialize a globally barriered procedure for master. 
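The locality cutoff heuristic described above for getBestLocations can be exercised directly. The host names and block weights below are made up, and HDFSBlocksDistribution#addHostsAndBlockWeight is assumed to be available for building the distribution by hand; this is a sketch, not part of the patch.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;

public class BestLocationsExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Fabricated locality data: host1 holds far more of the region's blocks than host3.
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    dist.addHostsAndBlockWeight(new String[] { "host1" }, 100L);
    dist.addHostsAndBlockWeight(new String[] { "host2" }, 90L);
    dist.addHostsAndBlockWeight(new String[] { "host3" }, 10L);

    // Only hosts within 80% (by default) of the best host's locality are kept.
    List<String> hosts = TableSnapshotInputFormatImpl.getBestLocations(conf, dist);
    System.out.println(hosts); // expected roughly: [host1, host2]
  }
}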
* * @param master Master service interface - * @throws KeeperException - * @throws IOException - * @throws UnsupportedOperationException + * @param metricsMaster Master metrics + * @throws KeeperException if an unexpected zk error occurs + * @throws IOException if an unexpected IO error occurs + * @throws UnsupportedOperationException if the operation is not supported */ public abstract void initialize(MasterServices master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException; @@ -71,7 +72,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * Execute a distributed procedure on cluster * * @param desc Procedure description - * @throws IOException + * @throws IOException if an unexpected IO error occurs */ public void execProcedure(ProcedureDescription desc) throws IOException { @@ -82,7 +83,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * * @param desc Procedure description * @return data returned from the procedure execution, null if no data - * @throws IOException + * @throws IOException if an unexpected IO error occurs */ public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException { @@ -94,7 +95,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * * @param desc Procedure description * @return true if the specified procedure is finished successfully - * @throws IOException + * @throws IOException if an unexpected IO error occurs */ public abstract boolean isProcedureDone(ProcedureDescription desc) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java index 198bcdd..6ee8a16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Procedure.java @@ -239,7 +239,7 @@ public class Procedure implements Callable, ForeignExceptionListener { /** * Sends a message to Members to create a new {@link Subprocedure} for this Procedure and execute * the {@link Subprocedure#acquireBarrier} step. - * @throws ForeignException + * @throws ForeignException if the procedure was already marked as failed */ public void sendGlobalBarrierStart() throws ForeignException { // start the procedure @@ -260,7 +260,7 @@ public class Procedure implements Callable, ForeignExceptionListener { * should only be executed after all members have completed its * {@link Subprocedure#acquireBarrier()} call successfully. This triggers the member * {@link Subprocedure#insideBarrier} method. 
- * @throws ForeignException + * @throws ForeignException if we can't reach the remote notification mechanism */ public void sendGlobalBarrierReached() throws ForeignException { try { @@ -291,7 +291,7 @@ public class Procedure implements Callable, ForeignExceptionListener { /** * Call back triggered by an individual member upon successful local barrier acquisition - * @param member + * @param member name of the member that acquired */ public void barrierAcquiredByMember(String member) { LOG.debug("member: '" + member + "' joining acquired barrier for procedure '" + procName @@ -313,8 +313,8 @@ public class Procedure implements Callable, ForeignExceptionListener { /** * Call back triggered by a individual member upon successful local in-barrier execution and * release - * @param member - * @param dataFromMember + * @param member name of the member that executed and released its barrier + * @param dataFromMember the data that the member returned along with the notification */ public void barrierReleasedByMember(String member, byte[] dataFromMember) { boolean removed = false; @@ -339,8 +339,8 @@ public class Procedure implements Callable, ForeignExceptionListener { * Waits until the entire procedure has globally completed, or has been aborted. If an * exception is thrown the procedure may or not have run cleanup to trigger the completion latch * yet. - * @throws ForeignException - * @throws InterruptedException + * @throws ForeignException type of error the monitor can throw, if the task fails + * @throws InterruptedException if we are interrupted while waiting on latch */ public void waitForCompleted() throws ForeignException, InterruptedException { waitForLatch(completedLatch, monitor, wakeFrequency, procName + " completed"); @@ -351,8 +351,8 @@ public class Procedure implements Callable, ForeignExceptionListener { * exception is thrown the procedure may or not have run cleanup to trigger the completion latch * yet. * @return data returned from procedure members upon successfully completing subprocedure. - * @throws ForeignException - * @throws InterruptedException + * @throws ForeignException type of error the monitor can throw, if the task fails + * @throws InterruptedException if we are interrupted while waiting */ public HashMap waitForCompletedWithRet() throws ForeignException, InterruptedException { waitForCompleted(); @@ -361,7 +361,8 @@ public class Procedure implements Callable, ForeignExceptionListener { /** * Check if the entire procedure has globally completed, or has been aborted. - * @throws ForeignException + * @return true if the entire procedure complete + * @throws ForeignException exceptions to re-throw if any */ public boolean isCompleted() throws ForeignException { // Rethrow exception if any diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java index d55fc85..4066187 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java @@ -67,7 +67,7 @@ public class ProcedureCoordinator { * The rpc object registers the ProcedureCoordinator and starts any threads in this * constructor. * - * @param rpcs + * @param rpcs RPCs to the coordinate * @param pool Used for executing procedures. 
*/ public ProcedureCoordinator(ProcedureCoordinatorRpcs rpcs, ThreadPoolExecutor pool) { @@ -80,9 +80,10 @@ public class ProcedureCoordinator { * The rpc object registers the ProcedureCoordinator and starts any threads in * this constructor. * - * @param rpcs + * @param rpcs RPCs to the coordinate * @param pool Used for executing procedures. - * @param timeoutMillis + * @param timeoutMillis timeout in milliseconds + * @param wakeTimeMillis wake time in milliseconds */ public ProcedureCoordinator(ProcedureCoordinatorRpcs rpcs, ThreadPoolExecutor pool, long timeoutMillis, long wakeTimeMillis) { @@ -96,8 +97,9 @@ public class ProcedureCoordinator { /** * Default thread pool for the procedure * - * @param coordName + * @param coordName name of the procedure coordinator * @param opThreads the maximum number of threads to allow in the pool + * @return the default thread pool */ public static ThreadPoolExecutor defaultPool(String coordName, int opThreads) { return defaultPool(coordName, opThreads, KEEP_ALIVE_MILLIS_DEFAULT); @@ -106,9 +108,10 @@ public class ProcedureCoordinator { /** * Default thread pool for the procedure * - * @param coordName + * @param coordName name of the procedure coordinator * @param opThreads the maximum number of threads to allow in the pool * @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks + * @return the default thread pool */ public static ThreadPoolExecutor defaultPool(String coordName, int opThreads, long keepAliveMillis) { @@ -119,7 +122,7 @@ public class ProcedureCoordinator { /** * Shutdown the thread pools and release rpc resources - * @throws IOException + * @throws IOException if an IO error occurs */ public void close() throws IOException { // have to use shutdown now to break any latch waiting @@ -240,6 +243,7 @@ public class ProcedureCoordinator { /** * Kick off the named procedure * Currently only one procedure with the same type and name is allowed to run at a time. + * @param fed state holding entity for foreign error handling * @param procName name of the procedure to start * @param procArgs arguments for the procedure * @param expectedMembers expected members to start diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java index 631c270..ffcb76f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java @@ -34,7 +34,7 @@ public interface ProcedureCoordinatorRpcs extends Closeable { /** * Initialize and start threads necessary to connect an implementation's rpc mechanisms. - * @param listener + * @param listener the coordinator for procedure * @return true if succeed, false if encountered initialization errors. */ boolean start(final ProcedureCoordinator listener); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java index 128d965..c4aa392 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java @@ -28,6 +28,7 @@ public abstract class ProcedureManager { * Return the unique signature of the procedure. This signature uniquely * identifies the procedure. 
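Putting the ProcedureCoordinator constructor and defaultPool javadoc above together, coordinator-side setup reduces to a couple of lines. The sketch below assumes a ProcedureCoordinatorRpcs implementation has already been built elsewhere; how that object is created is outside its scope.

import java.util.concurrent.ThreadPoolExecutor;

import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;

public class CoordinatorSetup {
  /** Sketch: build a coordinator from an already-constructed rpcs implementation. */
  public static ProcedureCoordinator create(ProcedureCoordinatorRpcs rpcs) {
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool("example-coordinator", 4);
    // The constructor registers this coordinator with rpcs and starts its threads.
    return new ProcedureCoordinator(rpcs, pool);
  }
}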
By default, this signature is the string used in * the procedure controller (i.e., the root ZK node name for the procedure) + * @return unique signature of the procedure */ public abstract String getProcedureSignature(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java index e06c9ef..4f390e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java @@ -51,6 +51,8 @@ public abstract class ProcedureManagerHost { /** * Load system procedures. Read the class names from configuration. * Called by constructor. + * @param conf the given Configuration instance + * @param confKey configuration key to get class names */ protected void loadUserProcedures(Configuration conf, String confKey) { Class implClass = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java index 1f22022..224fe79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java @@ -69,8 +69,9 @@ public class ProcedureMember implements Closeable { /** * Default thread pool for the procedure * - * @param memberName + * @param memberName name of the member * @param procThreads the maximum number of threads to allow in the pool + * @return the thread pool for procedure */ public static ThreadPoolExecutor defaultPool(String memberName, int procThreads) { return defaultPool(memberName, procThreads, KEEP_ALIVE_MILLIS_DEFAULT); @@ -79,9 +80,10 @@ public class ProcedureMember implements Closeable { /** * Default thread pool for the procedure * - * @param memberName + * @param memberName name of the member * @param procThreads the maximum number of threads to allow in the pool * @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks + * @return the thread pool for procedure */ public static ThreadPoolExecutor defaultPool(String memberName, int procThreads, long keepAliveMillis) { @@ -104,8 +106,8 @@ public class ProcedureMember implements Closeable { * This is separated from execution so that we can detect and handle the case where the * subprocedure is invalid and inactionable due to bad info (like DISABLED snapshot type being * sent here) - * @param opName - * @param data + * @param opName name of the procedure + * @param data arguments about the procedure * @return subprocedure */ public Subprocedure createSubprocedure(String opName, byte[] data) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java index 96c22b0..83abd7f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java @@ -32,6 +32,8 @@ public interface ProcedureMemberRpcs extends Closeable { /** * Initialize and start any threads or connections the member needs. 
+ * @param memberName name of the member + * @param member the ProcedureMember instance */ void start(final String memberName, final ProcedureMember member); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java index 95c3ffe..ad110ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java @@ -35,7 +35,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { * Initialize a globally barriered procedure for region servers. * * @param rss Region Server service interface - * @throws KeeperException + * @throws KeeperException if an unexpected zk error occurs */ public abstract void initialize(RegionServerServices rss) throws KeeperException; @@ -48,7 +48,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { * Close this and all running procedure tasks * * @param force forcefully stop all running tasks - * @throws IOException + * @throws IOException if an unexpected IO error occurs */ public abstract void stop(boolean force) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java index 8927338..d28c443 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/Subprocedure.java @@ -240,7 +240,7 @@ abstract public class Subprocedure implements Callable { * Users should override this method. If a quiescent is not required, this is overkill but * can still be used to execute a procedure on all members and to propagate any exceptions. * - * @throws ForeignException + * @throws ForeignException if error occurs */ abstract public void acquireBarrier() throws ForeignException; @@ -253,7 +253,7 @@ abstract public class Subprocedure implements Callable { * to the coordinator upon successful completion. * Users should override this method. * @return the data the subprocedure wants to return to coordinator side. - * @throws ForeignException + * @throws ForeignException if error occurs */ abstract public byte[] insideBarrier() throws ForeignException; @@ -261,13 +261,14 @@ abstract public class Subprocedure implements Callable { * Users should override this method. This implementation of this method should rollback and * cleanup any temporary or partially completed state that the {@link #acquireBarrier()} may have * created. - * @param e + * @param e the error occurred during the process */ abstract public void cleanup(Exception e); /** * Method to cancel the Subprocedure by injecting an exception from and external source. - * @param cause + * @param msg error message to log + * @param cause cause of the error */ public void cancel(String msg, Throwable cause) { LOG.error(msg, cause); @@ -307,8 +308,8 @@ abstract public class Subprocedure implements Callable { /** * Waits until the entire procedure has globally completed, or has been aborted. 
- * @throws ForeignException - * @throws InterruptedException + * @throws ForeignException type of error the monitor can throw, if the task fails + * @throws InterruptedException if we are interrupted while waiting on latch */ public void waitForLocallyCompleted() throws ForeignException, InterruptedException { Procedure.waitForLatch(releasedLocalBarrier, monitor, wakeFrequency, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java index 8171218..485ca4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java @@ -226,6 +226,8 @@ public abstract class ZKProcedureUtil /** * Is this in the procedure barrier abort znode path + * @param path znode path to check + * @return true if the given path is an abort path */ public boolean isAbortPathNode(String path) { return path.startsWith(this.abortZnode) && !path.equals(abortZnode); @@ -253,6 +255,8 @@ public abstract class ZKProcedureUtil /** * Helper method to print the current state of the ZK tree. + * @param root name of the root directory in zk to print + * @param prefix prefix added to the message before log * @see #logZKTree(String) * @throws KeeperException if an unexpected exception occurs */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index a441a6b..3d34603 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -107,7 +107,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur /** * Close this and all running tasks * @param force forcefully stop all running tasks - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void stop(boolean force) throws IOException { @@ -127,7 +127,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * Because this gets the local list of regions to flush and not the set the master had, * there is a possibility of a race where regions may be missed. * - * @param table + * @param table name of the table to flush * @return Subprocedure to submit to the ProcedureMemeber. */ public Subprocedure buildSubprocedure(String table) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java index 61d1a9a..ca4f85e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java @@ -52,9 +52,9 @@ public class ReplicationProtbufUtil { /** * A helper to replicate a list of WAL entries using admin protocol. 
* - * @param admin - * @param entries - * @throws java.io.IOException + * @param admin the Admin instance for cluster administration + * @param entries the WAL entries to be replicated + * @throws java.io.IOException if an IO error occurs */ public static void replicateWALEntry(final AdminService.BlockingInterface admin, final Entry[] entries) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 1ba2ebe..530add5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1580,7 +1580,7 @@ public class RegionCoprocessorHost * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf + * @param cacheConf The cache configuration and block cache reference. * @param r original reference file. This will be not null only when reading a split file. * @return a Reader instance to use instead of the base reader if overriding * default behavior, null otherwise diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 25a27a9..528122c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -37,7 +37,11 @@ public interface ReplicationService { /** * Initializes the replication service object. - * @throws IOException + * @param rs RS instance which runs the service + * @param fs the file system instance + * @param logdir the directory that contains all wal directories of live RSs + * @param oldLogDir the directory where old logs are archived + * @throws IOException if an IO error occurs */ void initialize( Server rs, FileSystem fs, Path logdir, Path oldLogDir @@ -45,7 +49,7 @@ public interface ReplicationService { /** * Start replication services. 
- * @throws IOException + * @throws IOException if an IO error occurs */ void startReplicationService() throws IOException; @@ -56,6 +60,7 @@ public interface ReplicationService { /** * Refresh and Get ReplicationLoad + * @return the {@link ReplicationLoad} */ public ReplicationLoad refreshAndGetReplicationLoad(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java index 5f96bf7..6c95738 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSinkService.java @@ -36,7 +36,7 @@ public interface ReplicationSinkService extends ReplicationService { * Carry on the list of log entries down to the sink * @param entries list of WALEntries to replicate * @param cells Cells that the WALEntries refer to (if cells is non-null) - * @throws IOException + * @throws IOException if an IO error occurs */ void replicateLogEntries(List entries, CellScanner cells) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java index 13b502b..5b78ea6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationSourceService.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; @InterfaceAudience.Private public interface ReplicationSourceService extends ReplicationService { /** - * Returns a WALObserver for the service. This is needed to + * @return a WALObserver for the service. This is needed to * observe log rolls and log archival events. */ WALActionsListener getWALActionsListener(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index d8634c3..c89ca8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -1100,6 +1100,7 @@ public class StoreFile { * @param cacheBlocks should this scanner cache blocks? * @param pread use pread (for highly concurrent small readers) * @param isCompaction is scanner being used for compaction? + * @param readPt the read point * @return a scanner */ public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, @@ -1242,12 +1243,12 @@ public class StoreFile { * A method for checking Bloom filters. Called directly from * StoreFileScanner in case of a multi-column query. 
* - * @param row - * @param rowOffset - * @param rowLen - * @param col - * @param colOffset - * @param colLen + * @param row the array containing the row bytes + * @param rowOffset array index of first row byte + * @param rowLen number of row bytes + * @param col the array containing the qualifier bytes + * @param colOffset array index of first qualifier byte + * @param colLen number of qualifier bytes * @return True if passes */ public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index a8ee091..b51e129 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -69,7 +69,11 @@ public class StoreFileScanner implements KeyValueScanner { /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} + * @param reader reader of the store file * @param hfs HFile scanner + * @param useMVCC whether to use mvcc enforcibly + * @param hasMVCC whether the reader has MVCC info + * @param readPt the read point */ public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC, boolean hasMVCC, long readPt) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java index 457d859..1870abd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java @@ -39,6 +39,7 @@ public interface WALActionsListener { * the first log file from the regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal + * @throws IOException if an IO error occurs */ void preLogRoll(Path oldPath, Path newPath) throws IOException; @@ -47,6 +48,7 @@ public interface WALActionsListener { * the first log file from the regionserver. * @param oldPath the path to the old wal * @param newPath the path to the new wal + * @throws IOException if an IO error occurs */ void postLogRoll(Path oldPath, Path newPath) throws IOException; @@ -54,6 +56,7 @@ public interface WALActionsListener { * The WAL is going to be archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal + * @throws IOException if an IO error occurs */ void preLogArchive(Path oldPath, Path newPath) throws IOException; @@ -61,11 +64,13 @@ public interface WALActionsListener { * The WAL has been archived. * @param oldPath the path to the old wal * @param newPath the path to the new wal + * @throws IOException if an IO error occurs */ void postLogArchive(Path oldPath, Path newPath) throws IOException; /** * A request was made that the WAL be rolled. + * @param tooFewReplicas whether the WAL to roll is under replicated */ void logRollRequested(boolean tooFewReplicas); @@ -76,9 +81,9 @@ public interface WALActionsListener { /** * Called before each write. 
- * @param info - * @param logKey - * @param logEdit + * @param info {@link HRegionInfo} for the region + * @param logKey {@link WALKey} of the log entry + * @param logEdit {@link WALEdit} of the log entry */ void visitLogEntryBeforeWrite( HRegionInfo info, WALKey logKey, WALEdit logEdit @@ -86,9 +91,9 @@ public interface WALActionsListener { /** * - * @param htd - * @param logKey - * @param logEdit + * @param htd the htable descriptor + * @param logKey {@link WALKey} of the log entry + * @param logEdit {@link WALEdit} of the log entry * TODO: Retire this in favor of {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} * It only exists to get scope when replicating. Scope should be in the WALKey and not need * us passing in a htd. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java index 67051ab..87b0620 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseReplicationEndpoint.java @@ -57,13 +57,13 @@ public abstract class BaseReplicationEndpoint extends AbstractService return filters.isEmpty() ? null : new ChainWALEntryFilter(filters); } - /** Returns a WALEntryFilter for checking the scope. Subclasses can + /** @return a WALEntryFilter for checking the scope. Subclasses can * return null if they don't want this filter */ protected WALEntryFilter getScopeWALEntryFilter() { return new ScopeWALEntryFilter(); } - /** Returns a WALEntryFilter for checking replication per table and CF. Subclasses can + /** @return a WALEntryFilter for checking replication per table and CF. Subclasses can * return null if they don't want this filter */ protected WALEntryFilter getTableCfWALEntryFilter() { return new TableCfWALEntryFilter(ctx.getReplicationPeer()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index de82b7e..536e09e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -61,7 +61,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint /** * A private method used to re-establish a zookeeper session with a peer cluster. 
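The WALActionsListener callbacks documented in the hunks above are normally consumed by extending the no-op WALActionsListener.Base adapter (the same base class the Replication service further down extends). A hypothetical listener that only reacts to rolls, with an invented class name:

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;

public class LoggingWALListener extends WALActionsListener.Base {
  private static final Log LOG = LogFactory.getLog(LoggingWALListener.class);

  @Override
  public void postLogRoll(Path oldPath, Path newPath) throws IOException {
    LOG.info("wal rolled from " + oldPath + " to " + newPath);
  }

  @Override
  public void logRollRequested(boolean tooFewReplicas) {
    if (tooFewReplicas) {
      LOG.warn("roll requested because the current wal is under-replicated");
    }
  }
}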
- * @param ke + * @param ke the {@link KeeperException} */ protected void reconnect(KeeperException ke) { if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException @@ -142,6 +142,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * Get the list of all the region servers from the specified peer * @param zkw zk connection to use * @return list of region server addresses or an empty list if the slave is unavailable + * @throws KeeperException if zookeeper error occurs */ protected static List fetchSlavesAddresses(ZooKeeperWatcher zkw) throws KeeperException { @@ -160,7 +161,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * Get a list of all the addresses of all the region servers * for this peer cluster * @return list of addresses - * @throws KeeperException + * @throws KeeperException if zookeeper error occurs */ // Synchronize peer cluster connection attempts to avoid races and rate // limit connections when multiple replication sources try to connect to diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index e8a7ddc..4d95157 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -107,12 +107,14 @@ public interface ReplicationEndpoint extends Service { /** * Initialize the replication endpoint with the given context. * @param context replication context - * @throws IOException + * @throws IOException if an IO error occurs */ void init(Context context) throws IOException; - /** Whether or not, the replication endpoint can replicate to it's source cluster with the same - * UUID */ + /** + * @return Whether or not, the replication endpoint can replicate to it's source cluster with the + * same UUID + */ boolean canReplicateToSameCluster(); /** @@ -164,6 +166,7 @@ public interface ReplicationEndpoint extends Service { * target cluster. * @param replicateContext a context where WAL entries and other * parameters can be obtained. + * @return true if replication succeed */ boolean replicate(ReplicateContext replicateContext); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 37dc1dd..080e25f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -64,7 +64,7 @@ public class MetricsSink { /** * Convience method to change metrics when a batch of operations are applied. 
* - * @param batchSize + * @param batchSize size of the batch */ public void applyBatch(long batchSize) { mss.incrAppliedBatches(1); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 04c3d2d..c9a5454 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -121,6 +121,7 @@ public class MetricsSource { * Convience method to apply changes to metrics do to shipping a batch of logs. * * @param batchSize the size of the batch that was shipped to sinks. + * @param sizeInKB size in KB */ public void shipBatch(long batchSize, int sizeInKB) { singleSourceSource.incrBatchesShipped(1); @@ -133,7 +134,10 @@ public class MetricsSource { globalSourceSource.incrShippedKBs(sizeInKB); } - /** increase the byte number read by source from log file */ + /** + * increase the byte number read by source from log file + * @param readInBytes bytes of the read + */ public void incrLogReadInBytes(long readInBytes) { singleSourceSource.incrLogReadInBytes(readInBytes); globalSourceSource.incrLogReadInBytes(readInBytes); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 78bb92e..f658a48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -90,9 +90,9 @@ public class Replication extends WALActionsListener.Base implements * Instantiate the replication management (if rep is enabled). * @param server Hosting server * @param fs handle to the filesystem - * @param logDir + * @param logDir the directory that contains all wal directories of live RSs * @param oldLogDir directory where logs are archived - * @throws IOException + * @throws IOException if an IO error occurs */ public Replication(final Server server, final FileSystem fs, final Path logDir, final Path oldLogDir) throws IOException{ @@ -191,7 +191,7 @@ public class Replication extends WALActionsListener.Base implements * @param cells The data -- the cells -- that entries describes (the entries * do not contain the Cells we are replicating; they are passed here on the side in this * CellScanner). 
- * @throws IOException + * @throws IOException if an IO error occurs */ public void replicateLogEntries(List entries, CellScanner cells) throws IOException { if (this.replication) { @@ -202,7 +202,7 @@ public class Replication extends WALActionsListener.Base implements /** * If replication is enabled and this cluster is a master, * it starts - * @throws IOException + * @throws IOException if an IO error occurs */ public void startReplicationService() throws IOException { if (this.replication) { @@ -275,7 +275,7 @@ public class Replication extends WALActionsListener.Base implements /** * This method modifies the master's configuration in order to inject * replication-related features - * @param conf + * @param conf the {@link Configuration} instance */ public static void decorateMasterConfiguration(Configuration conf) { if (!isReplication(conf)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java index 8dd42bc..26014eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -49,8 +49,8 @@ public class ReplicationLoad { /** * buildReplicationLoad - * @param srMetricsList - * @param skMetrics + * @param srMetricsList list of the source metrics + * @param skMetrics the sink metrics */ public void buildReplicationLoad(final List srMetricsList, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 0a62c72..3aa294b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -110,9 +110,9 @@ public class ReplicationSink { /** * Replicate this array of entries directly into the local cluster using the native client. Only * operates against raw protobuf type saving on a conversion from pb to pojo. - * @param entries - * @param cells - * @throws IOException + * @param entries the list of entries to replicate + * @param cells the {@link CellScanner} instance + * @throws IOException if an IO error occurs */ public void replicateEntries(List entries, final CellScanner cells) throws IOException { if (entries.isEmpty()) return; @@ -232,7 +232,7 @@ public class ReplicationSink { * Do the changes and handle the pool * @param tableName table to insert into * @param allRows list of actions - * @throws IOException + * @throws IOException if an IO error occurs */ protected void batch(TableName tableName, Collection> allRows) throws IOException { if (allRows.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index 76fa6c2..a1f4fda 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -104,6 +104,7 @@ public class ReplicationSinkManager { * Get a randomly-chosen replication sink to replicate to. 
* * @return a replication sink to replicate to + * @throws IOException if an IO error occurs */ public SinkPeer getReplicationSink() throws IOException { if (endpoint.getLastRegionServerUpdate() > this.lastUpdateToPeers || sinks.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 0496f73..dd344ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -73,7 +73,6 @@ import com.google.common.util.concurrent.Service; *

* A stream is considered down when we cannot contact a region server on the * peer cluster for more than 55 seconds by default. - *

* */ @InterfaceAudience.Private @@ -144,12 +143,14 @@ public class ReplicationSource extends Thread * @param conf configuration to use * @param fs file system to use * @param manager replication manager to ping to + * @param replicationQueues the replication queues + * @param replicationPeers the replication peers * @param stopper the atomic boolean to use to stop the regionserver * @param peerClusterZnode the name of our znode * @param clusterId unique UUID for the cluster * @param replicationEndpoint the replication endpoint implementation * @param metrics metrics for replication source - * @throws IOException + * @throws IOException if an IO error occurs */ @Override public void init(final Configuration conf, final FileSystem fs, @@ -419,7 +420,7 @@ public class ReplicationSource extends Thread * @param entries resulting entries to be replicated * @return true if we got nothing and went to the next file, false if we got * entries - * @throws IOException + * @throws IOException if an IO error occurs */ protected boolean readAllEntriesToReplicateOrNextFile(boolean currentWALisBeingWrittenTo, List entries) throws IOException { @@ -682,7 +683,7 @@ public class ReplicationSource extends Thread /** * Do the shipping logic * @param currentWALisBeingWrittenTo was the current WAL being (seemingly) - * written to when this method was called + * @param entries the entries to replicate */ protected void shipEdits(boolean currentWALisBeingWrittenTo, List entries) { int sleepMultiplier = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index 1e9c714..df91845 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -41,12 +41,14 @@ public interface ReplicationSourceInterface { * @param conf the configuration to use * @param fs the file system to use * @param manager the manager to use - * @param replicationQueues - * @param replicationPeers + * @param replicationQueues the queues of this replication source + * @param replicationPeers the peers of this replication source * @param stopper the stopper object for this region server - * @param peerClusterZnode - * @param clusterId - * @throws IOException + * @param peerClusterZnode znode of the peer cluster + * @param clusterId id of the cluster + * @param replicationEndpoint the {@link ReplicationEndpoint} instance + * @param metrics the metrics + * @throws IOException if an IO error occurs */ public void init(final Configuration conf, final FileSystem fs, final ReplicationSourceManager manager, final ReplicationQueues replicationQueues, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 4d97257..c2a5dcc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -111,14 +111,14 @@ public class ReplicationSourceManager implements ReplicationListener { /** * Creates a replication manager and sets the watch on all the other registered region servers 
* @param replicationQueues the interface for manipulating replication queues - * @param replicationPeers - * @param replicationTracker + * @param replicationPeers peers for replication + * @param replicationTracker tracker for replication * @param conf the configuration to use * @param server the server for this region server * @param fs the file system to use * @param logDir the directory that contains all wal directories of live RSs * @param oldLogDir the directory where old logs are archived - * @param clusterId + * @param clusterId id of the cluster */ public ReplicationSourceManager(final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker, @@ -214,6 +214,8 @@ public class ReplicationSourceManager implements ReplicationListener { /** * Adds a normal source per registered peer cluster and tries to process all * old region server wal queues + * @throws IOException if an IO error occurs + * @throws ReplicationException if replication error occurs */ protected void init() throws IOException, ReplicationException { for (String id : this.replicationPeers.getPeerIds()) { @@ -239,7 +241,8 @@ public class ReplicationSourceManager implements ReplicationListener { * Add a new normal source to this region server * @param id the id of the peer cluster * @return the source that was created - * @throws IOException + * @throws IOException if an IO error occurs + * @throws ReplicationException if replication error occurs */ protected ReplicationSourceInterface addSource(String id) throws IOException, ReplicationException { @@ -275,6 +278,7 @@ public class ReplicationSourceManager implements ReplicationListener { /** * Delete a complete queue of wals associated with a peer cluster * @param peerId Id of the peer cluster queue of wals to delete + * @param closeConnection whether to close connection to peer */ public void deleteSource(String peerId, boolean closeConnection) { this.replicationQueues.removeQueue(peerId); @@ -364,10 +368,15 @@ public class ReplicationSourceManager implements ReplicationListener { * @param conf the configuration to use * @param fs the file system to use * @param manager the manager to use + * @param replicationQueues the replication queues + * @param replicationPeers the replication peers * @param server the server object for this region server * @param peerId the id of the peer cluster + * @param clusterId the id of the cluster + * @param peerConfig the config for the peer + * @param replicationPeer the replication peer * @return the created source - * @throws IOException + * @throws IOException if an IO error occurs */ protected ReplicationSourceInterface getReplicationSource(final Configuration conf, final FileSystem fs, final ReplicationSourceManager manager, @@ -642,7 +651,7 @@ public class ReplicationSourceManager implements ReplicationListener { } /** - * Get a string representation of all the sources' metrics + * @return a string representation of all the sources' metrics */ public String getStats() { StringBuffer stats = new StringBuffer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 4b997b4..d0f51ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -62,8 
+62,8 @@ public class ReplicationSyncUp extends Configured implements Tool { /** * Main program - * @param args - * @throws Exception + * @param args the arguments to run with + * @throws Exception if error occurs */ public static void main(String[] args) throws Exception { if (conf == null) conf = HBaseConfiguration.create(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationWALReaderManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationWALReaderManager.java index b63f66b..9082b3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationWALReaderManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationWALReaderManager.java @@ -47,8 +47,8 @@ public class ReplicationWALReaderManager { /** * Creates the helper but doesn't open any file * Use setInitialPosition after using the constructor if some content needs to be skipped - * @param fs - * @param conf + * @param fs the file system instance + * @param conf the {@link Configuration} instance */ public ReplicationWALReaderManager(FileSystem fs, Configuration conf) { this.fs = fs; @@ -57,9 +57,9 @@ public class ReplicationWALReaderManager { /** * Opens the file at the current position - * @param path + * @param path the path of the WAL * @return an WAL reader. - * @throws IOException + * @throws IOException if an IO error occurs */ public Reader openReader(Path path) throws IOException { // Detect if this is a new file, if so get a new reader else @@ -81,7 +81,7 @@ public class ReplicationWALReaderManager { /** * Get the next entry, returned and also added in the array * @return a new entry or null - * @throws IOException + * @throws IOException if an IO error occurs */ public Entry readNextAndSetPosition() throws IOException { Entry entry = this.reader.next(); @@ -99,7 +99,7 @@ public class ReplicationWALReaderManager { /** * Advance the reader to the current position - * @throws IOException + * @throws IOException if an IO error occurs */ public void seek() throws IOException { if (this.position != 0) { @@ -121,7 +121,7 @@ public class ReplicationWALReaderManager { /** * Close the current reader - * @throws IOException + * @throws IOException if an IO error occurs */ public void closeReader() throws IOException { if (this.reader != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java index e2425a6..59faf2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ScanLabelGenerator.java @@ -35,8 +35,8 @@ public interface ScanLabelGenerator extends Configurable { /** * Helps to get a list of lables associated with an UGI - * @param user - * @param authorizations + * @param user User who called the RPC method + * @param authorizations Authorizations for the request * @return The labels */ public List getLabels(User user, Authorizations authorizations); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java index 1ef1253..51d9ad2 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelService.java @@ -42,6 +42,7 @@ public interface VisibilityLabelService extends Configurable { * any initialization logic. * @param e * the region coprocessor env + * @throws IOException if an IO error occurs */ void init(RegionCoprocessorEnvironment e) throws IOException; @@ -50,6 +51,7 @@ public interface VisibilityLabelService extends Configurable { * @param labels * Labels to add to the system. * @return OperationStatus for each of the label addition + * @throws IOException if an IO error occurs */ OperationStatus[] addLabels(List labels) throws IOException; @@ -60,6 +62,7 @@ public interface VisibilityLabelService extends Configurable { * @param authLabels * Labels which are getting authorized for the user * @return OperationStatus for each of the label auth addition + * @throws IOException if an IO error occurs */ OperationStatus[] setAuths(byte[] user, List authLabels) throws IOException; @@ -70,6 +73,7 @@ public interface VisibilityLabelService extends Configurable { * @param authLabels * Labels which are getting removed from authorization set * @return OperationStatus for each of the label auth removal + * @throws IOException if an IO error occurs */ OperationStatus[] clearAuths(byte[] user, List authLabels) throws IOException; @@ -80,6 +84,7 @@ public interface VisibilityLabelService extends Configurable { * @param systemCall * Whether a system or user originated call. * @return Visibility labels authorized for the given user. + * @throws IOException if an IO error occurs * @deprecated Use {@link #getUserAuths(byte[], boolean)} */ @Deprecated @@ -92,6 +97,7 @@ public interface VisibilityLabelService extends Configurable { * @param systemCall * Whether a system or user originated call. * @return Visibility labels authorized for the given user. + * @throws IOException if an IO error occurs */ List getUserAuths(byte[] user, boolean systemCall) throws IOException; @@ -102,6 +108,7 @@ public interface VisibilityLabelService extends Configurable { * @param systemCall * Whether a system or user originated call. * @return Visibility labels authorized for the given group. + * @throws IOException if an IO error occurs */ List getGroupAuths(String[] groups, boolean systemCall) throws IOException; @@ -109,6 +116,7 @@ public interface VisibilityLabelService extends Configurable { * Retrieve the list of visibility labels defined in the system. * @param regex The regular expression to filter which labels are returned. * @return List of visibility labels + * @throws IOException if an IO error occurs */ List listLabels(String regex) throws IOException; @@ -125,6 +133,7 @@ public interface VisibilityLabelService extends Configurable { * global auth label. * @return The list of tags corresponds to the visibility expression. These tags will be stored * along with the Cells. + * @throws IOException if an IO error occurs */ List createVisibilityExpTags(String visExpression, boolean withSerializationFormat, boolean checkAuths) throws IOException; @@ -136,6 +145,7 @@ public interface VisibilityLabelService extends Configurable { * @param authorizations * Authorizations for the read request * @return The VisibilityExpEvaluator corresponding to the given set of authorization labels. 
+ * @throws IOException if an IO error occurs */ VisibilityExpEvaluator getVisibilityExpEvaluator(Authorizations authorizations) throws IOException; @@ -147,6 +157,7 @@ public interface VisibilityLabelService extends Configurable { * @param user * User for whom system auth check to be done. * @return true if the given user is having system/super auth + * @throws IOException if an IO error occurs * @deprecated Use {@link #havingSystemAuth(User)} */ @Deprecated @@ -159,6 +170,7 @@ public interface VisibilityLabelService extends Configurable { * @param user * User for whom system auth check to be done. * @return true if the given user is having system/super auth + * @throws IOException if an IO error occurs */ boolean havingSystemAuth(User user) throws IOException; @@ -180,6 +192,7 @@ public interface VisibilityLabelService extends Configurable { * this format means the tags are written with unsorted label ordinals * @return true if matching tags are found * @see VisibilityConstants#SORTED_ORDINAL_SERIALIZATION_FORMAT + * @throws IOException if an IO error occurs */ boolean matchVisibility(List putVisTags, Byte putVisTagFormat, List deleteVisTags, Byte deleteVisTagFormat) throws IOException; @@ -199,7 +212,7 @@ public interface VisibilityLabelService extends Configurable { * @param serializationFormat * the serialization format associated with the tag * @return the modified visibility expression in the form of byte[] - * @throws IOException + * @throws IOException if an IO error occurs */ byte[] encodeVisibilityForReplication(final List visTags, final Byte serializationFormat) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index e8dae75..b209764 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -164,6 +164,7 @@ public class RestoreSnapshotHelper { /** * Restore the on-disk table to a specified snapshot state. * @return the set of regions touched by the restore operation + * @throws IOException if an IO error occurs */ public RestoreMetaChanges restoreHdfsRegions() throws IOException { ThreadPoolExecutor exec = SnapshotManifest.createExecutor(conf, "RestoreSnapshot"); @@ -695,10 +696,10 @@ public class RestoreSnapshotHelper { /** * Create a new table descriptor cloning the snapshot table schema. * - * @param snapshotTableDescriptor - * @param tableName + * @param snapshotTableDescriptor descriptor of the snapshot table + * @param tableName the fully qualified table name instance of the table * @return cloned table descriptor - * @throws IOException + * @throws IOException if an IO error occurs */ public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor, final TableName tableName) throws IOException { @@ -718,12 +719,12 @@ public class RestoreSnapshotHelper { /** * Copy the snapshot files for a snapshot scanner, discards meta changes. 
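The copySnapshotForScanner helper whose parameters are filled in just below is typically driven with the cluster root directory plus a scratch restore directory. A rough call-site sketch; the snapshot name, scratch path, and wrapper class are made up for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;

public final class SnapshotScannerPrep {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);            // hbase.rootdir of the cluster
    FileSystem fs = rootDir.getFileSystem(conf);
    Path restoreDir = new Path("/tmp/restore-scratch"); // hypothetical scratch directory
    // lays the snapshot files out under restoreDir so a scanner can read them directly
    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "my_snapshot");
  }
}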
- * @param conf - * @param fs - * @param rootDir - * @param restoreDir - * @param snapshotName - * @throws IOException + * @param conf the Configuration instance + * @param fs the instance of FileSystem implementation + * @param rootDir hbase root directory on file system + * @param restoreDir hbase restore directory on file system + * @param snapshotName name of the snapshot + * @throws IOException if an IO error occurs */ public static void copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir, Path restoreDir, String snapshotName) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 6216846..e97635d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -79,7 +79,7 @@ public class SnapshotDescriptionUtils { public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter { /** - * @param fs + * @param fs the instance of file system implementation */ public CompletedSnaphotDirectoriesFilter(FileSystem fs) { super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index 606b9c9..10974b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -480,6 +480,7 @@ public final class SnapshotInfo extends Configured implements Tool { * @param conf the {@link Configuration} to use * @param snapshot {@link SnapshotDescription} to get stats from * @return the snapshot stats + * @throws IOException if an IO error occurs */ public static SnapshotStats getSnapshotStats(final Configuration conf, final SnapshotDescription snapshot) throws IOException { @@ -510,6 +511,7 @@ public final class SnapshotInfo extends Configured implements Tool { * Returns the list of available snapshots in the specified location * @param conf the {@link Configuration} to use * @return the list of snapshots + * @throws IOException if an IO error occurs */ public static List getSnapshotList(final Configuration conf) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 007dbfa..9af3738 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -97,6 +97,13 @@ public class SnapshotManifest { * - The RegionServer will create a single region manifest * manifest = SnapshotManifest.create(...) 
* manifest.addRegion(region) + * + * @param conf the Configuration instance + * @param fs the instance of file system implementation + * @param workingDir the working directory on file system + * @param desc the instance of {@link SnapshotDescription} + * @param monitor the {@link ForeignExceptionSnare} instance for monitoring + * @return the {@link SnapshotManifest} instance */ public static SnapshotManifest create(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc, @@ -112,6 +119,12 @@ public class SnapshotManifest { * hri = regionManifest.getRegionInfo() * for (regionManifest.getFamilyFiles()) * ... + * @param conf the Configuration instance + * @param fs the instance of file system implementation + * @param workingDir the working directory on file system + * @param desc the instance of {@link SnapshotDescription} + * @return the {@link SnapshotManifest} instance + * @throws IOException if an IO error occurs */ public static SnapshotManifest open(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc) throws IOException { @@ -123,6 +136,8 @@ public class SnapshotManifest { /** * Add the table descriptor to the snapshot manifest + * @param htd the table descriptor to add + * @throws IOException if an IO error occurs */ public void addTableDescriptor(final HTableDescriptor htd) throws IOException { this.htd = htd; @@ -153,6 +168,8 @@ public class SnapshotManifest { /** * Creates a 'manifest' for the specified region, by reading directly from the HRegion object. * This is used by the "online snapshot" when the table is enabled. + * @param region the HRegion object + * @throws IOException if an IO error occurs */ public void addRegion(final HRegion region) throws IOException { // 0. Get the ManifestBuilder/RegionVisitor @@ -193,6 +210,9 @@ public class SnapshotManifest { /** * Creates a 'manifest' for the specified region, by reading directly from the disk. * This is used by the "offline snapshot" when the table is disabled. + * @param tableDir table directory on the file system + * @param regionInfo {@link HRegionInfo} for region to be added + * @throws IOException if an IO error occurs */ public void addRegion(final Path tableDir, final HRegionInfo regionInfo) throws IOException { // 0. Get the ManifestBuilder/RegionVisitor @@ -305,6 +325,7 @@ public class SnapshotManifest { /** * Get the current snapshot working dir + * @return the Path of current snapshot working dir */ public Path getSnapshotDir() { return this.workingDir; @@ -312,6 +333,7 @@ public class SnapshotManifest { /** * Get the SnapshotDescription + * @return the SnapshotDescription */ public SnapshotDescription getSnapshotDescription() { return this.desc; @@ -319,6 +341,7 @@ public class SnapshotManifest { /** * Get the table descriptor from the Snapshot + * @return the HTableDescriptor */ public HTableDescriptor getTableDescriptor() { return this.htd; @@ -326,6 +349,7 @@ public class SnapshotManifest { /** * Get all the Region Manifest from the snapshot + * @return all {@link SnapshotManifest} instances from the snapshot */ public List getRegionManifests() { return this.regionManifests; @@ -334,6 +358,7 @@ public class SnapshotManifest { /** * Get all the Region Manifest from the snapshot. 
* This is an helper to get a map with the region encoded name + * @return the map with region encoded name as key and {@link SnapshotManifest} as value */ public Map getRegionManifestsMap() { if (regionManifests == null || regionManifests.size() == 0) return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 1dfe115..2789e5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -485,7 +485,9 @@ public final class Canary implements Tool { /** * Canary entry point for specified table. - * @throws Exception + * @param admin the Admin instance for cluster administration + * @param tableName name of the target table + * @throws Exception if unexpected error occurs */ public static void sniff(final Admin admin, TableName tableName) throws Exception { sniff(admin, new StdOutSink(), tableName.getNameAsString()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index 92ab4d1..7ef7425 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -51,7 +51,6 @@ import java.util.concurrent.atomic.AtomicLong; * scan 'usertable' * Will return: * 0 row(s) in 0.0050 seconds - *

*/ public class WriteSinkCoprocessor extends BaseRegionObserver { private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java index a876aef..adc6a14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java @@ -65,10 +65,15 @@ public abstract class AbstractHBaseTool implements Tool { /** * This method is called to process the options after they have been parsed. + * @param cmd the command to process on */ protected abstract void processOptions(CommandLine cmd); - /** The "main function" of the tool */ + /** + * The "main function" of the tool + * @return exit code indicating success or fail + * @throws Exception if error occurs + */ protected abstract int doWork() throws Exception; @Override @@ -174,6 +179,10 @@ public abstract class AbstractHBaseTool implements Tool { /** * Parse a number and enforce a range. + * @param s the String to parse + * @param minValue min value of the range, including + * @param maxValue max value of the range, including + * @return the long value if parse successfully, or throws {@link IllegalArgumentException} if not */ public static long parseLong(String s, long minValue, long maxValue) { long l = Long.parseLong(s); @@ -188,7 +197,10 @@ public abstract class AbstractHBaseTool implements Tool { return (int) parseLong(s, minValue, maxValue); } - /** Call this from the concrete tool class's main function. */ + /** + * Call this from the concrete tool class's main function. + * @param args arguments for the run + */ protected void doStaticMain(String args[]) { int ret; try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java index 3b9ca9a..bb86a23 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterBase.java @@ -45,6 +45,13 @@ public interface BloomFilterBase { /** * Create a key for a row-column Bloom filter. 
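Putting the AbstractHBaseTool hooks documented above together, a hypothetical subclass could look like the sketch below. The class name and values are invented; addOptions is assumed to be the remaining abstract hook of the class, which this patch does not touch:

import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;

public class RowCountSketchTool extends AbstractHBaseTool {
  private long rows = 1;

  @Override
  protected void addOptions() {
    // no extra command line options in this sketch
  }

  @Override
  protected void processOptions(CommandLine cmd) {
    // parseLong enforces the inclusive [1, 1000000] range and throws IllegalArgumentException outside it
    rows = parseLong("42", 1, 1000000);
  }

  @Override
  protected int doWork() throws Exception {
    System.out.println("would touch " + rows + " rows");
    return 0; // becomes the tool's exit code
  }

  public static void main(String[] args) {
    new RowCountSketchTool().doStaticMain(args); // "call this from the concrete tool class's main function"
  }
}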
+ * @param rowBuf the array containing the row bytes + * @param rowOffset array index of first row byte + * @param rowLen number of row bytes + * @param qualBuf the array containing the qualifier bytes + * @param qualOffset array index of first qualifier byte + * @param qualLen number of qualifier bytes + * @return the key of the Bloom filter */ byte[] createBloomKey(byte[] rowBuf, int rowOffset, int rowLen, byte[] qualBuf, int qualOffset, int qualLen); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 5ff7207..7a00784 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -92,7 +92,8 @@ public final class BloomFilterFactory { * @param reader the {@link HFile} reader to use to lazily load Bloom filter * blocks * @return an instance of the correct type of Bloom filter - * @throws IllegalArgumentException + * @throws IllegalArgumentException if version of the Bloom filter is not recognized + * @throws IOException if an IO error occurs */ public static BloomFilter createFromMeta(DataInput meta, HFile.Reader reader) @@ -116,6 +117,7 @@ public final class BloomFilterFactory { } /** + * @param conf The Configuration instance to get value from * @return true if general Bloom (Row or RowCol) filters are enabled in the * given configuration */ @@ -124,6 +126,7 @@ public final class BloomFilterFactory { } /** + * @param conf The Configuration instance to get value from * @return true if Delete Family Bloom filters are enabled in the given configuration */ public static boolean isDeleteFamilyBloomEnabled(Configuration conf) { @@ -131,6 +134,7 @@ public final class BloomFilterFactory { } /** + * @param conf The Configuration instance to get value from * @return the Bloom filter error rate in the given configuration */ public static float getErrorRate(Configuration conf) { @@ -138,20 +142,25 @@ public final class BloomFilterFactory { } /** + * @param conf The Configuration instance to get value from * @return the value for Bloom filter max fold in the given configuration */ public static int getMaxFold(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR); } - /** @return the compound Bloom filter block size from the configuration */ + /** + * @param conf The Configuration instance to get value from + * @return the compound Bloom filter block size from the configuration + */ public static int getBloomBlockSize(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_BLOCK_SIZE, 128 * 1024); } /** - * @return max key for the Bloom filter from the configuration - */ + * @param conf The Configuration instance to get value from + * @return max key for the Bloom filter from the configuration + */ public static int getMaxKeys(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, 128 * 1000 * 1000); } @@ -160,9 +169,9 @@ public final class BloomFilterFactory { * Creates a new general (Row or RowCol) Bloom filter at the time of * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing. * - * @param conf - * @param cacheConf - * @param bloomType + * @param conf The Configuration instance to get value from + * @param cacheConf The cache configuration and block cache reference + * @param bloomType type of the Bloom filter * @param maxKeys an estimate of the number of keys we expect to insert. 
* Irrelevant if compound Bloom filters are enabled. * @param writer the HFile writer @@ -207,8 +216,8 @@ public final class BloomFilterFactory { /** * Creates a new Delete Family Bloom filter at the time of * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing. - * @param conf - * @param cacheConf + * @param conf The Configuration instance to get value from + * @param cacheConf The cache configuration and block cache reference * @param maxKeys an estimate of the number of keys we expect to insert. * Irrelevant if compound Bloom filters are enabled. * @param writer the HFile writer diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java index 56c3776..49855ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java @@ -146,8 +146,10 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { } /** - * @param maxKeys - * @param errorRate + * @param maxKeys Maximum expected number of keys that will be stored in this + * bloom + * @param errorRate Desired false positive error rate. Lower rate = more + * storage required * @return the number of bits for a Bloom filter than can hold the given * number of keys and provide the given error rate, assuming that the * optimal number of hash functions is used and it does not have to @@ -163,8 +165,8 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * functions is chosen optimally and does not even have to be an integer * (hence the "ideal" in the function name). * - * @param bitSize - * @param errorRate + * @param bitSize the desired number of bits for the Bloom filter bit array + * @param errorRate target false positive rate of the Bloom filter * @return maximum number of keys that can be inserted into the Bloom filter * @see #computeMaxKeys(long, double, int) for a more precise estimate */ @@ -178,9 +180,9 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * The maximum number of keys we can put into a Bloom filter of a certain * size to get the given error rate, with the given number of hash functions. * - * @param bitSize - * @param errorRate - * @param hashCount + * @param bitSize the desired number of bits for the Bloom filter bit array + * @param errorRate target false positive rate of the Bloom filter + * @param hashCount number of hash functions * @return the maximum number of keys that can be inserted in a Bloom filter * to maintain the target error rate, if the number of hash functions * is provided. @@ -211,9 +213,10 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * "http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives" * > Wikipedia Bloom filter article. * - * @param maxKeys - * @param bitSize - * @param functionCount + * @param maxKeys Maximum expected number of keys that will be stored in this + * bloom + * @param bitSize the desired number of bits for the Bloom filter bit array + * @param functionCount number of hash functions * @return the actual error rate */ public static double actualErrorRate(long maxKeys, long bitSize, @@ -226,8 +229,8 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * Increases the given byte size of a Bloom filter until it can be folded by * the given factor. 
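The ByteBloomFilter sizing helpers above follow the standard Bloom filter identities from the Wikipedia article cited in this hunk. A rough worked example with plain Math calls; it mirrors the formulas only and does not reproduce HBase's exact rounding or folding:

public final class BloomSizingExample {
  public static void main(String[] args) {
    long maxKeys = 1000000L;  // expected number of keys
    double errorRate = 0.01;  // target false positive rate

    // bits needed assuming the optimal number of hash functions: m = -n * ln(p) / (ln 2)^2
    double bits = -maxKeys * Math.log(errorRate) / (Math.log(2) * Math.log(2)); // ~9.59M bits, ~1.2 MB

    // optimal hash function count: k = (m / n) * ln 2
    double hashCount = bits / maxKeys * Math.log(2); // ~6.6, rounded in practice

    // error rate actually achieved with n keys, m bits and k hashes: (1 - e^(-k*n/m))^k
    double actual = Math.pow(1.0 - Math.exp(-hashCount * maxKeys / bits), hashCount); // ~0.01

    System.out.println(bits + " bits, " + hashCount + " hashes, error rate " + actual);
  }
}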
* - * @param bitSize - * @param foldFactor + * @param bitSize the desired number of bits for the Bloom filter bit array + * @param foldFactor factor to 'fold' this bloom to save space * @return Foldable byte size */ public static int computeFoldableByteSize(long bitSize, int foldFactor) { @@ -273,7 +276,7 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * this bloom to save space. Tradeoff potentially excess bytes in * bloom for ability to fold if keyCount is exponentially greater * than maxKeys. - * @throws IllegalArgumentException + * @throws IllegalArgumentException if settings for this Bloom is not correct */ public ByteBloomFilter(int maxKeys, double errorRate, int hashType, int foldFactor) throws IllegalArgumentException { @@ -296,7 +299,7 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter { * array. Will be increased so that folding is possible. * @param errorRate target false positive rate of the Bloom filter * @param hashType Bloom filter hash function type - * @param foldFactor + * @param foldFactor factor to 'fold' this bloom to save space * @return the new Bloom filter of the desired size */ public static ByteBloomFilter createBySize(int byteSizeHint, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index c8b6e08..f22cd49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -23,8 +23,6 @@ import static org.junit.Assert.*; import java.net.Inet6Address; import java.net.InetAddress; import java.net.UnknownHostException; - -import javax.naming.NamingException; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -33,7 +31,7 @@ import org.junit.experimental.categories.Category; public class TestTableInputFormatBase { @Test public void testTableInputFormatBaseReverseDNSForIPv6() - throws UnknownHostException, NamingException { + throws UnknownHostException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null;