Index: src/main/java/org/apache/hadoop/hbase/TableDescriptors.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (working copy) @@ -49,8 +49,6 @@ /** * Get Map of all HTableDescriptors. Populates the descriptor cache as a * side effect. - * @param fs - * @param rootdir * @return Map of all descriptors. * @throws IOException */ @@ -72,4 +70,4 @@ */ public HTableDescriptor remove(final String tablename) throws IOException; -} \ No newline at end of file +} Index: src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (working copy) @@ -59,7 +59,7 @@ * the create it will do a getChildren("/") and see "x-222-1", "x-542-30", * "x-352-109", x-333-110". The process will know that the original create * succeeded an the znode it created is "x-352-109". - * @see http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling + * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling" */ public class RecoverableZooKeeper { private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class); @@ -142,7 +142,7 @@ * exists is an idempotent operation. Retry before throw out exception * @param path * @param watcher - * @return + * @return A Stat instance * @throws KeeperException * @throws InterruptedException */ @@ -177,7 +177,7 @@ * exists is an idempotent operation. Retry before throw out exception * @param path * @param watch - * @return + * @return A Stat instance * @throws KeeperException * @throws InterruptedException */ @@ -212,7 +212,7 @@ * getChildren is an idempotent operation. Retry before throw out exception * @param path * @param watcher - * @return + * @return List of children znodes * @throws KeeperException * @throws InterruptedException */ @@ -247,7 +247,7 @@ * getChildren is an idempotent operation. Retry before throw out exception * @param path * @param watch - * @return + * @return List of children znodes * @throws KeeperException * @throws InterruptedException */ @@ -283,7 +283,7 @@ * @param path * @param watcher * @param stat - * @return + * @return Data * @throws KeeperException * @throws InterruptedException */ @@ -320,7 +320,7 @@ * @param path * @param watch * @param stat - * @return + * @return Data * @throws KeeperException * @throws InterruptedException */ @@ -359,7 +359,7 @@ * @param path * @param data * @param version - * @return + * @return Stat instance * @throws KeeperException * @throws InterruptedException */ @@ -427,7 +427,7 @@ * @param data * @param acl * @param createMode - * @return + * @return Path * @throws KeeperException * @throws InterruptedException */ Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (working copy) @@ -68,7 +68,7 @@ * Starts the tracking of the node in ZooKeeper. * *

Use {@link #blockUntilAvailable()} to block until the node is available - * or {@link #getData()} to get the data of the node if it is available. + * or {@link #getData(boolean)} to get the data of the node if it is available. */ public synchronized void start() { this.watcher.registerListener(this); @@ -143,7 +143,7 @@ *

If the node is currently available, the most up-to-date known version of * the data is returned. If the node is not currently available, null is * returned. - * @param whether to refresh the data by calling ZK directly. + * @param refresh whether to refresh the data by calling ZK directly. * @return data of the node, null if unavailable */ public synchronized byte [] getData(boolean refresh) { Index: src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java (working copy) @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData; import org.apache.zookeeper.KeeperException; /** @@ -41,7 +40,7 @@ * listening for changes in the RS node list and watching each node. * *

If an RS node gets deleted, this automatically handles calling of - * {@link ServerManager#expireServer(org.apache.hadoop.hbase.HServerInfo)}. + * {@link ServerManager#expireServer(ServerName)} */ public class RegionServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); @@ -121,7 +120,6 @@ /** * Gets the online servers. * @return list of online servers - * @throws KeeperException */ public List getOnlineServers() { synchronized (this.regionServers) { Index: src/main/java/org/apache/hadoop/hbase/HConstants.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/HConstants.java (working copy) @@ -437,13 +437,13 @@ public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100; /** - * Parameter name for unique identifier for this {@link Configuration} - * instance. If there are two or more {@link Configuration} instances that, + * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration} + * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that, * for all intents and purposes, are the same except for their instance ids, - * then they will not be able to share the same {@link Connection} instance. + * then they will not be able to share the same {@link org.apache.hadoop.hbase.client.HConnection} instance. * On the other hand, even if the instance ids are the same, it could result - * in non-shared {@link Connection} instances if some of the other connection - * parameters differ. + * in non-shared {@link org.apache.hadoop.hbase.client.HConnection} + * instances if some of the other connection parameters differ. */ public static String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id"; Index: src/main/java/org/apache/hadoop/hbase/HServerInfo.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/HServerInfo.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/HServerInfo.java (working copy) @@ -104,8 +104,6 @@ /** * @return ServerName and load concatenated. - * @see #getServerName() - * @see #getLoad() */ @Override public synchronized String toString() { @@ -150,4 +148,4 @@ if (this.startCode != o.getStartCode()) return (int)(this.startCode - o.getStartCode()); return 0; } -} \ No newline at end of file +} Index: src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (working copy) @@ -126,8 +126,6 @@ /** * INTERNAL Used by HBase Shell interface to access this metadata * attribute which denotes if the deferred log flush option is enabled - * - * @see #getDeferredLogFlush() */ public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH"; private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY = @@ -524,7 +522,7 @@ * before these deferred edits in memory are flushed onto the filesystem. *

* - * @param true if that deferred log flush is enabled on the table. + * @param isDeferredLogFlush */ public void setDeferredLogFlush(final boolean isDeferredLogFlush) { setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE); @@ -552,8 +550,8 @@ /** * This get the class associated with the region split policy which * determines when a region split should occur. The class used by - * default is {@link ConstantSizeRegionSplitPolicy} which split the - * region base on a constant {@link #getMaxFileSize()} + * default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy} + * which split the region base on a constant {@link #getMaxFileSize()} * * @return the class name of the region split policy for this table. * If this returns null, the default constant size based split policy @@ -877,7 +875,7 @@ } /** - * Returns an unmodifiable collection of all the {@link HColumnDescriptors} + * Returns an unmodifiable collection of all the {@link HColumnDescriptor} * of all the column families of the table. * * @return Immutable collection of {@link HColumnDescriptor} of all the Index: src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java (working copy) @@ -56,7 +56,7 @@ /** * Public exposure of Object.clone() in order to allow clients to easily * capture current state. - * @returns a copy of the object whose references will not change + * @return a copy of the object whose references will not change */ public abstract MonitoredTask clone(); Index: src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/monitoring/ThreadMonitoring.java (working copy) @@ -48,8 +48,9 @@ /** * Print all of the thread's information and stack traces. * - * @param stream the stream to - * + * @param sb + * @param info + * @param indent */ public static void appendThreadInfo(StringBuilder sb, ThreadInfo info, Index: src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/executor/RegionTransitionData.java (working copy) @@ -108,7 +108,7 @@ * * @param eventType type of event * @param regionName name of region as per HRegionInfo#getRegionName() - * @param origin Originating {@link ServerName} + * @param serverName Originating {@link ServerName} * @param payload Payload examples include the daughters involved in a * {@link EventType#RS_ZK_REGION_SPLIT}. 
   * Can be null
   */
*/ @@ -523,7 +522,7 @@ /** * Compute HDFS blocks distribution of a given file, or a portion of the file * @param fs file system - * @param FileStatus file status of the file + * @param status file status of the file * @param start start position of the portion * @param length length of the portion * @return The HDFS blocks distribution @@ -1104,7 +1103,7 @@ /** * Update table descriptor * @param fs - * @param conf + * @param rootdir * @param hTableDescriptor * @throws IOException */ Index: src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (working copy) @@ -164,8 +164,7 @@ * @param bitSize * @param errorRate * @return maximum number of keys that can be inserted into the Bloom filter - * @see {@link #computeMaxKeys(long, double, int)} for a more precise - * estimate + * @see #computeMaxKeys(long, double, int) for a more precise estimate */ public static long idealMaxKeys(long bitSize, double errorRate) { // The reason we need to use floor here is that otherwise we might put @@ -227,7 +226,7 @@ * * @param bitSize * @param foldFactor - * @return + * @return Foldable byte size */ public static int computeFoldableByteSize(long bitSize, int foldFactor) { long byteSizeLong = (bitSize + 7) / 8; Index: src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java (working copy) @@ -161,11 +161,11 @@ * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing. * * @param conf + * @param cacheConf * @param bloomType * @param maxKeys an estimate of the number of keys we expect to insert. * Irrelevant if compound Bloom filters are enabled. * @param writer the HFile writer - * @param bloomErrorRate * @return the new Bloom filter, or null in case Bloom filters are disabled * or when failed to create one. */ @@ -231,10 +231,10 @@ * Creates a new Delete Family Bloom filter at the time of * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing. * @param conf + * @param cacheConf * @param maxKeys an estimate of the number of keys we expect to insert. * Irrelevant if compound Bloom filters are enabled. * @param writer the HFile writer - * @param bloomErrorRate * @return the new Bloom filter, or null in case Bloom filters are disabled * or when failed to create one. */ Index: src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java (working copy) @@ -71,7 +71,7 @@ /** * Prepare an ordered pair of row and qualifier to be compared using - * {@link KeyValue.KeyComparator}. This is only used for row-column Bloom + * KeyValue.KeyComparator. This is only used for row-column Bloom * filters. 
*/ @Override Index: src/main/java/org/apache/hadoop/hbase/util/Bytes.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/Bytes.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/util/Bytes.java (working copy) @@ -759,7 +759,7 @@ * This method will get a sequence of bytes from pos -> limit, * but will restore pos after. * @param buf - * @return + * @return byte array */ public static byte[] getBytes(ByteBuffer buf) { int savedPos = buf.position(); Index: src/main/java/org/apache/hadoop/hbase/util/Objects.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/Objects.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/util/Objects.java (working copy) @@ -21,14 +21,10 @@ import java.lang.reflect.Array; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.NavigableSet; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Action; @@ -129,7 +125,7 @@ * Attempts to construct a text description of the given object, by * introspecting known classes and building a description of size. * @param obj - * @return + * @return Description */ public static String describeQuantity(Object obj) { StringBuilder str = new StringBuilder(); Index: src/main/java/org/apache/hadoop/hbase/client/Mutation.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/Mutation.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/Mutation.java (working copy) @@ -131,7 +131,6 @@ /** * Method for setting the put's familyMap - * @return familyMap */ public void setFamilyMap(Map> map) { this.familyMap = map; Index: src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (working copy) @@ -208,10 +208,7 @@ * This will then close connection to * the zookeeper ensemble and let go of all resources. * - * @param conf - * configuration whose identity is used to find {@link HConnection} - * instance. - * . + * @param connection */ public static void deleteStaleConnection(HConnection connection) { deleteConnection(connection, true, true); @@ -302,7 +299,7 @@ * of a {@link HConnection} instance based on the given {@link Configuration}. * *

- * If you find yourself wanting to use a {@link Connection} for a relatively + * If you find yourself wanting to use a {@link HConnection} for a relatively * short duration of time, and do not want to deal with the hassle of creating * and cleaning up that resource, then you should consider using this * convenience class. Index: src/main/java/org/apache/hadoop/hbase/client/Append.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/Append.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/Append.java (working copy) @@ -39,7 +39,7 @@ *

* To append to a set of columns of a row, instantiate an Append object with the * row to append to. At least one column to append must be specified using the - * {@link #add(byte[], byte[], long)} method. + * {@link #add(byte[], byte[], byte[])} method. */ public class Append extends Mutation implements Writable { // TODO: refactor to derive from Put? Index: src/main/java/org/apache/hadoop/hbase/client/HConnection.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/HConnection.java (working copy) @@ -21,7 +21,6 @@ import java.io.Closeable; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -192,7 +191,7 @@ * @param regionServer - the server to connect to * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs - * @deprecated Use {@link #getHRegionConnection(InetSocketAddress)} + * @deprecated Use {@link #getHRegionConnection(String, int)} */ public HRegionInterface getHRegionConnection(HServerAddress regionServer) throws IOException; @@ -384,4 +383,4 @@ public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException; -} \ No newline at end of file +} Index: src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java (working copy) @@ -294,7 +294,7 @@ /** * Atomically increments a column value. *

- * Equivalent to {@code {@link #incrementColumnValue(byte[], byte[], byte[], + * Equivalent to {@link #incrementColumnValue(byte[], byte[], byte[], * long, boolean) incrementColumnValue}(row, family, qualifier, amount, * true)} * @param row The row that contains the cell to increment. @@ -392,7 +392,7 @@ * * @param protocol The class or interface defining the remote protocol * @param row The row key used to identify the remote region location - * @return + * @return A CoprocessorProtocol instance */ T coprocessorProxy(Class protocol, byte[] row); @@ -417,7 +417,7 @@ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} * method * @return a Map of region names to - * {@link Batch.Call#call(Object)} return values + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} return values */ Map coprocessorExec( Class protocol, byte[] startKey, byte[] endKey, Batch.Call callable) @@ -436,7 +436,7 @@ * *

* For each result, the given - * {@link Batch.Callback#update(byte[], byte[], Object)} + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} * method will be called. *

* @@ -447,7 +447,7 @@ * @param callable wraps the CoprocessorProtocol implementation method calls * made per-region * @param callback an instance upon which - * {@link Batch.Callback#update(byte[], byte[], Object)} with the + * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} with the * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} * return value for each region * @param CoprocessorProtocol subclass for the remote invocation Index: src/main/java/org/apache/hadoop/hbase/client/HTablePool.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/HTablePool.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/HTablePool.java (working copy) @@ -106,8 +106,6 @@ * configuration * @param maxSize * maximum number of references to keep for each table - * @param tableFactory - * table factory * @param poolType * pool type which is one of {@link PoolType#Reusable} or * {@link PoolType#ThreadLocal} Index: src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java (working copy) @@ -76,11 +76,6 @@ return actions.get(i); } - /** - * @param i - * @return - * @deprecated - */ public HServerAddress getAddress(int i) { return new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(getHostnamePort(i))); } Index: src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java (working copy) @@ -32,7 +32,7 @@ * and its promoted data type is also a Long value. For computing aggregation * function, this class is used to find the datatype of the cell value. Client * is supposed to instantiate it and passed along as a parameter. See - * {@link TestAggregateProtocol} methods for its sample usage. + * TestAggregateProtocol methods for its sample usage. * Its methods handle null arguments gracefully. 
*/ public class LongColumnInterpreter implements ColumnInterpreter { Index: src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java (working copy) @@ -170,7 +170,7 @@ * @param tableName * @param ci * @param scan - * @return + * @return * @throws Throwable */ public long rowCount(final byte[] tableName, @@ -282,7 +282,7 @@ * @param tableName * @param ci * @param scan - * @return + * @return * @throws Throwable */ public double avg(final byte[] tableName, @@ -348,7 +348,7 @@ * @param tableName * @param ci * @param scan - * @return + * @return * @throws Throwable */ public double std(final byte[] tableName, ColumnInterpreter ci, Index: src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java (working copy) @@ -563,7 +563,7 @@ } /** - * @param keepDeletedRows True if deleted rows should not be collected + * @param keepDeletedCells True if deleted rows should not be collected * immediately. */ public void setKeepDeletedCells(boolean keepDeletedCells) { Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy) @@ -61,7 +61,7 @@ /** * Executes region split as a "transaction". Call {@link #prepare()} to setup * the transaction, {@link #execute(Server, RegionServerServices)} to run the - * transaction and {@link #rollback(OnlineRegions)} to cleanup if execute fails. + * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails. * *

  * <p>Here is an example of how you would use this class:
  * <pre>

@@ -145,9 +145,6 @@
 
   /**
    * Constructor
-   * @param services So we can online new regions.  If null, we'll skip onlining
-   * (Useful testing).
-   * @param c Configuration to use running split
    * @param r Region to split
    * @param splitrow Row to split around
    */
@@ -430,8 +427,7 @@
    * @param services Used to online/offline regions.
    * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
-   * @throws KeeperException
-   * @throws NodeExistsException 
+   * @throws IOException
    * @see #rollback(Server, RegionServerServices)
    */
   public PairOfSameType<HRegion> execute(final Server server,
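
[Editor's note: the class comment above promises a usage example. For reviewers, here is a minimal sketch of the prepare/execute/rollback protocol that javadoc describes; the wrapper method and the names parent, midKey, server, and services are assumptions for illustration, not part of this patch.]

    // Sketch only: drives a split through the documented three-call protocol.
    void splitRegion(HRegion parent, byte[] midKey, Server server,
        RegionServerServices services) throws IOException {
      SplitTransaction st = new SplitTransaction(parent, midKey);
      if (!st.prepare()) return;          // split not feasible; nothing to roll back
      try {
        st.execute(server, services);     // run the split "transaction"
      } catch (IOException ioe) {
        try {
          st.rollback(server, services);  // cleanup because execute failed
        } catch (RuntimeException e) {
          // Rollback itself failed; abort rather than serve inconsistent data.
          server.abort("Failed split, abort", e);
        }
      }
    }
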
Index: src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java	(revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java	(working copy)
@@ -210,8 +210,8 @@
    * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost#postOpen()} are such hooks.
    *
    * See also {@link org.apache.hadoop.hbase.master.MasterCoprocessorHost#handleCoprocessorThrowable()}
-   * @param env: The coprocessor that threw the exception.
-   * @param e: The exception that was thrown.
+   * @param env The environment of the coprocessor that threw the exception.
+   * @param e The exception that was thrown.
    */
   private void handleCoprocessorThrowableNoRethrow(
       final CoprocessorEnvironment env, final Throwable e) {
@@ -566,7 +566,6 @@
   /**
    * @param get the Get request
    * @param results the result set
-   * @return the possibly transformed result set to use
    * @exception IOException Exception
    */
   public void postGet(final Get get, final List<KeyValue> results)
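
[Editor's note: for context on the postGet() hook documented above, a sketch of an observer the host would invoke. It assumes the 0.92-era BaseRegionObserver API; the class name and the println are illustrative only.]

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.util.Bytes;

    // Runs after every Get; RegionCoprocessorHost.postGet() above fans out to it.
    public class AuditGetObserver extends BaseRegionObserver {
      @Override
      public void postGet(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Get get, List<KeyValue> results) throws IOException {
        // Inspect (or rewrite in place) the result set. Anything thrown here is
        // routed to handleCoprocessorThrowableNoRethrow() in the host.
        System.out.println("get " + Bytes.toStringBinary(get.getRow())
            + " -> " + results.size() + " KeyValues");
      }
    }
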
Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java	(revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java	(working copy)
@@ -71,7 +71,7 @@
    * @param tablename   - name of table
    * @param logSeqNum   - log sequence number
    * @param now Time at which this edit was written.
-   * @param UUID of the cluster (used in Replication)
+   * @param clusterId UUID of the cluster (used in Replication)
    */
   public HLogKey(final byte [] encodedRegionName, final byte [] tablename,
       long logSeqNum, final long now, UUID clusterId) {
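
[Editor's note: a small illustration of the constructor documented above, showing what the renamed clusterId parameter carries; every value below is made up.]

    // Illustrative only: keys a WAL edit with a source-cluster UUID so
    // replication can tell which cluster an edit originated from.
    HLogKey key = new HLogKey(
        Bytes.toBytes("1028785192"),    // encoded region name
        Bytes.toBytes("mytable"),       // table name
        42L,                            // log sequence number
        System.currentTimeMillis(),     // time the edit was written
        UUID.randomUUID());             // clusterId (used in Replication)
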
Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(working copy)
@@ -1441,7 +1441,7 @@
    *
    * In this method, by removing the entry in lastSeqWritten for the region
    * being flushed we ensure that the next edit inserted in this region will be
-   * correctly recorded in {@link #append(HRegionInfo, HLogKey, WALEdit)}. The
+   * correctly recorded in {@link #append(HRegionInfo, byte[], WALEdit, long, HTableDescriptor)}. The
    * lsn of the earliest in-memory lsn - which is now in the memstore snapshot -
    * is saved temporarily in the lastSeqWritten map while the flush is active.
    *
@@ -1449,7 +1449,7 @@
    *         {@link #completeCacheFlush(byte[], byte[], long, boolean)} (byte[],
    *         byte[], long)}
    * @see #completeCacheFlush(byte[], byte[], long, boolean)
-   * @see #abortCacheFlush()
+   * @see #abortCacheFlush(byte[])
    */
   public long startCacheFlush(final byte[] encodedRegionName) {
     this.cacheFlushLock.lock();
@@ -1753,29 +1753,6 @@
     System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
   }
 
-  private static void dump(final Configuration conf, final Path p)
-  throws IOException {
-    FileSystem fs = FileSystem.get(conf);
-    if (!fs.exists(p)) {
-      throw new FileNotFoundException(p.toString());
-    }
-    if (!fs.isFile(p)) {
-      throw new IOException(p + " is not a file");
-    }
-    Reader log = getReader(fs, p, conf);
-    try {
-      int count = 0;
-      HLog.Entry entry;
-      while ((entry = log.next()) != null) {
-        System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
-          entry.toString());
-        count++;
-      }
-    } finally {
-      log.close();
-    }
-  }
-
   private static void split(final Configuration conf, final Path p)
   throws IOException {
     FileSystem fs = FileSystem.get(conf);
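
[Editor's note: the startCacheFlush() javadoc fixed above describes a three-call protocol. A minimal sketch, assuming a wal (HLog), an encodedRegionName/tableName pair, and a hypothetical persistMemstoreSnapshot() standing in for the real memstore flush.]

    // Sketch only: the flush protocol spelled out by the javadoc.
    void flushRegion(HLog wal, byte[] encodedRegionName, byte[] tableName)
        throws IOException {
      long seqId = wal.startCacheFlush(encodedRegionName); // snapshot; clears lastSeqWritten entry
      try {
        persistMemstoreSnapshot();  // hypothetical: write the snapshot to store files
        wal.completeCacheFlush(encodedRegionName, tableName, seqId, false);
      } catch (IOException ioe) {
        wal.abortCacheFlush(encodedRegionName); // restore the earliest in-memory lsn
        throw ioe;
      }
    }
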
Index: src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java	(revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerDynamicMetrics.java	(working copy)
@@ -46,8 +46,7 @@
  * <p>
  * This class has a number of metrics variables that are publicly accessible;
  * these variables (objects) have methods to update their values;
- * for example:
- *  <p>
{@link #rpcQueueTime}.inc(time) + * for example: rpcQueueTime.inc(time) * */ public class RegionServerDynamicMetrics implements Updater { Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (working copy) @@ -548,8 +548,7 @@ * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight * guarantee that two workers will not be executing the same task therefore it * is better to have workers prepare the task and then have the - * {@link SplitLogManager} commit the work in - * {@link SplitLogManager.TaskFinisher} + * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher */ static public interface TaskExecutor { static public enum Status { Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy) @@ -346,7 +346,7 @@ /** * HRegion constructor. his constructor should only be used for testing and * extensions. Instances of HRegion should be instantiated with the - * {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)} method. + * {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)} method. * * * @param tableDir qualified path of directory where region should be located, @@ -364,7 +364,7 @@ * is new), then read them from the supplied path. * @param rsServices reference to {@link RegionServerServices} or null * - * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester) + * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices) */ public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd, @@ -1161,7 +1161,7 @@ * @param status * @return true if the region needs compacting * @throws IOException - * @see #internalFlushcache() + * @see #internalFlushcache(MonitoredTask) */ protected boolean internalFlushcache( final HLog wal, final long myseqid, MonitoredTask status) @@ -2585,7 +2585,7 @@ /** * Release the row lock! - * @param lockid The lock ID to release. + * @param lockId The lock ID to release. */ public void releaseRowLock(final Integer lockId) { HashedBytes rowKey = lockIds.remove(lockId); @@ -2995,13 +2995,14 @@ /** * Open a Region. - * @param info Info for region to be opened. + * @param info Info for region to be opened + * @param htd * @param wal HLog for region to use. This method will call * HLog#setSequenceNumber(long) passing the result of the call to * HRegion#getMinSequenceId() to ensure the log id is properly kept * up. HRegionStore does this every time it opens a new region. * @param conf - * @param flusher An interface we can request flushes against. + * @param rsServices An interface we can request flushes against. * @param reporter An interface we can report progress against. 
* @return new HRegion * @@ -3565,7 +3566,6 @@ * * @param append * @param lockid - * @param returnResult * @param writeToWAL * @return new keyvalues after increment * @throws IOException Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (working copy) @@ -70,7 +70,7 @@ /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. To - * create, call {@link #createWriter(FileSystem, Path, int, Configuration)} + * create, call {@link #createWriter(FileSystem, Path, int, Configuration, CacheConfig)} * and append data. Be sure to add any metadata before calling close on the * Writer (Use the appendMetadata convenience methods). On close, a StoreFile * is sitting in the Filesystem. To refer to it, create a StoreFile instance @@ -402,7 +402,7 @@ * helper function to compute HDFS blocks distribution of a given file. * For reference file, it is an estimate * @param fs The FileSystem - * @param o The path of the file + * @param p The path of the file * @return HDFS blocks distribution */ static public HDFSBlocksDistribution computeHDFSBlockDistribution( @@ -531,7 +531,6 @@ /** * @return Current reader. Must call createReader first else returns null. - * @throws IOException * @see #createReader() */ public Reader getReader() { @@ -539,7 +538,7 @@ } /** - * @param b + * @param evictOnClose * @throws IOException */ public synchronized void closeReader(boolean evictOnClose) @@ -855,7 +854,6 @@ * If the timeRangeTracker is not set, * update TimeRangeTracker to include the timestamp of this key * @param kv - * @throws IOException */ public void trackTimestamps(final KeyValue kv) { if (KeyValue.Type.Put.getCode() == kv.getType()) { @@ -1243,7 +1241,7 @@ /** * A method for checking Bloom filters. Called directly from - * {@link StoreFileScanner} in case of a multi-column query. + * StoreFileScanner in case of a multi-column query. * * @param row * @param rowOffset @@ -1251,7 +1249,7 @@ * @param col * @param colOffset * @param colLen - * @return + * @return True if passes */ public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, int rowLen, byte[] col, int colOffset, int colLen) { Index: src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (working copy) @@ -94,11 +94,10 @@ /** * Does the real seek operation in case it was skipped by - * {@link #seekToRowCol(KeyValue, boolean)}. Note that this function should + * seekToRowCol(KeyValue, boolean) (TODO: Whats this?). Note that this function should * be never called on scanners that always do real seek operations (i.e. most * of the scanners). The easiest way to achieve this is to call * {@link #realSeekDone()} first. 
*/ public void enforceSeek() throws IOException; - -} +} \ No newline at end of file Index: src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java (working copy) @@ -47,7 +47,7 @@ /** * Constructor * @param value value - * @param BitwiseOp bitOperator - the operator to use on the bit comparison + * @param bitOperator operator to use on the bit comparison */ public BitComparator(byte[] value, BitwiseOp bitOperator) { super(value); Index: src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java (working copy) @@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.filter; +import java.util.ArrayList; + import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.Scan; -import java.util.List; -import java.util.ArrayList; - /** * This filter is used to filter based on the key. It takes an operator * (equal, greater, not equal, etc) and a byte [] comparator for the row, Index: src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java (working copy) @@ -19,26 +19,19 @@ */ package org.apache.hadoop.hbase.filter; -import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.nio.charset.CharacterCodingException; -import java.util.TreeSet; import java.util.ArrayList; -import java.util.Stack; +import java.util.EmptyStackException; import java.util.HashMap; import java.util.Set; +import java.util.Stack; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.filter.ParseConstants; - -import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import java.lang.ArrayIndexOutOfBoundsException; -import java.lang.ClassCastException; -import java.lang.reflect.*; -import java.util.EmptyStackException; +import org.apache.hadoop.hbase.util.Bytes; /** * This class allows a user to specify a filter via a string @@ -289,7 +282,7 @@ /** * Returns the arguments of the filter from the filter string *

- * @param filter_string filter string given by the user + * @param filterStringAsByteArray filter string given by the user * @return an ArrayList containing the arguments of the filter in the filter string */ public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) { @@ -361,7 +354,6 @@ * @param operatorStack the stack containing the operators and parenthesis * @param filterStack the stack containing the filters * @param operator the operator found while parsing the filterString - * @return returns the filterStack after evaluating the stack */ public void reduce(Stack operatorStack, Stack filterStack, @@ -646,7 +638,7 @@ * byte array representing abc *

* @param quotedByteArray the quoted byte array - * @return + * @return Unquoted byte array */ public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) { if (quotedByteArray == null || @@ -665,7 +657,7 @@ * Converts an int expressed in a byte array to an actual int *

* This doesn't use Bytes.toInt because that assumes - * that there will be {@link #SIZEOF_INT} bytes available. + * that there will be {@link Bytes#SIZEOF_INT} bytes available. *

* @param numberAsByteArray the int value expressed as a byte array * @return the int value @@ -688,7 +680,7 @@ * Converts a long expressed in a byte array to an actual long *

* This doesn't use Bytes.toLong because that assumes - * that there will be {@link #SIZEOF_LONG} bytes available. + * that there will be {@link Bytes#SIZEOF_INT} bytes available. *

* @param numberAsByteArray the long value expressed as a byte array * @return the long value Index: src/main/java/org/apache/hadoop/hbase/ServerName.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/ServerName.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/ServerName.java (working copy) @@ -32,7 +32,7 @@ * usually timestamp of server startup). The {@link #toString()} format of * ServerName is safe to use in the filesystem and as znode name up in * ZooKeeper. Its format is: - * <hostname> '{@link #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode>. + * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>. * For example, if hostname is example.org, port is 1234, * and the startcode for the regionserver is 1212121212, then * the {@link #toString()} would be example.org,1234,1212121212. @@ -220,7 +220,7 @@ /** * @param left - * @param rigth + * @param right * @return True if other has same hostname and port. */ public static boolean isSameHostnameAndPort(final ServerName left, Index: src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java (working copy) @@ -31,7 +31,7 @@ /** * Create a loadblanacer from the given conf. * @param conf - * @return + * @return A {@link LoadBalancer} */ public static LoadBalancer getLoadBalancer(Configuration conf) { Index: src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy) @@ -164,15 +164,13 @@ /** * @return HBase root dir. - * @throws IOException */ public Path getRootDir() { return this.rootdir; } /** - * Returns the unique identifier generated for this cluster - * @return + * @return The unique identifier generated for this cluster */ public String getClusterId() { return clusterId; Index: src/main/java/org/apache/hadoop/hbase/master/ServerManager.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy) @@ -243,7 +243,7 @@ } /** - * @param serverName + * @param address * @return HServerLoad if serverName is known else null * @deprecated Use {@link #getLoad(HServerAddress)} */ Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -276,7 +276,7 @@ * Main processing loop for the HMaster. *

    * <ol>
    * <li>Block until becoming active master
-   * <li>Finish initialization via {@link #finishInitialization()}
+   * <li>Finish initialization via finishInitialization(MonitoredTask)
    * <li>Enter loop until we are stopped
    * <li>Stop services and perform cleanup once stopped
    * </ol>
@@ -881,7 +881,7 @@ } /** - * Switch for the background {@link CatalogJanitor} thread. + * Switch for the background CatalogJanitor thread. * Used for testing. The thread will continue to run. It will just be a noop * if disabled. * @param b If false, the catalog janitor won't do anything. Index: src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (working copy) @@ -61,7 +61,7 @@ /** * Perform the major balance operation * @param clusterState - * @return + * @return List of plans */ public List balanceCluster(Map> clusterState); @@ -69,7 +69,7 @@ * Perform a Round Robin assignment of regions. * @param regions * @param servers - * @return + * @return Map of servername to regioninfos */ public Map> roundRobinAssignment(List regions, List servers); @@ -77,7 +77,7 @@ * Assign regions to the previously hosting region server * @param regions * @param servers - * @return + * @return List of plans */ public Map> retainAssignment(Map regions, List servers); @@ -85,14 +85,14 @@ * Sync assign a region * @param regions * @param servers - * @return + * @return Map regioninfos to servernames */ public Map immediateAssignment(List regions, List servers); /** * Get a random region server from the list * @param servers - * @return + * @return Servername */ public ServerName randomAssignment(List servers); } Index: src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (working copy) @@ -68,8 +68,8 @@ * znode is created under /hbase/splitlog. SplitLogWorkers race to grab a task. * * SplitLogManager monitors the task znodes that it creates using the - * {@link #timeoutMonitor} thread. If a task's progress is slow then - * {@link #resubmit(String, boolean)} will take away the task from the owner + * timeoutMonitor thread. If a task's progress is slow then + * resubmit(String, boolean) will take away the task from the owner * {@link SplitLogWorker} and the task will be * upforgrabs again. When the task is done then the task's znode is deleted by * SplitLogManager. @@ -122,8 +122,6 @@ * @param conf * @param stopper * @param serverName - * @param services - * @param service */ public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf, Stoppable stopper, String serverName) { @@ -205,7 +203,7 @@ * @throws IOException * if there was an error while splitting any log file * @return cumulative size of the logfiles split - * @throws KeeperException + * @throws IOException */ public long splitLogDistributed(final Path logDir) throws IOException { List logDirs = new ArrayList(); @@ -218,8 +216,7 @@ * available worker region server. This method must only be called after the * region servers have been brought online. 
* - * @param logDir - * the log directory encoded with a region server name + * @param logDirs * @throws IOException * if there was an error while splitting any log file * @return cumulative size of the logfiles split Index: src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java (working copy) @@ -220,7 +220,7 @@ /** * Called prior to unassigning a given region. * @param ctx the environment to interact with the framework and master - * @param regionName the name of the region + * @param regionInfo * @param force whether to force unassignment or not */ void preUnassign(final ObserverContext ctx, @@ -229,7 +229,7 @@ /** * Called after the region unassignment has been requested. * @param ctx the environment to interact with the framework and master - * @param regionName the name of the region + * @param regionInfo * @param force whether to force unassignment or not */ void postUnassign(final ObserverContext ctx, Index: src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateProtocol.java (working copy) @@ -80,11 +80,10 @@ S getSum(ColumnInterpreter ci, Scan scan) throws IOException; /** - * Gives the row count for the given column family and column qualifier, in - * the given row range as defined in the Scan object. * @param ci * @param scan - * @return + * @return Row count for the given column family and column qualifier, in + * the given row range as defined in the Scan object. * @throws IOException */ long getRowNum(ColumnInterpreter ci, Scan scan) @@ -104,7 +103,7 @@ * type. * @param ci * @param scan - * @return + * @return Average * @throws IOException */ Pair getAvg(ColumnInterpreter ci, Scan scan) @@ -120,7 +119,7 @@ * deviation is square root of variance. * @param ci * @param scan - * @return + * @return STD * @throws IOException */ Pair, Long> getStd(ColumnInterpreter ci, Scan scan) Index: src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java (working copy) @@ -42,14 +42,15 @@ * . There is a conversion method * {@link ColumnInterpreter#castToReturnType(Object)} which takes a type and * returns a type. - * @param : T - cell value data type, S - promoted data type + * @param Cell value data type + * @param Promoted data type */ public interface ColumnInterpreter extends Writable { /** * @param colFamily * @param colQualifier - * @param value + * @param kv * @return value of type T * @throws IOException */ @@ -57,44 +58,39 @@ throws IOException; /** - * returns sum or non null value among (if either of them is null); otherwise - * returns a null. * @param l1 * @param l2 - * @return + * @return sum or non null value among (if either of them is null); otherwise + * returns a null. 
*/ public S add(S l1, S l2); /** * returns the maximum value for this type T - * @return + * @return max */ T getMaxValue(); - /** - * @return - */ - T getMinValue(); /** * @param o1 * @param o2 - * @return + * @return multiplication */ S multiply(S o1, S o2); /** * @param o - * @return + * @return increment */ S increment(S o); /** * provides casting opportunity between the data types. * @param o - * @return + * @return cast */ S castToReturnType(T o); @@ -109,10 +105,10 @@ /** * used for computing average of data values. Not providing the divide - * method that takes two values as it si not needed as of now. + * method that takes two values as it is not needed as of now. * @param o * @param l - * @return + * @return Average */ double divideForAvg(S o, Long l); } \ No newline at end of file Index: src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (working copy) @@ -154,7 +154,7 @@ // and intercept preXXX() method to check user privilege for the given table // and column family. public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor { - @Override + // @Override public Get preGet(CoprocessorEnvironment e, Get get) throws CoprocessorException { @@ -208,9 +208,9 @@ // Aggregation implementation at a region. public static class ColumnAggregationEndpoint extends BaseEndpointCoprocessor implements ColumnAggregationProtocol { - @Override - // Scan the region by the given family and qualifier. Return the aggregation - // result. + // @Override + // Scan the region by the given family and qualifier. Return the aggregation + // result. public int sum(byte[] family, byte[] qualifier) throws IOException { // aggregate at each region Index: src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (working copy) @@ -159,7 +159,7 @@ * native Hadoop ones (We'll throw a ClassNotFoundException if end up in * here when we should be using native hadoop TotalOrderPartitioner). * @param job - * @return + * @return Context * @throws IOException */ public static TaskAttemptContext getTaskAttemptContext(final Job job) Index: src/main/java/org/apache/hadoop/hbase/KeyValue.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/KeyValue.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/KeyValue.java (working copy) @@ -1847,7 +1847,6 @@ * (the value part of the returned KV is always empty). Used in creating * "fake keys" for the multi-column Bloom filter optimization to skip the * row/column we already know is not in the file. 
Index: src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (working copy)
@@ -154,7 +154,7 @@
 // and intercept preXXX() method to check user privilege for the given table
 // and column family.
 public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor {
-  @Override
+  // @Override
   public Get preGet(CoprocessorEnvironment e, Get get)
       throws CoprocessorException {
 
@@ -208,9 +208,9 @@
 // Aggregation implementation at a region.
 public static class ColumnAggregationEndpoint extends BaseEndpointCoprocessor
     implements ColumnAggregationProtocol {
-  @Override
-  // Scan the region by the given family and qualifier. Return the aggregation
-  // result.
+  // @Override
+  // Scan the region by the given family and qualifier. Return the aggregation
+  // result.
   public int sum(byte[] family, byte[] qualifier) throws IOException {
     // aggregate at each region
Index: src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (working copy)
@@ -159,7 +159,7 @@
    * native Hadoop ones (We'll throw a ClassNotFoundException if end up in
    * here when we should be using native hadoop TotalOrderPartitioner).
    * @param job
-   * @return
+   * @return Context
    * @throws IOException
    */
   public static TaskAttemptContext getTaskAttemptContext(final Job job)
Index: src/main/java/org/apache/hadoop/hbase/KeyValue.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java (working copy)
@@ -1847,7 +1847,6 @@
    * (the value part of the returned KV is always empty). Used in creating
    * "fake keys" for the multi-column Bloom filter optimization to skip the
    * row/column we already know is not in the file.
-   * @param kv the key-value pair to take row and column from
    * @return the last key on the row/column of the given key-value pair
    */
   public KeyValue createLastOnRowCol() {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (working copy)
@@ -103,7 +103,7 @@
    * The second list is blocksize of the slabs in bytes. (E.g. the slab holds
    * blocks of this size).
    *
-   * @param Configuration file.
+   * @param conf Configuration file.
    */
   public void addSlabByConf(Configuration conf) {
     // Proportions we allocate to each slab of the total size.
@@ -229,7 +229,8 @@
   /**
    * Get the buffer of the block with the specified name.
    *
-   * @param blockName block name
+   * @param key
+   * @param caching
    * @return buffer of specified block name, or null if not in cache
    */
   public Cacheable getBlock(String key, boolean caching) {
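The getBlock(key, caching) signature documented above supports the usual read-through pattern. A minimal sketch, assuming SlabCache's cacheBlock(String, Cacheable) from the same class (not shown in this hunk); loadBlock is a hypothetical stand-in for an HFile block read:

import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.slab.SlabCache;

public class SlabCacheReadThrough {
  // Read-through lookup: serve from the slab if present, else load and cache.
  static Cacheable fetch(SlabCache cache, String key) {
    Cacheable block = cache.getBlock(key, true);
    if (block == null) {
      block = loadBlock(key);        // hypothetical disk read
      cache.cacheBlock(key, block);  // populate the slab for later readers
    }
    return block;
  }
  static Cacheable loadBlock(String key) {
    throw new UnsupportedOperationException("stand-in for an HFile block read");
  }
}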
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (working copy)
@@ -495,10 +495,10 @@
  * <ol>
  * <li>Construct an {@link HFileBlock.Writer}, providing a compression
  * algorithm
- * <li>Call {@link Writer#startWriting(BlockType)} and get a data stream to
+ * <li>Call {@link Writer#startWriting(BlockType, boolean)} and get a data stream to
  * write to
  * <li>Write your data into the stream
- * <li>Call {@link Writer#writeHeaderAndData()} as many times as you need to
+ * <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need to
  * store the serialized block into an external stream, or call
  * {@link Writer#getHeaderAndData()} to get it as a byte array.
  * <li>Repeat to write more blocks
@@ -586,8 +586,6 @@
     private long prevOffset;
 
     /**
-     * @param blockType
-     *          block type to create
      * @param compressionAlgorithm
      *          compression algorithm to use
      */
@@ -717,7 +715,7 @@
     }
 
     /**
-     * Similar to {@link #writeHeaderAndData(DataOutputStream)}, but records
+     * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records
      * the offset of this block so that it can be referenced in the next block
      * of the same type.
      *
@@ -864,7 +862,7 @@
     }
 
     /**
-     * Similar to {@link #getUncompressedDataWithHeader()} but returns a byte
+     * Similar to {@link #getUncompressedBufferWithHeader()} but returns a byte
      * buffer.
      *
     * @return uncompressed block for caching on write in the form of a buffer
@@ -1084,20 +1082,15 @@
     /**
      * Decompresses data from the given stream using the configured compression
      * algorithm.
-     *
-     * @param boundedStream
+     * @param dest
+     * @param destOffset
+     * @param bufferedBoundedStream
      *          a stream to read compressed data from, bounded to the exact
      *          amount of compressed data
      * @param compressedSize
      *          compressed data size, header not included
      * @param uncompressedSize
      *          uncompressed data size, header not included
-     * @param header
-     *          the header to include before the decompressed data, or null.
-     *          Only the first {@link HFileBlock#HEADER_SIZE} bytes of the
-     *          buffer are included.
-     * @return the byte buffer containing the given header (optionally) and the
-     *         decompressed data
      * @throws IOException
      */
     protected void decompress(byte[] dest, int destOffset,
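The numbered steps in the javadoc above translate directly into code. A minimal sketch, assuming the post-patch 0.92 signatures — Writer(Compression.Algorithm), startWriting(BlockType, boolean), writeHeaderAndData(FSDataOutputStream) — with a hypothetical output path:

import java.io.DataOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

public class BlockWriterExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/example.blocks"));
    // Step 1: construct a writer with a compression algorithm.
    HFileBlock.Writer writer = new HFileBlock.Writer(Compression.Algorithm.NONE);
    for (int i = 0; i < 3; i++) {
      // Step 2: start a block and get a stream (second arg: cache-on-write).
      DataOutputStream dos = writer.startWriting(BlockType.DATA, false);
      // Step 3: write your data into the stream.
      dos.writeInt(i);
      // Step 4: flush the serialized block to the external stream.
      writer.writeHeaderAndData(out);
    }
    out.close();
  }
}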
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (working copy)
@@ -74,7 +74,7 @@
 
   /**
    * Get the statistics for this block cache.
-   * @return
+   * @return Stats
    */
   public CacheStats getStats();
 
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV1.java (working copy)
@@ -56,7 +56,6 @@
    * stream.
    * @param size Length of the stream.
    * @param cacheConf cache references and configuration
-   * @throws IOException
    */
   public HFileReaderV1(Path path, FixedFileTrailer trailer,
       final FSDataInputStream fsdis, final long size,
@@ -81,7 +80,7 @@
    * Read in the index and file info.
    *
    * @return A map of fileinfo data.
-   * @see {@link Writer#appendFileInfo(byte[], byte[])}.
+   * @see Writer#appendFileInfo(byte[], byte[])
    * @throws IOException
    */
   @Override
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (working copy)
@@ -213,7 +213,8 @@
    * Read in a file block.
    *
    * @param dataBlockOffset offset to read.
-   * @param onDiskSize size of the block
+   * @param onDiskBlockSize size of the block
+   * @param cacheBlock
    * @param pread Use positional read instead of seek+read (positional is better
    * doing random reads whereas seek+read is better scanning).
    * @param isCompaction is this block being read as part of a compaction
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/InlineBlockWriter.java (working copy)
@@ -56,7 +56,6 @@
    * @param offset the offset of the block in the stream
    * @param onDiskSize the on-disk size of the block
    * @param uncompressedSize the uncompressed size of the block
-   * @param rawSize
    */
   void blockWritten(long offset, int onDiskSize, int uncompressedSize);
 
@@ -69,5 +68,4 @@
    * @return true if inline blocks produced by this writer should be cached
    */
   boolean cacheOnWrite();
-
-}
+}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (working copy)
@@ -246,18 +246,14 @@
   }
 
   /**
-   * Returns the factory to be used to create {@link HFile} writers. Should
-   * always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
-   * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
+   * Returns the factory to be used to create {@link HFile} writers.
    */
   public static final WriterFactory getWriterFactory(Configuration conf) {
     return HFile.getWriterFactory(conf, new CacheConfig(conf));
   }
 
   /**
-   * Returns the factory to be used to create {@link HFile} writers. Should
-   * always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
-   * can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
+   * Returns the factory to be used to create {@link HFile} writers
   */
   public static final WriterFactory getWriterFactory(Configuration conf,
       CacheConfig cacheConf) {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java (working copy)
@@ -52,7 +52,7 @@
  * Examples of how to use the block index writer can be found in
  * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
  * to use the reader can be found in {@link HFileReaderV2} and
- * {@link TestHFileBlockIndex}.
+ * TestHFileBlockIndex.
  */
 public class HFileBlockIndex {
 
@@ -62,7 +62,7 @@
 
   /**
    * The maximum size guideline for index blocks (both leaf, intermediate, and
-   * root). If not specified, {@link #DEFAULT_MAX_CHUNK_SIZE} is used.
+   * root). If not specified, DEFAULT_MAX_CHUNK_SIZE is used.
   */
  public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";
 
@@ -913,8 +913,6 @@
      * blocks, so the non-root index format is used.
      *
      * @param out
-     * @param position The beginning offset of the inline block in the file not
-     *          include the header.
      */
     @Override
     public void writeInlineBlock(DataOutput out) throws IOException {
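With the version-specific pointers dropped from the getWriterFactory() javadoc, the factory is the one supported way to obtain a writer. A minimal sketch; the exact createWriter overload varies between HFile versions, so this one (fs, path, blockSize, compression name, comparator) is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterFactoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // The factory hides the V1/V2 choice the old javadoc used to spell out.
    HFile.WriterFactory factory =
        HFile.getWriterFactory(conf, new CacheConfig(conf));
    HFile.Writer w = factory.createWriter(fs, new Path("/tmp/example.hfile"),
        64 * 1024, "none", KeyValue.KEY_COMPARATOR);
    w.append(Bytes.toBytes("row1"), Bytes.toBytes("value1"));
    w.close();
  }
}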
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (working copy)
@@ -225,7 +225,7 @@
   }
 
   /**
-   * TODO left from {@HFile} version 1: move this to StoreFile after Ryan's
+   * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
    * patch goes in to eliminate {@link KeyValue} here.
    *
    * @return the first row key, or null if the file is empty.
@@ -239,7 +239,7 @@
   }
 
   /**
-   * TODO left from {@HFile} version 1: move this to StoreFile after
+   * TODO left from {@link HFile} version 1: move this to StoreFile after
    * Ryan's patch goes in to eliminate {@link KeyValue} here.
    *
    * @return the last row key, or null if the file is empty.
Index: src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (working copy)
@@ -55,7 +55,7 @@
   /**
    * @param fs
    * @param p
-   * @param c
+   * @param cacheConf
    * @param r
    * @throws IOException
   */
Index: src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (working copy)
@@ -145,7 +145,7 @@
    * Does not timeout.
    * @param zk If zk is null, we'll create an instance (and shut it down
    * when {@link #stop()} is called) else we'll use what is passed.
-   * @param connection server connection
+   * @param conf
    * @param abortable If fatal exception we'll call abort on this.  May be null.
    * If it is we'll use the Connection associated with the passed
    * {@link Configuration} as our Abortable.
Index: src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java (working copy)
@@ -50,7 +50,7 @@
   /**
    * Update legacy META rows, removing HTD from HRI.
    * @param masterServices
-   * @return
+   * @return List of table descriptors.
    * @throws IOException
    */
   public static List<HTableDescriptor> updateMetaWithNewRegionInfo(
@@ -78,7 +78,7 @@
   /**
    * Update the ROOT with new HRI. (HRI with no HTD)
    * @param masterServices
-   * @return
+   * @return List of table descriptors
    * @throws IOException
   */
   public static List<HTableDescriptor> updateRootWithNewRegionInfo(
@@ -239,4 +239,4 @@
         "Master startup aborted.");
     }
   }
-}
\ No newline at end of file
+}
Index: src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (working copy)
@@ -251,7 +251,7 @@
    * @param metaServer connection to server hosting ROOT
    * @return location of META in ROOT where location, or null if not available
    * @throws IOException
-   * @deprecated Does not retry; use {@link #readRegionLocation(CatalogTracker, byte[])
+   * @deprecated Does not retry; use {@link #readRegionLocation(CatalogTracker, byte[])}
    */
   public static ServerName readMetaLocation(HRegionInterface metaServer)
   throws IOException {
@@ -589,7 +589,7 @@
   /**
    * @param catalogTracker
-   * @param hsi Server specification
+   * @param serverName
    * @return List of user regions installed on this server (does not include
    * catalog regions).
    * @throws IOException
   */
@@ -742,4 +742,4 @@
       this.results.add(r);
     }
   }
-}
\ No newline at end of file
+}
Index: src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (working copy)
@@ -134,9 +134,9 @@
   }
 
   /** Returns the server instance called under or null.  May be called under
-   * {@link #call(Class, Writable, long)} implementations, and under {@link Writable}
-   * methods of paramters and return values.  Permits applications to access
-   * the server context.
+   * {@link #call(Class, Writable, long, MonitoredRPCHandler)} implementations,
+   * and under {@link Writable} methods of parameters and return values.
+   * Permits applications to access the server context.
   * @return HBaseServer
   */
  public static RpcServer get() {
@@ -860,7 +860,6 @@
   // Processes one response. Returns true if there are no more pending
   // data for this channel.
   //
-  @SuppressWarnings({"ConstantConditions"})
   private boolean processResponse(final LinkedList<Call> responseQueue,
       boolean inHandler) throws IOException {
     boolean error = true;
@@ -1280,14 +1279,15 @@
   }
 
+
+  private Function<Writable, Integer> qosFunction = null;
+
   /**
    * Gets the QOS level for this call. If it is higher than the highPriorityLevel and there
    * are priorityHandlers available it will be processed in it's own thread set.
    *
-   * @param param
-   * @return priority, higher is better
+   * @param newFunc
   */
-  private Function<Writable, Integer> qosFunction = null;
   @Override
   public void setQosFunction(Function<Writable, Integer> newFunc) {
     qosFunction = newFunc;
Index: src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/ipc/Delayable.java (working copy)
@@ -48,7 +48,7 @@
   /**
    * Signal that the RPC server is now allowed to send the response.
    * @param result The value to return to the caller.  If the corresponding
-   * {@link #delayResponse(boolean)} specified that the return value should
+   * delay response specified that the return value should
    * not be delayed, this parameter must be null.
    * @throws IOException
   */
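The relocated qosFunction field and setQosFunction(Function) above form the RPC server's extension point for call prioritization. A minimal sketch, assuming Guava's Function and the org.apache.hadoop.hbase.ipc.RpcServer interface from this codebase; the isMetaRegionCall() helper and the priority value 100 are hypothetical stand-ins:

import com.google.common.base.Function;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.io.Writable;

public class QosSetupExample {
  // Wire a priority function into the RPC server.
  static void installQos(RpcServer rpcServer) {
    rpcServer.setQosFunction(new Function<Writable, Integer>() {
      public Integer apply(Writable param) {
        // Boost priority for calls that touch catalog regions so they can
        // run on the dedicated priority handler threads.
        return isMetaRegionCall(param) ? 100 : 0;
      }
    });
  }
  static boolean isMetaRegionCall(Writable param) {
    return false; // placeholder: a real server would inspect the Invocation
  }
}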