Index: src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/TableDescriptors.java (working copy)
@@ -49,8 +49,6 @@
/**
* Get Map of all HTableDescriptors. Populates the descriptor cache as a
* side effect.
- * @param fs
- * @param rootdir
* @return Map of all descriptors.
* @throws IOException
*/
@@ -72,4 +70,4 @@
*/
public HTableDescriptor remove(final String tablename)
throws IOException;
-}
\ No newline at end of file
+}
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (working copy)
@@ -59,7 +59,7 @@
* the create it will do a getChildren("/") and see "x-222-1", "x-542-30",
* "x-352-109", x-333-110". The process will know that the original create
* succeeded an the znode it created is "x-352-109".
- * @see http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling
+ * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling"
*/
public class RecoverableZooKeeper {
private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class);
@@ -142,7 +142,7 @@
* exists is an idempotent operation. Retry before throw out exception
* @param path
* @param watcher
- * @return
+ * @return A Stat instance
* @throws KeeperException
* @throws InterruptedException
*/
@@ -177,7 +177,7 @@
* exists is an idempotent operation. Retry before throw out exception
* @param path
* @param watch
- * @return
+ * @return A Stat instance
* @throws KeeperException
* @throws InterruptedException
*/
@@ -212,7 +212,7 @@
* getChildren is an idempotent operation. Retry before throw out exception
* @param path
* @param watcher
- * @return
+ * @return List of children znodes
* @throws KeeperException
* @throws InterruptedException
*/
@@ -247,7 +247,7 @@
* getChildren is an idempotent operation. Retry before throw out exception
* @param path
* @param watch
- * @return
+ * @return List of children znodes
* @throws KeeperException
* @throws InterruptedException
*/
@@ -283,7 +283,7 @@
* @param path
* @param watcher
* @param stat
- * @return
+ * @return Data
* @throws KeeperException
* @throws InterruptedException
*/
@@ -320,7 +320,7 @@
* @param path
* @param watch
* @param stat
- * @return
+ * @return Data
* @throws KeeperException
* @throws InterruptedException
*/
@@ -359,7 +359,7 @@
* @param path
* @param data
* @param version
- * @return
+ * @return Stat instance
* @throws KeeperException
* @throws InterruptedException
*/
@@ -427,7 +427,7 @@
* @param data
* @param acl
* @param createMode
- * @return
+ * @return Path
* @throws KeeperException
* @throws InterruptedException
*/
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (working copy)
@@ -68,7 +68,7 @@
* Starts the tracking of the node in ZooKeeper.
*
* Use {@link #blockUntilAvailable()} to block until the node is available
- * or {@link #getData()} to get the data of the node if it is available.
+ * or {@link #getData(boolean)} to get the data of the node if it is available.
*/
public synchronized void start() {
this.watcher.registerListener(this);
@@ -143,7 +143,7 @@
* If the node is currently available, the most up-to-date known version of
* the data is returned. If the node is not currently available, null is
* returned.
- * @param whether to refresh the data by calling ZK directly.
+ * @param refresh whether to refresh the data by calling ZK directly.
* @return data of the node, null if unavailable
*/
public synchronized byte [] getData(boolean refresh) {
Index: src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData;
import org.apache.zookeeper.KeeperException;
/**
@@ -41,7 +40,7 @@
* listening for changes in the RS node list and watching each node.
*
* If an RS node gets deleted, this automatically handles calling of
- * {@link ServerManager#expireServer(org.apache.hadoop.hbase.HServerInfo)}.
+ * {@link ServerManager#expireServer(ServerName)}
*/
public class RegionServerTracker extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);
@@ -121,7 +120,6 @@
/**
* Gets the online servers.
* @return list of online servers
- * @throws KeeperException
*/
public List<ServerName> getOnlineServers() {
HRegionInfo#getRegionName()
- * @param origin Originating {@link ServerName}
+ * @param serverName Originating {@link ServerName}
* @param payload Payload examples include the daughters involved in a
* {@link EventType#RS_ZK_REGION_SPLIT}. Can be null
*/
Index: src/main/java/org/apache/hadoop/hbase/HServerAddress.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/HServerAddress.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/HServerAddress.java (working copy)
@@ -90,7 +90,7 @@
}
/** @return Bind address -- the raw IP, the result of a call to
- * {@link InetSocketAddress#getAddress()#getHostAddress()} --
+ * InetSocketAddress#getAddress()#getHostAddress() --
* or null if cannot resolve */
public String getBindAddress() {
return getBindAddressInternal(address);
Index: src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java (working copy)
@@ -37,8 +37,7 @@
* When creating the filter, the sender can choose its desired point in a
* trade-off between the false positive rate and the size.
*
- * @see {@link BloomFilterWriter} for the ability to add elements to a Bloom
- * filter
+ * @see BloomFilterWriter for the ability to add elements to a Bloom filter
*/
public interface BloomFilter extends BloomFilterBase {
@@ -59,5 +58,4 @@
* and thus allows a null byte buffer to be passed to contains()
*/
boolean supportsAutoLoading();
-
-}
+}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy)
@@ -19,6 +19,17 @@
*/
package org.apache.hadoop.hbase.util;
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -38,23 +49,11 @@
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
-import java.io.DataInputStream;
-import java.io.EOFException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
/**
* Utility methods for interacting with the underlying file system.
*/
@@ -523,7 +522,7 @@
/**
* Compute HDFS blocks distribution of a given file, or a portion of the file
* @param fs file system
- * @param FileStatus file status of the file
+ * @param status file status of the file
* @param start start position of the portion
* @param length length of the portion
* @return The HDFS blocks distribution
@@ -1104,7 +1103,7 @@
/**
* Update table descriptor
* @param fs
- * @param conf
+ * @param rootdir
* @param hTableDescriptor
* @throws IOException
*/
Index: src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/ByteBloomFilter.java (working copy)
@@ -164,8 +164,7 @@
* @param bitSize
* @param errorRate
* @return maximum number of keys that can be inserted into the Bloom filter
- * @see {@link #computeMaxKeys(long, double, int)} for a more precise
- * estimate
+ * @see #computeMaxKeys(long, double, int) for a more precise estimate
*/
public static long idealMaxKeys(long bitSize, double errorRate) {
// The reason we need to use floor here is that otherwise we might put
@@ -227,7 +226,7 @@
*
* @param bitSize
* @param foldFactor
- * @return
+ * @return Foldable byte size
*/
public static int computeFoldableByteSize(long bitSize, int foldFactor) {
long byteSizeLong = (bitSize + 7) / 8;
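As a rough illustration of the idealMaxKeys() contract documented above, the standard Bloom filter relationship n = m * (ln 2)^2 / -ln(errorRate) gives the key capacity for a given bit size and target false-positive rate. The sketch below is illustrative only; its class name and sample numbers are not part of this patch.

    public final class BloomSizingSketch {
      // ln(2)^2, the constant that appears in the ideal-max-keys formula.
      private static final double LOG2_SQUARED = Math.log(2) * Math.log(2);

      /** Maximum keys a filter of bitSize bits can hold at the given error rate. */
      static long idealMaxKeys(long bitSize, double errorRate) {
        // Truncate rather than round so the capacity is never overestimated.
        return (long) (bitSize * (LOG2_SQUARED / -Math.log(errorRate)));
      }

      public static void main(String[] args) {
        // A 1 MiB (8,388,608 bit) filter at a 1% false-positive rate holds
        // roughly 875,000 keys.
        System.out.println(idealMaxKeys(8L * 1024 * 1024, 0.01));
      }
    }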
Index: src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java (working copy)
@@ -161,11 +161,11 @@
* {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
*
* @param conf
+ * @param cacheConf
* @param bloomType
* @param maxKeys an estimate of the number of keys we expect to insert.
* Irrelevant if compound Bloom filters are enabled.
* @param writer the HFile writer
- * @param bloomErrorRate
* @return the new Bloom filter, or null in case Bloom filters are disabled
* or when failed to create one.
*/
@@ -231,10 +231,10 @@
* Creates a new Delete Family Bloom filter at the time of
* {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
* @param conf
+ * @param cacheConf
* @param maxKeys an estimate of the number of keys we expect to insert.
* Irrelevant if compound Bloom filters are enabled.
* @param writer the HFile writer
- * @param bloomErrorRate
* @return the new Bloom filter, or null in case Bloom filters are disabled
* or when failed to create one.
*/
Index: src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterBase.java (working copy)
@@ -71,7 +71,7 @@
/**
* Prepare an ordered pair of row and qualifier to be compared using
- * {@link KeyValue.KeyComparator}. This is only used for row-column Bloom
+ * KeyValue.KeyComparator. This is only used for row-column Bloom
* filters.
*/
@Override
Index: src/main/java/org/apache/hadoop/hbase/util/Bytes.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/Bytes.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/Bytes.java (working copy)
@@ -759,7 +759,7 @@
* This method will get a sequence of bytes from pos -> limit,
* but will restore pos after.
* @param buf
- * @return
+ * @return byte array
*/
public static byte[] getBytes(ByteBuffer buf) {
int savedPos = buf.position();
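A minimal sketch of the position-preserving behavior described above (illustrative only, not part of the patch):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetBytesSketch {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        byte[] copy = Bytes.getBytes(buf);  // copies the bytes from position to limit
        // The buffer's position is restored, so it can still be read from the start.
        System.out.println(copy.length + " " + buf.position());  // prints "4 0"
      }
    }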
Index: src/main/java/org/apache/hadoop/hbase/util/Objects.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/Objects.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/util/Objects.java (working copy)
@@ -21,14 +21,10 @@
import java.lang.reflect.Array;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Action;
@@ -129,7 +125,7 @@
* Attempts to construct a text description of the given object, by
* introspecting known classes and building a description of size.
* @param obj
- * @return
+ * @return Description
*/
public static String describeQuantity(Object obj) {
StringBuilder str = new StringBuilder();
Index: src/main/java/org/apache/hadoop/hbase/client/Mutation.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Mutation.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/client/Mutation.java (working copy)
@@ -131,7 +131,6 @@
/**
* Method for setting the put's familyMap
- * @return familyMap
*/
public void setFamilyMap(Map- * If you find yourself wanting to use a {@link Connection} for a relatively + * If you find yourself wanting to use a {@link HConnection} for a relatively * short duration of time, and do not want to deal with the hassle of creating * and cleaning up that resource, then you should consider using this * convenience class. Index: src/main/java/org/apache/hadoop/hbase/client/Append.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/client/Append.java (revision 1188497) +++ src/main/java/org/apache/hadoop/hbase/client/Append.java (working copy) @@ -39,7 +39,7 @@ *
* To append to a set of columns of a row, instantiate an Append object with the
* row to append to. At least one column to append must be specified using the
- * {@link #add(byte[], byte[], long)} method.
+ * {@link #add(byte[], byte[], byte[])} method.
*/
public class Append extends Mutation implements Writable {
// TODO: refactor to derive from Put?
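A minimal usage sketch of the pattern the class comment describes; the table, family, qualifier and value names are illustrative, and an existing Configuration conf is assumed:

    // Append "-suffix" to the current value of one cell (sketch only).
    HTable table = new HTable(conf, "mytable");
    Append append = new Append(Bytes.toBytes("row1"));
    append.add(Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("-suffix"));
    table.append(append);  // returns a Result holding the new cell value
    table.close();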
Index: src/main/java/org/apache/hadoop/hbase/client/HConnection.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/client/HConnection.java (working copy)
@@ -21,7 +21,6 @@
import java.io.Closeable;
import java.io.IOException;
-import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
@@ -192,7 +191,7 @@
* @param regionServer - the server to connect to
* @return proxy for HRegionServer
* @throws IOException if a remote or network exception occurs
- * @deprecated Use {@link #getHRegionConnection(InetSocketAddress)}
+ * @deprecated Use {@link #getHRegionConnection(String, int)}
*/
public HRegionInterface getHRegionConnection(HServerAddress regionServer)
throws IOException;
@@ -384,4 +383,4 @@
public HTableDescriptor[] getHTableDescriptors(List
- * Equivalent to {@code {@link #incrementColumnValue(byte[], byte[], byte[],
+ * Equivalent to {@link #incrementColumnValue(byte[], byte[], byte[],
* long, boolean) incrementColumnValue}(row, family, qualifier, amount,
* true)}
* @param row The row that contains the cell to increment.
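In other words, the two calls sketched below behave the same; this is illustrative only, with row, family and qualifier standing in for byte[] values:

    long v1 = table.incrementColumnValue(row, family, qualifier, 1L);        // writeToWAL defaults to true
    long v2 = table.incrementColumnValue(row, family, qualifier, 1L, true);  // explicit equivalent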
@@ -392,7 +392,7 @@
*
* @param protocol The class or interface defining the remote protocol
* @param row The row key used to identify the remote region location
- * @return
+ * @return A CoprocessorProtocol instance
*/
* For each result, the given
- * {@link Batch.Callback#update(byte[], byte[], Object)}
+ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
* method will be called.
* Here is an example of how you would use this class:
*
* This class has a number of metrics variables that are publicly accessible;
* these variables (objects) have methods to update their values;
- * for example:
- * {@link #rpcQueueTime}.inc(time)
+ * for example: rpcQueueTime.inc(time)
*
*/
public class RegionServerDynamicMetrics implements Updater {
Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (working copy)
@@ -548,8 +548,7 @@
* acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
* guarantee that two workers will not be executing the same task therefore it
* is better to have workers prepare the task and then have the
- * {@link SplitLogManager} commit the work in
- * {@link SplitLogManager.TaskFinisher}
+ * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
*/
static public interface TaskExecutor {
static public enum Status {
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -346,7 +346,7 @@
/**
* HRegion constructor. his constructor should only be used for testing and
* extensions. Instances of HRegion should be instantiated with the
- * {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)} method.
+ * {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)} method.
*
*
* @param tableDir qualified path of directory where region should be located,
@@ -364,7 +364,7 @@
* is new), then read them from the supplied path.
* @param rsServices reference to {@link RegionServerServices} or null
*
- * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
+ * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)
*/
public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
HRegionInfo regionInfo, final HTableDescriptor htd,
@@ -1161,7 +1161,7 @@
* @param status
* @return true if the region needs compacting
* @throws IOException
- * @see #internalFlushcache()
+ * @see #internalFlushcache(MonitoredTask)
*/
protected boolean internalFlushcache(
final HLog wal, final long myseqid, MonitoredTask status)
@@ -2585,7 +2585,7 @@
/**
* Release the row lock!
- * @param lockid The lock ID to release.
+ * @param lockId The lock ID to release.
*/
public void releaseRowLock(final Integer lockId) {
HashedBytes rowKey = lockIds.remove(lockId);
@@ -2995,13 +2995,14 @@
/**
* Open a Region.
- * @param info Info for region to be opened.
+ * @param info Info for region to be opened
+ * @param htd
* @param wal HLog for region to use. This method will call
* HLog#setSequenceNumber(long) passing the result of the call to
* HRegion#getMinSequenceId() to ensure the log id is properly kept
* up. HRegionStore does this every time it opens a new region.
* @param conf
- * @param flusher An interface we can request flushes against.
+ * @param rsServices An interface we can request flushes against.
* @param reporter An interface we can report progress against.
* @return new HRegion
*
@@ -3565,7 +3566,6 @@
*
* @param append
* @param lockid
- * @param returnResult
* @param writeToWAL
* @return new keyvalues after increment
* @throws IOException
Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (working copy)
@@ -70,7 +70,7 @@
/**
* A Store data file. Stores usually have one or more of these files. They
* are produced by flushing the memstore to disk. To
- * create, call {@link #createWriter(FileSystem, Path, int, Configuration)}
+ * create, call {@link #createWriter(FileSystem, Path, int, Configuration, CacheConfig)}
* and append data. Be sure to add any metadata before calling close on the
* Writer (Use the appendMetadata convenience methods). On close, a StoreFile
* is sitting in the Filesystem. To refer to it, create a StoreFile instance
@@ -402,7 +402,7 @@
* helper function to compute HDFS blocks distribution of a given file.
* For reference file, it is an estimate
* @param fs The FileSystem
- * @param o The path of the file
+ * @param p The path of the file
* @return HDFS blocks distribution
*/
static public HDFSBlocksDistribution computeHDFSBlockDistribution(
@@ -531,7 +531,6 @@
/**
* @return Current reader. Must call createReader first else returns null.
- * @throws IOException
* @see #createReader()
*/
public Reader getReader() {
@@ -539,7 +538,7 @@
}
/**
- * @param b
+ * @param evictOnClose
* @throws IOException
*/
public synchronized void closeReader(boolean evictOnClose)
@@ -855,7 +854,6 @@
* If the timeRangeTracker is not set,
* update TimeRangeTracker to include the timestamp of this key
* @param kv
- * @throws IOException
*/
public void trackTimestamps(final KeyValue kv) {
if (KeyValue.Type.Put.getCode() == kv.getType()) {
@@ -1243,7 +1241,7 @@
/**
* A method for checking Bloom filters. Called directly from
- * {@link StoreFileScanner} in case of a multi-column query.
+ * StoreFileScanner in case of a multi-column query.
*
* @param row
* @param rowOffset
@@ -1251,7 +1249,7 @@
* @param col
* @param colOffset
* @param colLen
- * @return
+ * @return True if passes
*/
public boolean passesGeneralBloomFilter(byte[] row, int rowOffset,
int rowLen, byte[] col, int colOffset, int colLen) {
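Tying together the create/append/close flow described in the class comment above, a rough sketch of this internal API; fs, dir, blocksize, conf, cacheConf, kv and maxSequenceId are assumed to exist and are purely illustrative:

    // Sketch only: write a store file, add metadata, then close it.
    StoreFile.Writer writer =
        StoreFile.createWriter(fs, dir, blocksize, conf, cacheConf);
    writer.append(kv);                            // append KeyValues in sorted order
    writer.appendMetadata(maxSequenceId, false);  // metadata must be added before close
    writer.close();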
Index: src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java (working copy)
@@ -94,11 +94,10 @@
/**
* Does the real seek operation in case it was skipped by
- * {@link #seekToRowCol(KeyValue, boolean)}. Note that this function should
+ * seekToRowCol(KeyValue, boolean) (TODO: What's this?). Note that this function should
* be never called on scanners that always do real seek operations (i.e. most
* of the scanners). The easiest way to achieve this is to call
* {@link #realSeekDone()} first.
*/
public void enforceSeek() throws IOException;
-
-}
+}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java (working copy)
@@ -47,7 +47,7 @@
/**
* Constructor
* @param value value
- * @param BitwiseOp bitOperator - the operator to use on the bit comparison
+ * @param bitOperator operator to use on the bit comparison
*/
public BitComparator(byte[] value, BitwiseOp bitOperator) {
super(value);
Index: src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java (working copy)
@@ -20,11 +20,11 @@
package org.apache.hadoop.hbase.filter;
+import java.util.ArrayList;
+
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
-import java.util.List;
-import java.util.ArrayList;
-
/**
* This filter is used to filter based on the key. It takes an operator
* (equal, greater, not equal, etc) and a byte [] comparator for the row,
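A minimal sketch of the usage described above; the row key and comparator are illustrative:

    // Keep only rows whose key equals "row1" (sketch only).
    Scan scan = new Scan();
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("row1"))));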
Index: src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java (working copy)
@@ -19,26 +19,19 @@
*/
package org.apache.hadoop.hbase.filter;
-import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
-import java.util.TreeSet;
import java.util.ArrayList;
-import java.util.Stack;
+import java.util.EmptyStackException;
import java.util.HashMap;
import java.util.Set;
+import java.util.Stack;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.ParseConstants;
-
-import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import java.lang.ArrayIndexOutOfBoundsException;
-import java.lang.ClassCastException;
-import java.lang.reflect.*;
-import java.util.EmptyStackException;
+import org.apache.hadoop.hbase.util.Bytes;
/**
* This class allows a user to specify a filter via a string
@@ -289,7 +282,7 @@
/**
* Returns the arguments of the filter from the filter string
*
- * @param filter_string filter string given by the user
+ * @param filterStringAsByteArray filter string given by the user
* @return an ArrayList containing the arguments of the filter in the filter string
*/
public static ArrayList
* @param quotedByteArray the quoted byte array
- * @return
+ * @return Unquoted byte array
*/
public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) {
if (quotedByteArray == null ||
@@ -665,7 +657,7 @@
* Converts an int expressed in a byte array to an actual int
*
* This doesn't use Bytes.toInt because that assumes
- * that there will be {@link #SIZEOF_INT} bytes available.
+ * that there will be {@link Bytes#SIZEOF_INT} bytes available.
*
* @param numberAsByteArray the int value expressed as a byte array
* @return the int value
@@ -688,7 +680,7 @@
* Converts a long expressed in a byte array to an actual long
*
* This doesn't use Bytes.toLong because that assumes
- * that there will be {@link #SIZEOF_LONG} bytes available.
+ * that there will be {@link Bytes#SIZEOF_LONG} bytes available.
*
* @param numberAsByteArray the long value expressed as a byte array
* @return the long value
Index: src/main/java/org/apache/hadoop/hbase/ServerName.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ServerName.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/ServerName.java (working copy)
@@ -32,7 +32,7 @@
* usually timestamp of server startup). The {@link #toString()} format of
* ServerName is safe to use in the filesystem and as znode name up in
* ZooKeeper. Its format is:
- * Map of region names to
- * {@link Batch.Call#call(Object)} return values
+ * {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} return values
*/
@@ -145,9 +145,6 @@
/**
* Constructor
- * @param services So we can online new regions. If null, we'll skip onlining
- * (Useful testing).
- * @param c Configuration to use running split
* @param r Region to split
* @param splitrow Row to split around
*/
@@ -430,8 +427,7 @@
* @param services Used to online/offline regions.
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @return Regions created
- * @throws KeeperException
- * @throws NodeExistsException
+ * @throws IOException
* @see #rollback(Server, RegionServerServices)
*/
public PairOfSameType<hostname> '{@link #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode>.
+ * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>.
* For example, if hostname is example.org, port is 1234,
* and the startcode for the regionserver is 1212121212, then
* the {@link #toString()} would be example.org,1234,1212121212.
@@ -220,7 +220,7 @@
/**
* @param left
- * @param rigth
+ * @param right
* @return True if other has same hostname and port.
*/
public static boolean isSameHostnameAndPort(final ServerName left,
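For illustration, a small sketch that mirrors the example in the comment above (not part of the patch):

    ServerName sn = new ServerName("example.org", 1234, 1212121212L);
    System.out.println(sn);  // prints "example.org,1234,1212121212"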
Index: src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancerFactory.java (working copy)
@@ -31,7 +31,7 @@
/**
* Create a loadblanacer from the given conf.
* @param conf
- * @return
+ * @return A {@link LoadBalancer}
*/
public static LoadBalancer getLoadBalancer(Configuration conf) {
Index: src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy)
@@ -164,15 +164,13 @@
/**
* @return HBase root dir.
- * @throws IOException
*/
public Path getRootDir() {
return this.rootdir;
}
/**
- * Returns the unique identifier generated for this cluster
- * @return
+ * @return The unique identifier generated for this cluster
*/
public String getClusterId() {
return clusterId;
Index: src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy)
@@ -243,7 +243,7 @@
}
/**
- * @param serverName
+ * @param address
* @return HServerLoad if serverName is known else null
* @deprecated Use {@link #getLoad(HServerAddress)}
*/
Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -276,7 +276,7 @@
* Main processing loop for the HMaster.
*
*
@@ -881,7 +881,7 @@
}
/**
- * Switch for the background {@link CatalogJanitor} thread.
+ * Switch for the background CatalogJanitor thread.
* Used for testing. The thread will continue to run. It will just be a noop
* if disabled.
* @param b If false, the catalog janitor won't do anything.
Index: src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java (working copy)
@@ -61,7 +61,7 @@
/**
* Perform the major balance operation
* @param clusterState
- * @return
+ * @return List of plans
*/
public List getAvg(ColumnInterpreter, Long> getStd(ColumnInterpreter
. There is a conversion method
* {@link ColumnInterpreter#castToReturnType(Object)} which takes a type.
- * @param Promoted data type
*/
public interface ColumnInterpreter data values. Not providing the divide
- * method that takes two values as it si not needed as of now.
+ * method that takes two values as it is not needed as of now.
* @param o
* @param l
- * @return
+ * @return Average
*/
double divideForAvg(S o, Long l);
}
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java (working copy)
@@ -154,7 +154,7 @@
// and intercept preXXX() method to check user privilege for the given table
// and column family.
public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor {
- @Override
+ // @Override
public Get preGet(CoprocessorEnvironment e, Get get)
throws CoprocessorException {
@@ -208,9 +208,9 @@
// Aggregation implementation at a region.
public static class ColumnAggregationEndpoint extends BaseEndpointCoprocessor
implements ColumnAggregationProtocol {
- @Override
- // Scan the region by the given family and qualifier. Return the aggregation
- // result.
+ // @Override
+ // Scan the region by the given family and qualifier. Return the aggregation
+ // result.
public int sum(byte[] family, byte[] qualifier)
throws IOException {
// aggregate at each region
Index: src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/mapreduce/hadoopbackport/InputSampler.java (working copy)
@@ -159,7 +159,7 @@
* native Hadoop ones (We'll throw a ClassNotFoundException if end up in
* here when we should be using native hadoop TotalOrderPartitioner).
* @param job
- * @return
+ * @return Context
* @throws IOException
*/
public static TaskAttemptContext getTaskAttemptContext(final Job job)
Index: src/main/java/org/apache/hadoop/hbase/KeyValue.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java (working copy)
@@ -1847,7 +1847,6 @@
* (the value part of the returned KV is always empty). Used in creating
* "fake keys" for the multi-column Bloom filter optimization to skip the
* row/column we already know is not in the file.
- * @param kv the key-value pair to take row and column from
* @return the last key on the row/column of the given key-value pair
*/
public KeyValue createLastOnRowCol() {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java (working copy)
@@ -103,7 +103,7 @@
* The second list is blocksize of the slabs in bytes. (E.g. the slab holds
* blocks of this size).
*
- * @param Configuration file.
+ * @param conf Configuration file.
*/
public void addSlabByConf(Configuration conf) {
// Proportions we allocate to each slab of the total size.
@@ -229,7 +229,8 @@
/**
* Get the buffer of the block with the specified name.
*
- * @param blockName block name
+ * @param key
+ * @param caching
* @return buffer of specified block name, or null if not in cache
*/
public Cacheable getBlock(String key, boolean caching) {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java (working copy)
@@ -495,10 +495,10 @@
*
*
DEFAULT_MAX_CHUNK_SIZE is used.
*/
public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";
@@ -913,8 +913,6 @@
* blocks, so the non-root index format is used.
*
* @param out
- * @param position The beginning offset of the inline block in the file not
- * include the header.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java (working copy)
@@ -225,7 +225,7 @@
}
/**
- * TODO left from {@HFile} version 1: move this to StoreFile after Ryan's
+ * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
* patch goes in to eliminate {@link KeyValue} here.
*
* @return the first row key, or null if the file is empty.
@@ -239,7 +239,7 @@
}
/**
- * TODO left from {@HFile} version 1: move this to StoreFile after
+ * TODO left from {@link HFile} version 1: move this to StoreFile after
* Ryan's patch goes in to eliminate {@link KeyValue} here.
*
* @return the last row key, or null if the file is empty.
Index: src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java (working copy)
@@ -55,7 +55,7 @@
/**
* @param fs
* @param p
- * @param c
+ * @param cacheConf
* @param r
* @throws IOException
*/
Index: src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (working copy)
@@ -145,7 +145,7 @@
* Does not timeout.
* @param zk If zk is null, we'll create an instance (and shut it down
* when {@link #stop()} is called) else we'll use what is passed.
- * @param connection server connection
+ * @param conf
* @param abortable If fatal exception we'll call abort on this. May be null.
* If it is we'll use the Connection associated with the passed
* {@link Configuration} as our Abortable.
Index: src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java (revision 1188497)
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaMigrationRemovingHTD.java (working copy)
@@ -50,7 +50,7 @@
/**
* Update legacy META rows, removing HTD from HRI.
* @param masterServices
- * @return
+ * @return List of table descriptors.
* @throws IOException
*/
public static List