diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 812e4bf..304e698 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -477,7 +477,7 @@ class AsyncProcess {
/**
* Helper that is used when grouping the actions per region server.
*
- * @param loc - the destination. Must not be null.
+ * @param server - the destination. Must not be null.
* @param action - the action to add to the multiaction
* @param actionsByServer the multiaction per server
* @param nonceGroup Nonce group.
@@ -1839,7 +1839,7 @@ class AsyncProcess {
/**
* Only used w/useGlobalErrors ctor argument, for HTable backward compat.
* @return Whether there were any errors in any request since the last time
- * {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created.
+ * {@link #waitForAllPreviousOpsAndReset(java.util.List, String)} was called, or AP was created.
*/
public boolean hasError() {
return globalErrors.hasErrors();
@@ -1850,9 +1850,9 @@ class AsyncProcess {
* Waits for all previous operations to finish, and returns errors and (optionally)
* failed operations themselves.
* @param failedRows an optional list into which the rows that failed since the last time
- * {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created, are saved.
+ * {@link #waitForAllPreviousOpsAndReset(java.util.List, String)} was called, or AP was created, are saved.
* @param tableName name of the table
- * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List)}
+ * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(java.util.List, String)}
* was called, or AP was created.
*/
public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
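Note: the two methods touched above form the global-error path kept for HTable backward compatibility. A minimal caller-side sketch, assuming an AsyncProcess field ap and a tableName string (both hypothetical names, not part of this patch):

    // Sketch: drain outstanding ops, then surface accumulated per-row failures.
    List<Row> failedRows = new ArrayList<Row>();
    RetriesExhaustedWithDetailsException errors =
        ap.waitForAllPreviousOpsAndReset(failedRows, tableName);
    if (errors != null) {
      // failedRows now holds the rows that failed since the last reset
      // (or since the AsyncProcess was created).
      throw errors;
    }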
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index 9efb33d..db6566c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -177,7 +177,7 @@ class ClusterStatusListener implements Closeable {
/**
- * An implementation using a multicast message between the master & the client.
+ * An implementation using a multicast message between the master &amp; the client.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
class MulticastListener implements Listener {
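Note: since MulticastListener is pluggable, a client-side opt-in sketch may help; the key names here ("hbase.status.published", "hbase.status.listener.class") are assumptions to verify against HConstants/ClusterStatusListener, not confirmed by this patch:

    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: receive dead-server notifications over multicast
    // instead of discovering failures via timeouts.
    conf.setBoolean("hbase.status.published", true);
    conf.setClass("hbase.status.listener.class",
        ClusterStatusListener.MulticastListener.class,
        ClusterStatusListener.Listener.class);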
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index c886b34..afe3566 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -78,7 +78,7 @@ public class Delete extends Mutation implements Comparable<Row> {
 /**
- * Do a get with a timeout against the passed in <code>future<code>.
+ * Do a get with a timeout against the passed in <code>future</code>.
  */
 private static
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
 * Evaluated according to minStamp <= timestamp < maxStamp
 * or [minStamp,maxStamp) in interval notation.
@@ -89,7 +89,7 @@ public class TimeRange {
  * Represents interval [minStamp, maxStamp)
  * @param minStamp the minimum timestamp, inclusive
  * @param maxStamp the maximum timestamp, exclusive
- * @throws IllegalArgumentException if either <0,
+ * @throws IllegalArgumentException if either &lt; 0,
  * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above
  */
 @Deprecated
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
index 6264a5e..a8fb677 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
@@ -37,7 +37,7 @@ import org.junit.runner.notification.RunListener;
 * - threads
 * - open file descriptor
 * - max open file descriptor
- *
+ *
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java
 * This is useful to dynamically adjust max time out values when same test cases run in different
- * test machine settings without recompiling & re-deploying code.
- *
+ * test machine settings without recompiling &amp; re-deploying code.
+ *
 * The value is obtained from the Java System property or configuration setting
 * <code>hbase.test.wait.for.ratio</code> which defaults to <code>1</code>.
* @param conf the configuration
@@ -83,7 +82,7 @@ public final class Waiter {
/**
* A predicate 'closure' used by the {@link Waiter#waitFor(Configuration, long, Predicate)} and
 * {@link Waiter#waitFor(Configuration, long, long, Predicate)} and
- * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods.
+ * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)} methods.
*/
@InterfaceAudience.Private
 public interface Predicate<E extends Exception> {
 * {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become
 * true, failing the test if the timeout is reached and the Predicate is still
 * false.
- *
* @param conf the configuration
* @param timeout the timeout in milliseconds to wait for the predicate.
* @param predicate the predicate to evaluate.
@@ -148,7 +146,6 @@ public final class Waiter {
* {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become
* true, failing the test if the timeout is reached and the Predicate is still
* false.
- *
* @param conf the configuration
* @param timeout the max timeout in milliseconds to wait for the predicate.
* @param interval the interval in milliseconds to evaluate predicate.
@@ -166,7 +163,6 @@ public final class Waiter {
* {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become
* true, failing the test if the timeout is reached, the Predicate is still
* false and failIfTimeout is set as true.
- *
* @param conf the configuration
* @param timeout the timeout in milliseconds to wait for the predicate.
* @param interval the interval in milliseconds to evaluate predicate.
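Note: all of the waitFor overloads documented above reduce to polling a Predicate at an interval until a timeout (scaled by the wait-for ratio) expires. A minimal sketch, assuming a test Configuration conf, an Admin admin, and a tableName in scope:

    // Fail the test if the table is still disabled after ~30s (ratio-adjusted).
    Waiter.waitFor(conf, 30000, new Waiter.Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        return admin.isTableEnabled(tableName); // re-checked every interval
      }
    });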
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
index b6f1aeb..5a66e2a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.Shell;
/**
* A default cluster manager for HBase. Uses SSH, and hbase shell scripts
* to manage the cluster. Assumes Unix-like commands are available like 'ps',
- * 'kill', etc. Also assumes the user running the test has enough "power" to start & stop
+ * 'kill', etc. Also assumes the user running the test has enough "power" to start &amp; stop
* servers on the remote machines (for example, the test user could be the same user as the
* user the daemon is running as)
*/
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
index d3433c7..0b56727 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
@@ -35,10 +35,10 @@ import org.junit.Before;
/**
* Base class for HBase integration tests that want to use the Chaos Monkey.
- * Usage: bin/hbase
+ *
 * Example usage:
- *
+ *
 */
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
 * hbase org.apache.hadoop.hbase.IntegrationTestRegionReplicaReplication
 * -DIntegrationTestRegionReplicaReplication.num_keys_per_server=10000
 * -Dhbase.IntegrationTestRegionReplicaReplication.runtime=600000
@@ -65,7 +65,7 @@ import com.google.common.collect.Lists;
 * -DIntegrationTestRegionReplicaReplication.region_replica_id=2
 * -DIntegrationTestRegionReplicaReplication.num_read_threads=100
 * -DIntegrationTestRegionReplicaReplication.num_write_threads=100
- *
+ *
 */
@Category(IntegrationTests.class)
public class IntegrationTestRegionReplicaReplication extends IntegrationTestIngest {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index f00127f..a359c51 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -91,14 +91,14 @@ import java.util.concurrent.atomic.AtomicLong;
 * It starts an MR job that creates linked chains
 *
 * The format of rows is like this:
- * Row Key -> Long
+ * Row Key -&gt; Long
 *
- * L:<< Chain Id >> -> Row Key of the next link in the chain
- * S:<< Chain Id >> -> The step in the chain that his link is.
- * D:<< Chain Id >> -> Random Data.
+ * L:&lt;&lt; Chain Id &gt;&gt; -&gt; Row Key of the next link in the chain
+ * S:&lt;&lt; Chain Id &gt;&gt; -&gt; The step in the chain that his link is.
+ * D:&lt;&lt; Chain Id &gt;&gt; -&gt; Random Data.
 *
 * All chains start on row 0.
- * All rk's are > 0.
+ * All rk's are &gt; 0.
 *
 * After creating the linked lists they are walked over using a TableMapper based Mapreduce Job.
 *
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..abd72fc 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -43,11 +43,11 @@ import org.junit.experimental.categories.Category;
 * 'aaa' to 'zzz', and for each row, sets the columns f1:(null) and f2:(null) to be
 * the the same as the row value.
 * <pre>
- * aaa, f1: => aaa
- * aaa, f2: => aaa
- * aab, f1: => aab
+ * aaa, f1: =&gt; aaa
+ * aaa, f2: =&gt; aaa
+ * aab, f1: =&gt; aab
 * ....
- * zzz, f2: => zzz
+ * zzz, f2: =&gt; zzz
 * </pre>
 *
 * Then the test creates a snapshot from this table, and overrides the values in the original
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 437f200..9a69e37 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -79,29 +79,28 @@ import com.google.common.base.Objects;
 * different threads:
 *
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
- * If number of clients > 1, we start up a MapReduce job. Each map task
+ * If number of clients &gt; 1, we start up a MapReduce job. Each map task
 * runs an individual client. Each client does about 1GB of data.
 */
public class PerformanceEvaluation extends Configured implements Tool {
@@ -779,8 +779,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
 }
 /**
- * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation.Test
- * tests}. This makes the reflection logic a little easier to understand...
+ * Wraps up options passed to {@link org.apache.hadoop.hbase.rest.PerformanceEvaluation.TestOptions}.
+ * This makes the reflection logic a little easier to understand...
 */
static class TestOptions {
 private int startRow;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
index 530a323..9e658b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
@@ -54,7 +54,8 @@ class HealthChecker {
 /**
  * Initialize.
  *
- * @param configuration
+ * @param location
+ * @param timeout
  */
 public void init(String location, long timeout) {
   this.healthCheckScript = location;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index 63d88ef..0b8bf84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -67,7 +67,7 @@ class HFileArchiveManager {
 /**
  * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
- * pass of the {@link HFileCleaner}, if the HFiles are retained by another cleaner.
+ * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another cleaner.
  * @param table name of the table for which to disable hfile retention.
  * @return this for chaining.
  * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
index f5fc979..4425075 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
@@ -96,7 +96,6 @@ public class ForeignExceptionDispatcher implements ForeignExceptionListener, ForeignExceptionSnare {
 /**
  * Sends an exception to all listeners.
- * @param message human readable message passed to the listener
  * @param e {@link ForeignException} containing the cause. Can be null.
  */
 private void dispatch(ForeignException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index b4f1953..411ad74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -221,7 +221,6 @@ public class HFileSystem extends FilterFileSystem {
 /**
  * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient
  * linked to this FileSystem. See HBASE-6435 for the background.
- *
 * There should be no reason, except testing, to create a specific ReorderBlocks.
 *
 * @return true if the interceptor was added, false otherwise.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index 0546253..70af2c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -683,7 +683,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
 /**
  * Creates a new SaslClientCallbackHandler.
  * @param userName SASL user name
- * @Param password SASL password
+ * @param password SASL password
  */
 public SaslClientCallbackHandler(String userName, char[] password) {
   this.password = password;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index e2af762..02d881e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -126,8 +126,7 @@ public class CellCounter extends Configured implements Tool {
  * @param values The columns.
  * @param context The current context.
  * @throws IOException When something is broken with the data.
- * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
- *   org.apache.hadoop.mapreduce.Mapper.Context)
+ * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)
  */
 @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index aca84fd..c5e0f44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -76,8 +76,7 @@ public class RowCounter extends Configured implements Tool {
  * @param values The columns.
  * @param context The current context.
  * @throws IOException When something is broken with the data.
- * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
- *   org.apache.hadoop.mapreduce.Mapper.Context)
+ * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)
  */
 @Override
 public void map(ImmutableBytesWritable row, Result values,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 9ebb3c1..fcbfe0b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -136,7 +136,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 /**
  * Add entries to <code>packagedClasses</code> corresponding to class files
* contained in jar.
* @param jar The jar who's content to list.
- * @param packagedClasses map[class -> jar]
+ * @param packagedClasses map[class -&gt; jar]
*/
private static void updateMap(String jar, Map<String, String> packagedClasses) throws IOException {
- * Proxy <------------------
+ * Proxy &lt;------------------
* | \
* v \
* PassthroughInvocationHandler | weak reference
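Note: the diagram above shows a proxy that reaches its delegate only through a weak reference, so the delegate can be collected independently of the proxy. A generic JDK illustration of that shape (a toy, not the HBase class being patched):

    import java.lang.ref.WeakReference;
    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    final class PassthroughHandler implements InvocationHandler {
      private final WeakReference<Object> delegate; // the weak edge in the diagram

      PassthroughHandler(Object target) {
        this.delegate = new WeakReference<Object>(target);
      }

      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        Object target = delegate.get();
        if (target == null) {
          throw new IllegalStateException("delegate already garbage collected");
        }
        return method.invoke(target, args); // pass the call straight through
      }
    }

A caller would wrap a real instance via Proxy.newProxyInstance(loader, new Class<?>[] { Iface.class }, new PassthroughHandler(real)).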
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
index 523b056..dbcfd4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
@@ -75,7 +75,7 @@ class NamespaceStateManager {
/**
* Check if adding a region violates namespace quota, if not update namespace cache.
*
- * @param TableName
+ * @param name name of the table
* @param regionName
* @param incr
* @return true, if region can be added to table.
@@ -108,7 +108,7 @@ class NamespaceStateManager {
/**
* Check and update region count for an existing table. To handle scenarios like restore snapshot
- * @param TableName name of the table for region count needs to be checked and updated
+ * @param name name of the table for region count needs to be checked and updated
* @param incr count of regions
* @throws QuotaExceededException if quota exceeds for the number of regions allowed in a
* namespace
@@ -183,7 +183,7 @@ class NamespaceStateManager {
/**
* Delete the namespace state.
*
- * @param An instance of NamespaceTableAndRegionInfo
+ * @param namespace instance of NamespaceTableAndRegionInfo
*/
void deleteNamespace(String namespace) {
this.nsStateCache.remove(namespace);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 89e723e..18bafad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1684,7 +1684,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * A split takes the config from the parent region & passes it to the daughter
+ * A split takes the config from the parent region &amp; passes it to the daughter
* region's constructor. If 'conf' was passed, you would end up using the HTD
* of the parent region in addition to the new daughter HTD. Pass 'baseConf'
* to the daughter regions to avoid this tricky dedupe problem.
@@ -3767,7 +3767,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Atomically apply the given map of family->edits to the memstore.
+ * Atomically apply the given map of family-&gt;edits to the memstore.
* This handles the consistency control on its own, but the caller
* should already have locked updatesLock.readLock(). This also does
* not check the families for validity.
@@ -3862,9 +3862,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
/**
- * Append the given map of family->edits to a WALEdit data structure.
+ * Append the given map of family-&gt;edits to a WALEdit data structure.
* This does not write to the WAL itself.
- * @param familyMap map of family->edits
+ * @param familyMap map of family-&gt;edits
* @param walEdit the destination entry to append into
*/
private void addFamilyMapToWALEdit(Map<byte[], List<Cell>> familyMap,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index a69d8c0..98d93b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -723,7 +723,7 @@ class MemStoreFlusher implements FlushRequester {
/**
* @param maximumWait
- * @return True if we have been delayed > maximumWait milliseconds.
+ * @return True if we have been delayed &gt; maximumWait milliseconds.
*/
public boolean isMaximumWait(final long maximumWait) {
return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index 7424e4e..d2e78e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -162,7 +162,7 @@ public class MultiVersionConcurrencyControl {
*
* @param writeEntry
*
- * @return true if e is visible to MVCC readers (that is, readpoint >= e.writeNumber)
+ * @return true if e is visible to MVCC readers (that is, readpoint &gt;= e.writeNumber)
*/
public boolean complete(WriteEntry writeEntry) {
synchronized (writeQueue) {
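Note: the corrected @return line states the visibility invariant: an entry is visible once the read point has advanced to or past its write number. A self-contained toy restating the rule (deliberately simplified to in-order completion; not the real queue-based implementation):

    final class ToyMvcc {
      private long nextWriteNumber = 1;
      private long readPoint = 0;

      long begin() {                      // stamp a new edit
        return nextWriteNumber++;
      }

      void complete(long writeNumber) {   // edit finished; advance the read point
        readPoint = Math.max(readPoint, writeNumber);
      }

      boolean isVisible(long writeNumber) {
        return readPoint >= writeNumber;  // the documented condition
      }
    }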
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9cfc5df..b35dbe7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -426,7 +426,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
* Starts the nonce operation for a mutation, if needed.
* @param mutation Mutation.
* @param nonceGroup Nonce group from the request.
- * @returns Nonce used (can be NO_NONCE).
+ * @return Nonce used (can be NO_NONCE).
*/
private long startNonceOperation(final MutationProto mutation, long nonceGroup)
throws IOException, OperationConflictException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
index b5ef319..20a68ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
@@ -123,7 +123,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
/**
* Check whether this version should be retained.
* There are 4 variables considered:
- * If this version is past max versions -> skip it
+ * If this version is past max versions -&gt; skip it
* If this kv has expired or was deleted, check min versions
* to decide whther to skip it or not.
*
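Note: the rules in that comment can be restated as a small decision function; this is a hedged paraphrase only (the real tracker also tracks column boundaries and delete markers):

    // Toy restatement: should the current version be kept?
    static boolean keepVersion(int versionsSeen, boolean expiredOrDeleted,
        int maxVersions, int minVersions) {
      if (versionsSeen >= maxVersions) {
        return false;                      // past max versions: skip it
      }
      if (expiredOrDeleted) {
        return versionsSeen < minVersions; // keep only to satisfy min versions
      }
      return true;
    }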
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 1582237..7be1faa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -218,7 +218,7 @@ public class StoreFileReader {
/**
* Check if this storeFile may contain keys within the TimeRange that
* have not expired (i.e. not older than oldestUnexpiredTS).
- * @param timeRange the timeRange to restrict
+ * @param tr the timeRange to restrict
* @param oldestUnexpiredTS the oldest timestamp that is not expired, as
* determined by the column family's TTL
* @return false if queried keys definitely don't exist in this StoreFile
@@ -232,8 +232,8 @@ public class StoreFileReader {
* Checks whether the given scan passes the Bloom filter (if present). Only
* checks Bloom filters for single-row or single-row-column scans. Bloom
* filter checking for multi-gets is implemented as part of the store
- * scanner system (see {@link StoreFileScanner#seekExactly}) and uses
- * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)}
+ * scanner system (see {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner#seek(org.apache.hadoop.hbase.Cell)}) and
+ * uses the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)}
* and {@link #passesGeneralRowColBloomFilter(Cell)}.
*
* @param scan the scan specification. Used to determine the row, and to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 080bb95..d3153b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -707,11 +707,11 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
* Next Index Key SEEK_NEXT_COL
*
* Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4
- * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only
- * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at
- * the 'Next Index Key', it would land us in the next block, so we should SEEK. In other scenarios
- * where the SEEK will not land us in the next block, it is very likely better to issues a series
- * of SKIPs.
+ * is &gt; r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we
+ * only want one version of c1, after we have it, a SEEK_COL will be issued to get to c2.
+ * Looking at the 'Next Index Key', it would land us in the next block, so we should SEEK. In
+ * other scenarios where the SEEK will not land us in the next block, it is very likely better
+ * to issues a series of SKIPs.
*/
@VisibleForTesting
protected ScanQueryMatcher.MatchCode optimize(ScanQueryMatcher.MatchCode qcode, Cell cell) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
index df1ddf2..2425a8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
@@ -862,7 +862,6 @@ public class StripeStoreFileManager
/**
* Remove compacted files.
- * @param compactedFiles Compacted files.
*/
private void removeCompactedFiles() throws IOException {
for (StoreFile oldFile : this.compactedFiles) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index 74c950c..daa99fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -251,7 +251,6 @@ public class CompactionRequest implements Comparable<CompactionRequest> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
- * syncs futures will return the exception to their clients; some of the
- * edits may have made it out to data nodes but we will report all that were part of
- * this session as failed.
+ * @param name
+ * @param maxHandlersCount
*/
SyncRunner(final String name, final int maxHandlersCount) {
super(name);
@@ -536,7 +530,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
- * Release all SyncFutures whose sequence is <= currentSequence.
+ * Release all SyncFutures whose sequence is &lt;= currentSequence.
+ * @param currentSequence
* @param t May be non-null if we are processing SyncFutures because an exception was thrown.
* @return Count of SyncFutures we let go.
*/
@@ -820,9 +815,10 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 * Herein, we have an array into which we store the sync futures as they come in. When we have a
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 0755358..350e698 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -232,13 +232,12 @@ public class ProtobufLogReader extends ReaderBase {
 * PB_WAL_COMPLETE_MAGIC.size() bytes. In case trailer is not present, it returns false;
 * otherwise, sets the trailer and sets this.walEditsStopOffset variable up to the point just
 * before the trailer.
- *
- * In case the trailer size > this.trailerMaxSize, it is read after a WARN message.
+ * In case the trailer size &gt; this.trailerMaxSize, it is read after a WARN message.
+ *
 * @return true if a valid trailer is present
 * @throws IOException
 */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
index 62dea53..5910f8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
@@ -396,8 +396,8 @@ class SequenceIdAccounting {
 /**
  * Iterates over the given Map and compares sequence ids with corresponding entries in
- * {@link #oldestUnflushedRegionSequenceIds}. If a region in
- * {@link #oldestUnflushedRegionSequenceIds} has a sequence id less than that passed in
+ * {@link #lowestUnflushedSequenceIds}. If a region in
+ * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in
 * <code>sequenceids</code> then return it.
* @param sequenceids Sequenceids keyed by encoded region name.
* @return regions found in this instance with sequence ids less than those passed in.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 5ec218a..e298577 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -54,8 +54,8 @@ class SyncFuture {
private long txid;
/**
- * The transaction id that was set in here when we were marked done. Should be equal or > txnId.
- * Put this data member into the NOT_DONE state while this class is in use.
+ * The transaction id that was set in here when we were marked done. Should be equal or &gt;
+ * txnId. Put this data member into the NOT_DONE state while this class is in use.
*/
private long doneTxid;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
index 559c593..a6d2ab6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
@@ -163,7 +163,7 @@ class AccessControlFilter extends FilterBase {
* @param pbBytes A pb serialized {@link AccessControlFilter} instance
* @return An instance of {@link AccessControlFilter} made from bytes
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
- * @see {@link #toByteArray()}
+ * @see #toByteArray()
*/
public static AccessControlFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {
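Note: with the @see corrected, the pairing is explicit: parseFrom is the inverse of toByteArray. A round-trip sketch, assuming an existing filter instance (construction elided):

    byte[] pb = filter.toByteArray();                             // pb-serialize
    AccessControlFilter copy = AccessControlFilter.parseFrom(pb); // throws DeserializationException on bad input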
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
index d0d9b63..0b2c7b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
@@ -104,7 +104,7 @@ import com.google.protobuf.RpcController;
/**
* This is a customized version of the polymorphic hadoop
- * {@link ObjectWritable}. It removes UTF8 (HADOOP-414).
+ * {@link org.apache.hadoop.io.ObjectWritable}. It removes UTF8 (HADOOP-414).
* Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing
* objects running a short sequentialWrite Performance Evaluation test just in
* ObjectWritable alone; more when we're doing randomRead-ing. Other
@@ -113,7 +113,7 @@ import com.google.protobuf.RpcController;
* if non-Writable classes are introduced -- if passed a Writable for which we
* have no code, we just do the old-school passing of the class name, etc. --
* but passing codes the savings are large particularly when cell
- * data is small (If < a couple of kilobytes, the encoding/decoding of class
+ * data is small (If &lt; a couple of kilobytes, the encoding/decoding of class
* name and reflection to instantiate class was costing in excess of the cell
* handling).
* @deprecated This class is needed migrating TablePermissions written with
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
index 25cfc8b..5543c33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
@@ -83,7 +83,7 @@ public class TableAuthManager implements Closeable {
/**
* Returns a combined map of user and group permissions, with group names
- * distinguished according to {@link AuthUtil.isGroupPrincipal}
+ * distinguished according to {@link AuthUtil#isGroupPrincipal}
*/
 public ListMultimap<String, TablePermission>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
 * create
* @throws Exception
- * @see {@link #shutdownMiniCluster()}
+ * @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -910,7 +910,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* start minicluster
* @throws Exception
- * @see {@link #shutdownMiniCluster()}
+ * @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -932,13 +932,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param numMasters Number of masters to start up. We'll start this many
- * hbase masters. If numMasters > 1, you can find the active/primary master
+ * hbase masters. If numMasters &gt; 1, you can find the active/primary master
* with {@link MiniHBaseCluster#getMaster()}.
* @param numSlaves Number of slaves to start up. We'll start this many
* regionservers. If dataNodeHosts == null, this also indicates the number of
* datanodes to start. If dataNodeHosts != null, the number of datanodes is
* based on dataNodeHosts.length.
- * If numSlaves is > 1, then make sure
+ * If numSlaves is &gt; 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @param dataNodeHosts hostnames DNs to run on.
@@ -947,7 +947,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* If you start MiniDFSCluster without host names,
* all instances of the datanodes will have the same host name.
* @throws Exception
- * @see {@link #shutdownMiniCluster()}
+ * @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -971,13 +971,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param numMasters Number of masters to start up. We'll start this many
- * hbase masters. If numMasters > 1, you can find the active/primary master
+ * hbase masters. If numMasters &gt; 1, you can find the active/primary master
* with {@link MiniHBaseCluster#getMaster()}.
* @param numSlaves Number of slaves to start up. We'll start this many
* regionservers. If dataNodeHosts == null, this also indicates the number of
* datanodes to start. If dataNodeHosts != null, the number of datanodes is
* based on dataNodeHosts.length.
- * If numSlaves is > 1, then make sure
+ * If numSlaves is &gt; 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @param dataNodeHosts hostnames DNs to run on.
@@ -989,7 +989,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @param regionserverClass The class to use as HRegionServer, or null for
* default
* @throws Exception
- * @see {@link #shutdownMiniCluster()}
+ * @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -1070,7 +1070,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return Reference to the hbase mini hbase cluster.
* @throws IOException
* @throws InterruptedException
- * @see {@link #startMiniCluster()}
+ * @see #startMiniCluster()
*/
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
final int numSlaves, Class<? extends HMaster> masterClass,
@@ -1152,7 +1152,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Stops mini hbase, zk, and hdfs clusters.
* @throws IOException
- * @see {@link #startMiniCluster(int)}
+ * @see #startMiniCluster(int)
*/
public void shutdownMiniCluster() throws Exception {
LOG.info("Shutting down minicluster");
@@ -1841,9 +1841,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @param tableName
* @param startKey
* @param stopKey
- * @param callingMethod
- * @param conf
* @param isReadOnly
+ * @param durability
+ * @param wal
* @param families
* @return A region on which you must call
* {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
@@ -2004,7 +2004,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
/** A tracker for tracking and validating table rows
- * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
+ * generated with
+ * {@link org.apache.hadoop.hbase.HBaseTestingUtility#loadTable(org.apache.hadoop.hbase.client.Table, byte[])}
*/
public static class SeenRowTracker {
int dim = 'z' - 'a' + 1;
@@ -2240,7 +2241,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return digest.toString();
}
- /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
+ /** All the row values for the data loaded by
+ * {@link org.apache.hadoop.hbase.HBaseTestingUtility#loadTable(org.apache.hadoop.hbase.client.Table, byte[])} */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
static {
int i = 0;
@@ -3014,7 +3016,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the
* regions have been all assigned.
- * @see #waitTableEnabled(Admin, byte[], long)
+ * @see #waitTableEnabled(TableName, long)
* @param table Table to wait on.
* @param timeoutMillis Time to wait on it being marked enabled.
* @throws InterruptedException
@@ -3203,9 +3205,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Uses directly the assignment manager to assign the region.
* and waits until the specified region has completed assignment.
- * @param tableName the table name
+ * @param regionInfo the region info instance
* @throws IOException
- * @throw InterruptedException
+ * @throws InterruptedException
* @return true if the region is assigned false otherwise.
*/
public boolean assignRegion(final HRegionInfo regionInfo)
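Note: the startMiniCluster/shutdownMiniCluster pairing these @see tags point at is normally wired into class-level test setup and teardown; a minimal JUnit 4 sketch:

    private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

    @BeforeClass
    public static void setUpCluster() throws Exception {
      TEST_UTIL.startMiniCluster(1, 1);  // one master, one regionserver
    }

    @AfterClass
    public static void tearDownCluster() throws Exception {
      TEST_UTIL.shutdownMiniCluster();   // always pair with startMiniCluster
    }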
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index e5aec57..ab3b649 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -145,7 +145,7 @@ public class HFilePerformanceEvaluation {
}
/**
- * Write a test HFile with the given codec & cipher
+ * Write a test HFile with the given codec &amp; cipher
* @param conf
* @param fs
* @param mf
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
index cea10ebd..1f0d906 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
@@ -156,8 +156,8 @@ public abstract class MultithreadedTestUtil {
* Used for unit tests that spawn threads. E.g.,
*
- * List<TestThread> threads = new ArrayList<TestThread>();
+ * List&lt;TestThread&gt; threads = new ArrayList&lt;TestThread&gt;();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 * Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
 * configuration instance. Minimally the mock will return
- * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked.
+ * <code>conf</code> when {@link ClusterConnection#getConfiguration()} is invoked.
* Be sure to shutdown the connection when done by calling
* {@link Connection#close()} else it will stick around; this is probably not what you want.
* @param conf configuration
@@ -141,8 +141,8 @@ public class HConnectionTestingUtility {
* @param conf configuration
* @return ClusterConnection object for conf
* @throws ZooKeeperConnectionException
- * @see @link
- * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
+ * @see
+ * <a href="http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)">spy(T)</a>
*/
public static ClusterConnection getSpiedConnection(final Configuration conf)
throws IOException {
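Note: the corrected link targets Mockito's spy(T): the returned object delegates every call to the real one unless explicitly stubbed. A small sketch of the idea (plain Mockito, not this utility's internals), assuming a real connection and a conf in scope:

    ClusterConnection spied = org.mockito.Mockito.spy(connection);
    // Calls pass through to the real connection except what we stub:
    org.mockito.Mockito.doReturn(conf).when(spied).getConfiguration();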
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index ca4b609..36cafad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -2400,7 +2400,7 @@ public class TestFromClientSide {
/**
* HBASE-1182
- * Scan for columns > some timestamp
+ * Scan for columns &gt; some timestamp
*/
@Test
public void testJiraTest1182() throws Exception {
@@ -2847,7 +2847,7 @@ public class TestFromClientSide {
/**
* Verify a single column using gets.
* Expects family and qualifier arrays to be valid for at least
- * the range: idx-2 < idx < idx+2
+ * the range: idx-2 &lt; idx &lt; idx+2
*/
private void getVerifySingleColumn(Table ht,
byte [][] ROWS, int ROWIDX,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 1b20b76..a192e93 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -320,7 +320,7 @@ public class TestHCM {
* individual timeout is fine. We do that with:
* - client side: an operation timeout of 30 seconds
* - server side: we sleep 20 second at each attempt. The first work fails, the second one
- * succeeds. But the client won't wait that much, because 20 + 20 > 30, so the client
+ * succeeds. But the client won't wait that much, because 20 + 20 &gt; 30, so the client
* timeouted when the server answers.
*/
@Test
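Note: the arithmetic above (two 20s server-side sleeps against a 30s budget) is governed by the client-side operation timeout; a configuration sketch using the standard key:

    Configuration conf = HBaseConfiguration.create();
    // Overall cap across all retries of one operation; the second 20s attempt
    // exceeds the 30s budget, which is exactly what the test exercises.
    conf.setLong("hbase.client.operation.timeout", 30000);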
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
index 56f01c3..cd321b1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
@@ -39,7 +39,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
- * Run tests related to {@link TimestampsFilter} using HBase client APIs.
+ * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client APIs.
* Sets up the HBase mini cluster once at start. Each creates a table
* named for the method and does its stuff against that.
*/
@@ -401,7 +401,7 @@ public class TestMultipleTimestamps {
/**
* Assert that the passed in KeyValue has expected contents for the
- * specified row, column & timestamp.
+ * specified row, column &amp; timestamp.
*/
private void checkOneCell(Cell kv, byte[] cf,
int rowIdx, int colIdx, long ts) {
@@ -427,7 +427,7 @@ public class TestMultipleTimestamps {
/**
* Uses the TimestampFilter on a Get to request a specified list of
- * versions for the row/column specified by rowIdx & colIdx.
+ * versions for the row/column specified by rowIdx &amp; colIdx.
*
*/
private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
index c803752..5265701 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
@@ -286,7 +286,7 @@ public class TestTimestampsFilter {
/**
* Assert that the passed in KeyValue has expected contents for the
- * specified row, column & timestamp.
+ * specified row, column &amp; timestamp.
*/
private void checkOneCell(Cell kv, byte[] cf,
int rowIdx, int colIdx, long ts) {
@@ -312,7 +312,7 @@ public class TestTimestampsFilter {
/**
* Uses the TimestampFilter on a Get to request a specified list of
- * versions for the row/column specified by rowIdx & colIdx.
+ * versions for the row/column specified by rowIdx &amp; colIdx.
*
*/
private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx,
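Note: a getNVersions-style helper reduces to a Get carrying a TimestampsFilter; a minimal sketch, assuming row, cf, qualifier byte arrays and a Table ht:

    Get get = new Get(row);
    get.addColumn(cf, qualifier);
    get.setFilter(new TimestampsFilter(Arrays.asList(2L, 5L))); // only these versions
    get.setMaxVersions();               // otherwise only the newest version returns
    Result result = ht.get(get);
    Cell[] cells = result.rawCells();   // requested versions, newest first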
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java
index 4fe0d23..9002a99 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java
@@ -184,7 +184,7 @@ public class TestAggregateProtocol {
}
/**
- * This will test the row count with startrow > endrow. The result should be
+ * This will test the row count with startrow &gt; endrow. The result should be
* -1.
* @throws Throwable
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index 8da76a6..f41ec38 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -531,7 +531,7 @@ public class TestWALObserver {
* Copied from HRegion.
*
* @param familyMap
- * map of family->edits
+ * map of family-&gt;edits
* @param walEdit
* the destination entry to append into
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
index 6642638..711f53b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java
@@ -338,7 +338,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
}
/**
- * Custom user->group mapping service.
+ * Custom user-&gt;group mapping service.
*/
public static class MyGroupsProvider extends ShellBasedUnixGroupsMapping {
static Map<String, List<String>> mapping = new HashMap<String, List<String>>();