diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 812e4bf..304e698 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -477,7 +477,7 @@ class AsyncProcess {
  /**
   * Helper that is used when grouping the actions per region server.
   *
-   * @param loc - the destination. Must not be null.
+   * @param server - the destination. Must not be null.
   * @param action - the action to add to the multiaction
   * @param actionsByServer the multiaction per server
   * @param nonceGroup Nonce group.
@@ -1839,7 +1839,7 @@ class AsyncProcess {
  /**
   * Only used w/useGlobalErrors ctor argument, for HTable backward compat.
   * @return Whether there were any errors in any request since the last time
-   * {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created.
+   * {@link #waitForAllPreviousOpsAndReset(java.util.List, String)} was called, or AP was created.
   */
  public boolean hasError() {
    return globalErrors.hasErrors();
@@ -1850,9 +1850,9 @@ class AsyncProcess {
   * Waits for all previous operations to finish, and returns errors and (optionally)
   * failed operations themselves.
   * @param failedRows an optional list into which the rows that failed since the last time
-   * {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created, are saved.
+   * {@link #waitForAllPreviousOpsAndReset(java.util.List, String)} was called, or AP was created, are saved.
   * @param tableName name of the table
-   * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List)}
+   * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(java.util.List, String)}
   * was called, or AP was created.
   */
  public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index 9efb33d..db6566c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -177,7 +177,7 @@ class ClusterStatusListener implements Closeable {

  /**
-   * An implementation using a multicast message between the master & the client.
+   * An implementation using a multicast message between the master &amp; the client.
   */
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
  class MulticastListener implements Listener {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index c886b34..afe3566 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -78,7 +78,7 @@ public class Delete extends Mutation implements Comparable {
   * associated with the specified row (all versions of all columns in all
   * families), with timestamp from current point in time to the past.
   * Cells defining timestamp for a future point in time
-   * (timestamp > current time) will not be deleted.
+   * (timestamp &gt; current time) will not be deleted.
   * @param row row key
   */
  public Delete(byte [] row) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 29650ef..888f7a4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1966,7 +1966,7 @@ public class HBaseAdmin implements Admin {
  }

  /**
-   * Do a get with a timeout against the passed in future.
+   * Do a get with a timeout against the passed in future.
   */
  private static <T> T get(final Future<T> future, final long timeout, final TimeUnit units)
  throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 1892f54..d90fda3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -426,9 +426,10 @@ public class Scan extends Query {
   * simply increment the last byte of the array.
   * But if your application uses real binary rowids you may run into the scenario that your
   * prefix is something like:
-   *    { 0x12, 0x23, 0xFF, 0xFF }
-   * Then this stopRow needs to be fed into the actual scan
-   *    { 0x12, 0x24 } (Notice that it is shorter now)
+   *    { 0x12, 0x23, 0xFF, 0xFF }
+   *
+   * Then this stopRow needs to be fed into the actual scan
+   *    { 0x12, 0x24 } (Notice that it is shorter now)
   * This method calculates the correct stop row value for this usecase.
   *
   * @param rowKeyPrefix the rowKeyPrefix.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
index d09ea2c..2ec87a5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
@@ -129,7 +129,7 @@ public class PrefixFilter extends FilterBase {
  }

  /**
-   * @param other
+   * @param o
   * @return true if and only if the fields of the filter that are serialized
   * are equal to the corresponding fields in other. Used for testing.
   */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index 64eedfb..17cd230 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -63,7 +63,7 @@ public class ReplicationQueueInfo {
  /**
   * Parse dead server names from znode string servername can contain "-" such as
   * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following
-   * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-...
+   * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-&lt;server name&gt;-...
   */
  private static void extractDeadServersFromZNodeString(String deadServerListStr,
      List<ServerName> result) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
index 61bb041..58f6d15 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTableBase.java
@@ -370,7 +370,6 @@ abstract class ReplicationTableBase {
   * Builds the Replication Table in a background thread. Any method accessing the Replication Table
   * should do so through getOrBlockOnReplicationTable()
   *
-   * @return the Replication Table
   * @throws IOException if the Replication Table takes too long to build
   */
  private void createReplicationTableInBackground() throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index fed20c4..6a803bc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * Represents an interval of version timestamps. Presumes timestamps between
  * {@link #INITIAL_MIN_TIMESTAMP} and {@link #INITIAL_MAX_TIMESTAMP} only. Gets freaked out if
- * passed a timestamp that is < {@link #INITIAL_MIN_TIMESTAMP},
+ * passed a timestamp that is &lt; {@link #INITIAL_MIN_TIMESTAMP},
 *

 * Evaluated according to minStamp <= timestamp < maxStamp
 * or [minStamp,maxStamp) in interval notation.
@@ -89,7 +89,7 @@ public class TimeRange {
   * Represents interval [minStamp, maxStamp)
   * @param minStamp the minimum timestamp, inclusive
   * @param maxStamp the maximum timestamp, exclusive
-   * @throws IllegalArgumentException if either <0,
+   * @throws IllegalArgumentException if either &lt; 0,
   * @deprecated This is made @InterfaceAudience.Private in the 2.0 line and above
   */
  @Deprecated
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
index 6264a5e..a8fb677 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.java
@@ -37,7 +37,7 @@ import org.junit.runner.notification.RunListener;
 *  - threads
 *  - open file descriptor
 *  - max open file descriptor
- *

+ *
* When surefire forkMode=once/always/perthread, this code is executed on the forked process. */ public class ResourceCheckerJUnitListener extends RunListener { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java index cd31ce5..c4d9f9b 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java @@ -42,7 +42,6 @@ public final class Waiter { * in {@link #sleep(Configuration, long)}, {@link #waitFor(Configuration, long, Predicate)}, * {@link #waitFor(Configuration, long, long, Predicate)}, and * {@link #waitFor(Configuration, long, long, boolean, Predicate)} method - *

* The actual time out value will equal to hbase.test.wait.for.ratio * passed-in timeout */ public static final String HBASE_TEST_WAIT_FOR_RATIO = "hbase.test.wait.for.ratio"; @@ -59,10 +58,10 @@ public final class Waiter { * {@link #waitFor(Configuration, long, Predicate)}, * {@link #waitFor(Configuration, long, long, Predicate)} and * {@link #waitFor(Configuration, long, long, boolean, Predicate)} methods of the class - *

+ *

 * This is useful to dynamically adjust max time out values when same test cases run in different
- * test machine settings without recompiling & re-deploying code.
- *
+ * test machine settings without recompiling &amp; re-deploying code.
+ *

* The value is obtained from the Java System property or configuration setting * hbase.test.wait.for.ratio which defaults to 1. * @param conf the configuration @@ -83,7 +82,7 @@ public final class Waiter { /** * A predicate 'closure' used by the {@link Waiter#waitFor(Configuration, long, Predicate)} and * {@link Waiter#waitFor(Configuration, long, Predicate)} and - * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods. + * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)} methods. */ @InterfaceAudience.Private public interface Predicate { @@ -131,7 +130,6 @@ public final class Waiter { * {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become * true, failing the test if the timeout is reached and the Predicate is still * false. - *

* @param conf the configuration * @param timeout the timeout in milliseconds to wait for the predicate. * @param predicate the predicate to evaluate. @@ -148,7 +146,6 @@ public final class Waiter { * {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become * true, failing the test if the timeout is reached and the Predicate is still * false. - *

* @param conf the configuration * @param timeout the max timeout in milliseconds to wait for the predicate. * @param interval the interval in milliseconds to evaluate predicate. @@ -166,7 +163,6 @@ public final class Waiter { * {@link #getWaitForRatio(Configuration)} for the given {@link Predicate} to become * true, failing the test if the timeout is reached, the Predicate is still * false and failIfTimeout is set as true. - *

 * @param conf the configuration
 * @param timeout the timeout in milliseconds to wait for the predicate.
 * @param interval the interval in milliseconds to evaluate predicate.
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
index b6f1aeb..5a66e2a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.Shell;
 /**
  * A default cluster manager for HBase. Uses SSH, and hbase shell scripts
  * to manage the cluster. Assumes Unix-like commands are available like 'ps',
- * 'kill', etc. Also assumes the user running the test has enough "power" to start & stop
+ * 'kill', etc. Also assumes the user running the test has enough "power" to start &amp; stop
  * servers on the remote machines (for example, the test user could be the same user as the
  * user the daemon is running as)
  */
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
index d3433c7..0b56727 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
@@ -35,10 +35,10 @@ import org.junit.Before;
 /**
  * Base class for HBase integration tests that want to use the Chaos Monkey.
- * Usage: bin/hbase <sub_class_of_IntegrationTestBase> <options>
+ * Usage: bin/hbase &lt;sub_class_of_IntegrationTestBase&gt; &lt;options&gt;
  * Options: -h,--help Show usage
- *          -m,--monkey <arg> Which chaos monkey to run
- *          -monkeyProps <arg> The properties file for specifying chaos monkey properties.
+ *          -m,--monkey &lt;arg&gt; Which chaos monkey to run
+ *          -monkeyProps &lt;arg&gt; The properties file for specifying chaos monkey properties.
  *          -ncc Option to not clean up the cluster at the end.
  */
public abstract class IntegrationTestBase extends AbstractHBaseTool {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
index 92c65d8..e007bf8 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
@@ -48,12 +48,12 @@ import org.junit.experimental.categories.Category;
 /**
  *
- * Integration test that verifies Procedure V2.

+ * Integration test that verifies Procedure V2.
 *
 * DDL operations should go through (rollforward or rollback) when primary master is killed by
- * ChaosMonkey (default MASTER_KILLING)
+ * ChaosMonkey (default MASTER_KILLING)
 *
- * Multiple Worker threads are started to randomly do the following Actions in loops:
+ * Multiple Worker threads are started to randomly do the following Actions in loops:
 * Actions generating and populating tables:
 * <ul>
 *     <li>CreateTableAction</li>
@@ -75,7 +75,7 @@ import org.junit.experimental.categories.Category;
 *     <li>AlterNamespaceAction</li>
 *     <li>DeleteNamespaceAction</li>
 * </ul>
- *
+ *
 *
 * The threads run for a period of time (default 20 minutes) then are stopped at the end of
 * runtime. Verification is performed towards those checkpoints:
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
index 98d53e9..5b3c02e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
@@ -51,12 +51,12 @@ import com.google.common.collect.Lists;
 * with the replication of the edits before read_delay_ms to the given region replica id so that
 * the read and verify will not fail.
 *
- * The job will run for at least given runtime (default 10min) by running a concurrent
+ * The job will run for at least given runtime (default 10min) by running a concurrent
 * writer and reader workload followed by a concurrent updater and reader workload for
 * num_keys_per_server.
- *

+ *
* Example usage: - *

+ * 

* hbase org.apache.hadoop.hbase.IntegrationTestRegionReplicaReplication * -DIntegrationTestRegionReplicaReplication.num_keys_per_server=10000 * -Dhbase.IntegrationTestRegionReplicaReplication.runtime=600000 @@ -65,7 +65,7 @@ import com.google.common.collect.Lists; * -DIntegrationTestRegionReplicaReplication.region_replica_id=2 * -DIntegrationTestRegionReplicaReplication.num_read_threads=100 * -DIntegrationTestRegionReplicaReplication.num_write_threads=100 - *

+ *

 */
@Category(IntegrationTests.class)
public class IntegrationTestRegionReplicaReplication extends IntegrationTestIngest {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index f00127f..a359c51 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -91,14 +91,14 @@ import java.util.concurrent.atomic.AtomicLong;
 * It starts an MR job that creates linked chains
 *
 * The format of rows is like this:
- * Row Key -> Long
+ * Row Key -&gt; Long
 *
- * L:<< Chain Id >> -> Row Key of the next link in the chain
- * S:<< Chain Id >> -> The step in the chain that his link is.
- * D:<< Chain Id >> -> Random Data.
+ * L:&lt;&lt; Chain Id &gt;&gt; -&gt; Row Key of the next link in the chain
+ * S:&lt;&lt; Chain Id &gt;&gt; -&gt; The step in the chain that his link is.
+ * D:&lt;&lt; Chain Id &gt;&gt; -&gt; Random Data.
 *
 * All chains start on row 0.
- * All rk's are > 0.
+ * All rk's are &gt; 0.
 *
 * After creating the linked lists they are walked over using a TableMapper based Mapreduce Job.
 *
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..abd72fc 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -43,11 +43,11 @@ import org.junit.experimental.categories.Category;
 * 'aaa' to 'zzz', and for each row, sets the columns f1:(null) and f2:(null) to be
 * the the same as the row value.
 *
- * aaa, f1: => aaa
- * aaa, f2: => aaa
- * aab, f1: => aab
+ * aaa, f1: =&gt; aaa
+ * aaa, f2: =&gt; aaa
+ * aab, f1: =&gt; aab
  * ....
- * zzz, f2: => zzz
+ * zzz, f2: =&gt; zzz
  * 
 *
 * Then the test creates a snapshot from this table, and overrides the values in the original
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 437f200..9a69e37 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -79,29 +79,28 @@ import com.google.common.base.Objects;
 * different threads:
 * <ol>
 * <li>
- * Load Test Tool.
+ * Load Test Tool.
 * This runs so that all RegionServers will have some load and WALs will be full.
 * </li>
 * <li>
- * Scan thread.
+ * Scan thread.
 * This thread runs a very short scan over and over again recording how log it takes to respond.
 * The longest response is assumed to be the time it took to recover.
 * </li>
 * <li>
- * Put thread.
+ * Put thread.
 * This thread just like the scan thread except it does a very small put.
 * </li>
 * <li>
- * Admin thread.
+ * Admin thread.
 * This thread will continually go to the master to try and get the cluster status. Just like the
 * put and scan threads, the time to respond is recorded.
 * </li>
 * <li>
- * Chaos Monkey thread.
+ * Chaos Monkey thread.
 * This thread runs a ChaosMonkey.Action.
 * </li>
 * </ol>
- *
 * The ChaosMonkey actions currently run are:
 * <ul>
 *   <li>Restart the RegionServer holding meta.</li>
@@ -110,7 +109,6 @@ import com.google.common.base.Objects;
 *   <li>Move the Regions of the table used by the scan and put threads.</li>
 *   <li>Restart the master.</li>
 * </ul>
- *
 * At the end of the test a log line is output on the INFO level containing the timing data that was
 * collected.
 */
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 280a9c2..919a88d 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -904,7 +904,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
  }

  /**
-   * Remove all logs with logId <= {@code lastLogId}.
+   * Remove all logs with logId &lt;= {@code lastLogId}.
   */
  private void removeAllLogs(long lastLogId) {
    if (logs.size() <= 1) return;
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index e207735..2d5605b 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -106,7 +106,7 @@ import org.apache.hadoop.util.ToolRunner;
 * href="http://labs.google.com/papers/bigtable.html">Bigtable
 * paper, pages 8-10.
 *
- * If number of clients > 1, we start up a MapReduce job. Each map task
+ * If number of clients &gt; 1, we start up a MapReduce job. Each map task
 * runs an individual client. Each client does about 1GB of data.
 */
public class PerformanceEvaluation extends Configured implements Tool {
@@ -779,8 +779,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
  }

  /**
-   * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation.Test
-   * tests}. This makes the reflection logic a little easier to understand...
+   * Wraps up options passed to {@link org.apache.hadoop.hbase.rest.PerformanceEvaluation.TestOptions}.
+   * This makes the reflection logic a little easier to understand...
   */
  static class TestOptions {
    private int startRow;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
index 530a323..9e658b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthChecker.java
@@ -54,7 +54,8 @@ class HealthChecker {
  /**
   * Initialize.
   *
-   * @param configuration
+   * @param location
+   * @param timeout
   */
  public void init(String location, long timeout) {
    this.healthCheckScript = location;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index 63d88ef..0b8bf84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -67,7 +67,7 @@ class HFileArchiveManager {
  /**
   * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
-   * pass of the {@link HFileCleaner}, if the HFiles are retained by another cleaner.
+   * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another cleaner.
   * @param table name of the table for which to disable hfile retention.
   * @return this for chaining.
   * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
index f5fc979..4425075 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignExceptionDispatcher.java
@@ -96,7 +96,6 @@ public class ForeignExceptionDispatcher implements ForeignExceptionListener, For
  /**
   * Sends an exception to all listeners.
-   * @param message human readable message passed to the listener
   * @param e {@link ForeignException} containing the cause. Can be null.
   */
  private void dispatch(ForeignException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index b4f1953..411ad74 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -221,7 +221,6 @@ public class HFileSystem extends FilterFileSystem {
  /**
   * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient
   * linked to this FileSystem. See HBASE-6435 for the background.
-   *

   * There should be no reason, except testing, to create a specific ReorderBlocks.
   *
   * @return true if the interceptor was added, false otherwise.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index 0546253..70af2c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -683,7 +683,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
    /**
     * Creates a new SaslClientCallbackHandler.
     * @param userName SASL user name
-     * @Param password SASL password
+     * @param password SASL password
     */
    public SaslClientCallbackHandler(String userName, char[] password) {
      this.password = password;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index e2af762..02d881e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -126,8 +126,7 @@ public class CellCounter extends Configured implements Tool {
     * @param values  The columns.
     * @param context The current context.
     * @throws IOException When something is broken with the data.
-     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
-     *      org.apache.hadoop.mapreduce.Mapper.Context)
+     * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)
     */
    @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index aca84fd..c5e0f44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -76,8 +76,7 @@ public class RowCounter extends Configured implements Tool {
     * @param values  The columns.
     * @param context The current context.
     * @throws IOException When something is broken with the data.
-     * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
-     *      org.apache.hadoop.mapreduce.Mapper.Context)
+     * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)
     */
    @Override
    public void map(ImmutableBytesWritable row, Result values,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 9ebb3c1..fcbfe0b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -136,7 +136,7 @@ extends InputFormat {
  private Connection connection;

-  /** The reverse DNS lookup cache mapping: IPAddress => HostName */
+  /** The reverse DNS lookup cache mapping: IPAddress =&gt; HostName */
  private HashMap reverseDNSCacheMap = new HashMap();

@@ -471,6 +471,7 @@ extends InputFormat {
   *   false
   *   13, -19, 126, -65
   *
+   * Example for selection of Split point
   *
   *
   * Set this function as "public static", make it easier for test.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 37e4e44..db9233e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -925,7 +925,7 @@ public class TableMapReduceUtil {
   * Add entries to packagedClasses corresponding to class files
   * contained in jar.
   * @param jar The jar who's content to list.
-   * @param packagedClasses map[class -> jar]
+   * @param packagedClasses map[class -&gt; jar]
   */
  private static void updateMap(String jar, Map<String, String> packagedClasses) throws IOException {
    if (null == jar || jar.isEmpty()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
index 02fcbba..40aa0fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
@@ -55,7 +55,7 @@ public class WALInputFormat extends InputFormat {
  public static final String END_TIME_KEY = "wal.end.time";

  /**
-   * {@link InputSplit} for {@link WAL} files. Each split represent
+   * {@link InputSplit} for {@link org.apache.hadoop.hbase.wal.WAL} files. Each split represent
   * exactly one log file.
   */
  static class WALSplit extends InputSplit implements Writable {
@@ -129,7 +129,7 @@ public class WALInputFormat extends InputFormat {
  }

  /**
-   * {@link RecordReader} for an {@link WAL} file.
+   * {@link RecordReader} for an {@link org.apache.hadoop.hbase.wal.WAL} file.
   * Implementation shared with deprecated HLogInputFormat.
   */
  static abstract class WALRecordReader extends RecordReader {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index b95b894..ffd804a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -88,7 +88,7 @@ public class RegionStates {
    new HashMap();

  /**
-   * Holds mapping of table -> region state
+   * Holds mapping of table -&gt; region state
   */
  private final Map<TableName, Map<String, RegionState>> regionStatesTableIndex =
      new HashMap<TableName, Map<String, RegionState>>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 612a8d0..8c6e411 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -449,7 +449,6 @@ public class ServerManager {
   * Adds the onlineServers list. onlineServers should be locked.
   * @param serverName The remote servers name.
   * @param sl
-   * @return Server load from the removed server, if any.
   */
  @VisibleForTesting
  void recordNewServerWithLock(final ServerName serverName, final ServerLoad sl) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 4dfa391..a0244de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -222,7 +222,7 @@ public class TableNamespaceManager {
  /**
   * Create Namespace in a blocking manner. Keeps trying until
-   * {@link ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires.
+   * {@link ClusterSchema#DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT} expires.
   * Note, by-passes notifying coprocessors and name checks. Use for system namespaces only.
   */
  private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index dc5bace..7dcf2ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -583,8 +583,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
  /**
   * Return true if the placement of region on server would lower the availability
   * of the region in question
-   * @param server
-   * @param region
+   * @param regionInfo
+   * @param serverName
   * @return true or false
   */
  boolean wouldLowerAvailability(HRegionInfo regionInfo, ServerName serverName) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
index a6724ee..947c0ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
@@ -104,7 +104,6 @@ class RegionLocationFinder {
  /**
   * Create a cache for region to list of servers
-   * @param time time to cache the locations
   * @return A new Cache.
   */
  private LoadingCache createCache() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 181990b..2f3b1d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -1364,7 +1364,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
  /**
   * A cost function for region replicas. We give a very high cost to hosting
   * replicas of the same region in the same host. We do not prevent the case
-   * though, since if numReplicas > numRegionServers, we still want to keep the
+   * though, since if numReplicas &gt; numRegionServers, we still want to keep the
   * replica open.
   */
  static class RegionReplicaHostCostFunction extends CostFunction {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
index d39267b..e7400c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java
@@ -81,7 +81,7 @@ import org.apache.zookeeper.KeeperException;
 * The key of the input is a file name, the value is a collection of KeyValues
 * (the value format of KeyValue is valueLength + fileName) in HBase.
 * In this reducer, we could know how many cells exist in HBase for a mob file.
- * If the existCellSize/mobFileSize < compactionRatio, this mob
+ * If the existCellSize/mobFileSize &lt; compactionRatio, this mob
 * file needs to be merged.
 */
@InterfaceAudience.Private
@@ -420,7 +420,7 @@ public class SweepReducer extends Reducer {
   * @param compactionRatio compactionRatio the invalid ratio.
   * If there're too many cells deleted in a mob file, it's regarded as invalid,
   * and needs to be written to a new one.
-   * If existingCellSize/fileSize < compactionRatio, it's regarded as a invalid one.
+   * If existingCellSize/fileSize &lt; compactionRatio, it's regarded as a invalid one.
   * @param compactionMergeableSize compactionMergeableSize If the size of a mob file is less
   * than this value, it's regarded as a small file and needs to be merged
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index 9980044..05319d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -158,7 +158,7 @@ public class TaskMonitor {
   * This class encapsulates an object as well as a weak reference to a proxy
   * that passes through calls to that object. In art form:
   *

-   *     Proxy  <------------------
+   *     Proxy  &lt;------------------
    *       |                       \
    *       v                        \
    * PassthroughInvocationHandler   |  weak reference
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
index 523b056..dbcfd4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java
@@ -75,7 +75,7 @@ class NamespaceStateManager {
   /**
    * Check if adding a region violates namespace quota, if not update namespace cache.
    *
-   * @param TableName
+   * @param name name of the table
    * @param regionName
    * @param incr
    * @return true, if region can be added to table.
@@ -108,7 +108,7 @@ class NamespaceStateManager {
   
   /**
    * Check and update region count for an existing table. To handle scenarios like restore snapshot
-   * @param TableName name of the table for region count needs to be checked and updated
+   * @param name name of the table for region count needs to be checked and updated
    * @param incr count of regions
    * @throws QuotaExceededException if quota exceeds for the number of regions allowed in a
    *           namespace
@@ -183,7 +183,7 @@ class NamespaceStateManager {
   /**
    * Delete the namespace state.
    *
-   * @param An instance of NamespaceTableAndRegionInfo
+   * @param namespace instance of NamespaceTableAndRegionInfo
    */
   void deleteNamespace(String namespace) {
     this.nsStateCache.remove(namespace);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 89e723e..18bafad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1684,7 +1684,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * A split takes the config from the parent region & passes it to the daughter
+   * A split takes the config from the parent region &amp; passes it to the daughter
    * region's constructor. If 'conf' was passed, you would end up using the HTD
    * of the parent region in addition to the new daughter HTD. Pass 'baseConf'
    * to the daughter regions to avoid this tricky dedupe problem.
@@ -3767,7 +3767,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Atomically apply the given map of family->edits to the memstore.
+   * Atomically apply the given map of family-&gt;edits to the memstore.
    * This handles the consistency control on its own, but the caller
    * should already have locked updatesLock.readLock(). This also does
    * not check the families for validity.
@@ -3862,9 +3862,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Append the given map of family->edits to a WALEdit data structure.
+   * Append the given map of family-&gt;edits to a WALEdit data structure.
    * This does not write to the WAL itself.
-   * @param familyMap map of family->edits
+   * @param familyMap map of family-&gt;edits
    * @param walEdit the destination entry to append into
    */
  private void addFamilyMapToWALEdit(Map<byte[], List<Cell>> familyMap,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index a69d8c0..98d93b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -723,7 +723,7 @@ class MemStoreFlusher implements FlushRequester {
 
     /**
      * @param maximumWait
-     * @return True if we have been delayed > maximumWait milliseconds.
+     * @return True if we have been delayed &gt; maximumWait milliseconds.
      */
     public boolean isMaximumWait(final long maximumWait) {
       return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
index 7424e4e..d2e78e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
@@ -162,7 +162,7 @@ public class MultiVersionConcurrencyControl {
    *
    * @param writeEntry
    *
-   * @return true if e is visible to MVCC readers (that is, readpoint >= e.writeNumber)
+   * @return true if e is visible to MVCC readers (that is, readpoint &gt;= e.writeNumber)
    */
   public boolean complete(WriteEntry writeEntry) {
     synchronized (writeQueue) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9cfc5df..b35dbe7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -426,7 +426,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * Starts the nonce operation for a mutation, if needed.
    * @param mutation Mutation.
    * @param nonceGroup Nonce group from the request.
-   * @returns Nonce used (can be NO_NONCE).
+   * @return Nonce used (can be NO_NONCE).
    */
   private long startNonceOperation(final MutationProto mutation, long nonceGroup)
       throws IOException, OperationConflictException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
index b5ef319..20a68ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
@@ -123,7 +123,7 @@ public class ScanWildcardColumnTracker implements ColumnTracker {
   /**
    * Check whether this version should be retained.
    * There are 4 variables considered:
-   * If this version is past max versions -> skip it
+   * If this version is past max versions -&gt; skip it
    * If this kv has expired or was deleted, check min versions
    * to decide whther to skip it or not.
    *
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 1582237..7be1faa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -218,7 +218,7 @@ public class StoreFileReader {
   /**
    * Check if this storeFile may contain keys within the TimeRange that
    * have not expired (i.e. not older than oldestUnexpiredTS).
-   * @param timeRange the timeRange to restrict
+   * @param tr the timeRange to restrict
    * @param oldestUnexpiredTS the oldest timestamp that is not expired, as
    *          determined by the column family's TTL
    * @return false if queried keys definitely don't exist in this StoreFile
@@ -232,8 +232,8 @@ public class StoreFileReader {
    * Checks whether the given scan passes the Bloom filter (if present). Only
    * checks Bloom filters for single-row or single-row-column scans. Bloom
    * filter checking for multi-gets is implemented as part of the store
-   * scanner system (see {@link StoreFileScanner#seekExactly}) and uses
-   * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)}
+   * scanner system (see {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner#seek(org.apache.hadoop.hbase.Cell)}) and
+   * uses the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)}
    * and {@link #passesGeneralRowColBloomFilter(Cell)}.
    *
    * @param scan the scan specification. Used to determine the row, and to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 080bb95..d3153b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -707,11 +707,11 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
    *                                    Next Index Key        SEEK_NEXT_COL
    * 
 * Now imagine we want columns c1 and c3 (see first diagram above), the 'Next Index Key' of r1/c4
- * is > r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we only
- * want one version of c1, after we have it, a SEEK_COL will be issued to get to c2. Looking at
- * the 'Next Index Key', it would land us in the next block, so we should SEEK. In other scenarios
- * where the SEEK will not land us in the next block, it is very likely better to issues a series
- * of SKIPs.
+ * is &gt; r1/c3 so we should seek to get to the c1 on the next row, r2. In second case, say we
+ * only want one version of c1, after we have it, a SEEK_COL will be issued to get to c2.
+ * Looking at the 'Next Index Key', it would land us in the next block, so we should SEEK. In
+ * other scenarios where the SEEK will not land us in the next block, it is very likely better
+ * to issues a series of SKIPs.
 */
@VisibleForTesting
protected ScanQueryMatcher.MatchCode optimize(ScanQueryMatcher.MatchCode qcode, Cell cell) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
index df1ddf2..2425a8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
@@ -862,7 +862,6 @@ public class StripeStoreFileManager
  /**
   * Remove compacted files.
-   * @param compactedFiles Compacted files.
   */
  private void removeCompactedFiles() throws IOException {
    for (StoreFile oldFile : this.compactedFiles) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index 74c950c..daa99fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -251,7 +251,6 @@ public class CompactionRequest implements Comparable {
  /**
   * Recalculate the size of the compaction based on current files.
-   * @param files files that should be included in the compaction
   */
  private void recalculateSize() {
    long sz = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
index f0cb5d2..965d2d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
@@ -157,7 +157,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
  /**
   * Check that all files satisfy the constraint
-   * FileSize(i) <= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio.
+   * FileSize(i) &lt;= ( Sum(0,N,FileSize(_)) - FileSize(i) ) * Ratio.
   *
   * @param files List of store files to consider as a compaction candidate.
   * @param currentRatio The ratio to use.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 3386bfd..7e619a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -119,20 +119,19 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy {
   * First exclude bulk-load files if indicated in configuration.
   * Start at the oldest file and stop when you find the first file that
   * meets compaction criteria:
-   * (1) a recently-flushed, small file (i.e. <= minCompactSize)
+   * (1) a recently-flushed, small file (i.e. &lt;= minCompactSize)
   * OR
   * (2) within the compactRatio of sum(newer_files)
   * Given normal skew, any newer files will also meet this criteria
-   *

   * Additional Note:
-   * If fileSizes.size() >> maxFilesToCompact, we will recurse on
+   * If fileSizes.size() &gt;&gt; maxFilesToCompact, we will recurse on
   * compact(). Consider the oldest files first to avoid a
   * situation where we always compact [end-threshold,end). Then, the
   * last file becomes an aggregate of the previous compactions.
   *
   * normal skew:
   *
-   * older ----> newer (increasing seqID)
+   * older ----&gt; newer (increasing seqID)
   *     _
   *    | |   _
   *    | |  | |   _
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index cdf5757..585d44c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -487,14 +487,8 @@ public class FSHLog extends AbstractFSWAL {
    /**
     * UPDATE!
-     * @param syncs the batch of calls to sync that arrived as this thread was starting; when done,
-     * we will put the result of the actual hdfs sync call as the result.
-     * @param sequence The sequence number on the ring buffer when this thread was set running. If
-     * this actual writer sync completes then all appends up this point have been
-     * flushed/synced/pushed to datanodes. If we fail, then the passed in
-     * syncs futures will return the exception to their clients; some of the
-     * edits may have made it out to data nodes but we will report all that were part of
-     * this session as failed.
+     * @param name
+     * @param maxHandlersCount
     */
    SyncRunner(final String name, final int maxHandlersCount) {
      super(name);
@@ -536,7 +530,8 @@ public class FSHLog extends AbstractFSWAL {
    }

    /**
-     * Release all SyncFutures whose sequence is <= currentSequence.
+     * Release all SyncFutures whose sequence is &lt;= currentSequence.
+     * @param currentSequence
     * @param t May be non-null if we are processing SyncFutures because an exception was thrown.
     * @return Count of SyncFutures we let go.
     */
@@ -820,9 +815,10 @@ public class FSHLog extends AbstractFSWAL {
   * To start up the drama, Thread A creates an instance of this class each time it would do this
   * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only).
   * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts
-   * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint()} when it cannot proceed
+   * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot
+   * proceed
   * until the Thread B 'safe point' is attained. Thread A will be held inside in
-   * {@link #waitSafePoint()} until Thread B reaches the 'safe point'. Once there, Thread B frees
+   * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees
   * Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe
   * point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks
   * here until Thread A calls {@link #releaseSafePoint()}). Thread A proceeds to do what it needs
@@ -891,7 +887,8 @@ public class FSHLog extends AbstractFSWAL {
   * 'writer/appender' thread. Appends edits and starts up sync runs. Tries its best to batch up
   * syncs. There is no discernible benefit batching appends so we just append as they come in
   * because it simplifies the below implementation. See metrics for batching effectiveness (In
-   * measurement, at 100 concurrent handlers writing 1k, we are batching > 10 appends and 10 handler
+   * measurement, at 100 concurrent handlers writing 1k, we are batching &gt; 10 appends and 10
+   * handler
   * sync invocations for every actual dfsclient sync call; at 10 concurrent handlers, YMMV).
   *

   * Herein, we have an array into which we store the sync futures as they come in. When we have a
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 0755358..350e698 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -232,13 +232,12 @@ public class ProtobufLogReader extends ReaderBase {
   * PB_WAL_COMPLETE_MAGIC.size() bytes. In case trailer is not present, it returns false;
   * otherwise, sets the trailer and sets this.walEditsStopOffset variable up to the point just
   * before the trailer.
-   *

   * The trailer is ignored in case:
-   * <li>fileLength is 0 or not correct (when file is under recovery, etc).
-   * <li>the trailer size is negative.</li>
-   *
+   *
+   * <li>fileLength is 0 or not correct (when file is under recovery, etc).</li> the trailer size
+   * is negative.
   *
-   * In case the trailer size > this.trailerMaxSize, it is read after a WARN message.
+   * In case the trailer size &gt; this.trailerMaxSize, it is read after a WARN message.
+   *
   * @return true if a valid trailer is present
   * @throws IOException
   */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
index 62dea53..5910f8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
@@ -396,8 +396,8 @@ class SequenceIdAccounting {
  /**
   * Iterates over the given Map and compares sequence ids with corresponding entries in
-   * {@link #oldestUnflushedRegionSequenceIds}. If a region in
-   * {@link #oldestUnflushedRegionSequenceIds} has a sequence id less than that passed in
+   * {@link #lowestUnflushedSequenceIds}. If a region in
+   * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in
   * sequenceids then return it.
   * @param sequenceids Sequenceids keyed by encoded region name.
   * @return regions found in this instance with sequence ids less than those passed in.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 5ec218a..e298577 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -54,8 +54,8 @@ class SyncFuture {
  private long txid;

  /**
-   * The transaction id that was set in here when we were marked done. Should be equal or > txnId.
-   * Put this data member into the NOT_DONE state while this class is in use.
+   * The transaction id that was set in here when we were marked done. Should be equal or &gt;
+   * txnId. Put this data member into the NOT_DONE state while this class is in use.
   */
  private long doneTxid;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
index 559c593..a6d2ab6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
@@ -163,7 +163,7 @@ class AccessControlFilter extends FilterBase {
   * @param pbBytes A pb serialized {@link AccessControlFilter} instance
   * @return An instance of {@link AccessControlFilter} made from bytes
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-   * @see {@link #toByteArray()}
+   * @see #toByteArray()
   */
  public static AccessControlFilter parseFrom(final byte [] pbBytes)
  throws DeserializationException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
index d0d9b63..0b2c7b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/HbaseObjectWritableFor96Migration.java
@@ -104,7 +104,7 @@ import com.google.protobuf.RpcController;
 /**

    This is a customized version of the polymorphic hadoop - * {@link ObjectWritable}. It removes UTF8 (HADOOP-414). + * {@link org.apache.hadoop.io.ObjectWritable}. It removes UTF8 (HADOOP-414). * Using {@link Text} intead of UTF-8 saves ~2% CPU between reading and writing * objects running a short sequentialWrite Performance Evaluation test just in * ObjectWritable alone; more when we're doing randomRead-ing. Other @@ -113,7 +113,7 @@ import com.google.protobuf.RpcController; * if non-Writable classes are introduced -- if passed a Writable for which we * have no code, we just do the old-school passing of the class name, etc. -- * but passing codes the savings are large particularly when cell - * data is small (If < a couple of kilobytes, the encoding/decoding of class + * data is small (If &lt; a couple of kilobytes, the encoding/decoding of class * name and reflection to instantiate class was costing in excess of the cell * handling). * @deprecated This class is needed migrating TablePermissions written with diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 25cfc8b..5543c33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -83,7 +83,7 @@ public class TableAuthManager implements Closeable { /** * Returns a combined map of user and group permissions, with group names - * distinguished according to {@link AuthUtil.isGroupPrincipal} + * distinguished according to {@link AuthUtil#isGroupPrincipal} */ public ListMultimap getAllPermissions() { ListMultimap tmp = ArrayListMultimap.create(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index ce1da03..3273f96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -130,14 +130,12 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Stops the given region server, by attempting a gradual stop. - * @return whether the operation finished with success * @throws IOException if something goes wrong */ public abstract void stopRegionServer(ServerName serverName) throws IOException; /** * Wait for the specified region server to join the cluster - * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public void waitForRegionServerToStart(String hostname, int port, long timeout) @@ -157,7 +155,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Wait for the specified region server to stop the thread / process.
- * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForRegionServerToStop(ServerName serverName, long timeout) @@ -187,7 +184,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Wait for the specified zookeeper node to join the cluster - * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForZkNodeToStart(ServerName serverName, long timeout) @@ -195,7 +191,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Wait for the specified zookeeper node to stop the thread / process. - * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForZkNodeToStop(ServerName serverName, long timeout) @@ -224,7 +219,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Wait for the specified datanode to join the cluster - * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStart(ServerName serverName, long timeout) @@ -232,7 +226,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { /** * Wait for the specified datanode to stop the thread / process. - * @return whether the operation finished with success * @throws IOException if something goes wrong or timeout occurs */ public abstract void waitForDataNodeToStop(ServerName serverName, long timeout) @@ -242,7 +235,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { * Starts a new master on the given hostname or if this is a mini/local cluster, * starts a master locally. * @param hostname the hostname to start the master on - * @return whether the operation finished with success * @throws IOException if something goes wrong */ public abstract void startMaster(String hostname, int port) throws IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index a6dc59f..378d35e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -599,7 +599,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Start a minidfscluster. * @param servers How many DNs to start. * @throws Exception - * @see {@link #shutdownMiniDFSCluster()} + * @see #shutdownMiniDFSCluster() * @return The mini dfs cluster created. */ public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception { @@ -614,7 +614,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * datanodes will have the same host name. * @param hosts hostnames DNs to run on. * @throws Exception - * @see {@link #shutdownMiniDFSCluster()} + * @see #shutdownMiniDFSCluster() * @return The mini dfs cluster created. */ public MiniDFSCluster startMiniDFSCluster(final String hosts[]) @@ -632,7 +632,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param servers How many DNs to start. * @param hosts hostnames DNs to run on. * @throws Exception - * @see {@link #shutdownMiniDFSCluster()} + * @see #shutdownMiniDFSCluster() * @return The mini dfs cluster created. 
*/ public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) @@ -857,7 +857,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Start up a minicluster of hbase, dfs, and zookeeper. * @throws Exception * @return Mini hbase cluster instance created. - * @see {@link #shutdownMiniDFSCluster()} + * @see #shutdownMiniDFSCluster() */ public MiniHBaseCluster startMiniCluster() throws Exception { return startMiniCluster(1, 1); @@ -869,7 +869,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * (will overwrite if dir already exists) * @throws Exception * @return Mini hbase cluster instance created. - * @see {@link #shutdownMiniDFSCluster()} + * @see #shutdownMiniDFSCluster() */ public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create) throws Exception { @@ -882,11 +882,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * subdirectory in a directory under System property test.build.data. * Directory is cleaned up on exit. * @param numSlaves Number of slaves to start up. We'll start this many - * datanodes and regionservers. If numSlaves is > 1, then make sure + * datanodes and regionservers. If numSlaves is &gt; 1, then make sure * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise * bind errors. * @throws Exception - * @see {@link #shutdownMiniCluster()} + * @see #shutdownMiniCluster() * @return Mini hbase cluster instance created. */ public MiniHBaseCluster startMiniCluster(final int numSlaves) @@ -898,7 +898,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Start minicluster. Whether to create a new root or data dir path even if such a path * has been created earlier is decided based on flag create * @throws Exception - * @see {@link #shutdownMiniCluster()} + * @see #shutdownMiniCluster() * @return Mini hbase cluster instance created. */ public MiniHBaseCluster startMiniCluster(final int numMasters, @@ -910,7 +910,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * start minicluster * @throws Exception - * @see {@link #shutdownMiniCluster()} + * @see #shutdownMiniCluster() * @return Mini hbase cluster instance created. */ public MiniHBaseCluster startMiniCluster(final int numMasters, @@ -932,13 +932,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * subdirectory in a directory under System property test.build.data. * Directory is cleaned up on exit. * @param numMasters Number of masters to start up. We'll start this many - * hbase masters. If numMasters > 1, you can find the active/primary master + * hbase masters. If numMasters &gt; 1, you can find the active/primary master * with {@link MiniHBaseCluster#getMaster()}. * @param numSlaves Number of slaves to start up. We'll start this many * regionservers. If dataNodeHosts == null, this also indicates the number of * datanodes to start. If dataNodeHosts != null, the number of datanodes is * based on dataNodeHosts.length. - * If numSlaves is > 1, then make sure + * If numSlaves is &gt; 1, then make sure * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise * bind errors. * @param dataNodeHosts hostnames DNs to run on. @@ -947,7 +947,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * If you start MiniDFSCluster without host names, * all instances of the datanodes will have the same host name.
* @throws Exception - * @see {@link #shutdownMiniCluster()} + * @see #shutdownMiniCluster() * @return Mini hbase cluster instance created. */ public MiniHBaseCluster startMiniCluster(final int numMasters, @@ -971,13 +971,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * subdirectory in a directory under System property test.build.data. * Directory is cleaned up on exit. * @param numMasters Number of masters to start up. We'll start this many - * hbase masters. If numMasters > 1, you can find the active/primary master + * hbase masters. If numMasters &gt; 1, you can find the active/primary master * with {@link MiniHBaseCluster#getMaster()}. * @param numSlaves Number of slaves to start up. We'll start this many * regionservers. If dataNodeHosts == null, this also indicates the number of * datanodes to start. If dataNodeHosts != null, the number of datanodes is * based on dataNodeHosts.length. - * If numSlaves is > 1, then make sure + * If numSlaves is &gt; 1, then make sure * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise * bind errors. * @param dataNodeHosts hostnames DNs to run on. @@ -989,7 +989,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param regionserverClass The class to use as HRegionServer, or null for * default * @throws Exception - * @see {@link #shutdownMiniCluster()} + * @see #shutdownMiniCluster() * @return Mini hbase cluster instance created. */ public MiniHBaseCluster startMiniCluster(final int numMasters, @@ -1070,7 +1070,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return Reference to the hbase mini hbase cluster. * @throws IOException * @throws InterruptedException - * @see {@link #startMiniCluster()} + * @see #startMiniCluster() */ public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves, Class masterClass, @@ -1152,7 +1152,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Stops mini hbase, zk, and hdfs clusters. * @throws IOException - * @see {@link #startMiniCluster(int)} + * @see #startMiniCluster(int) */ public void shutdownMiniCluster() throws Exception { LOG.info("Shutting down minicluster"); @@ -1841,9 +1841,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName * @param startKey * @param stopKey - * @param callingMethod - * @param conf * @param isReadOnly + * @param durability + * @param wal * @param families * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done.
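Aside for reviewers: the hunks above all document the same lifecycle, where each startMiniCluster(...) overload is paired with shutdownMiniCluster(). A minimal sketch of that pattern follows (illustrative only, not part of this patch; the table name "demo" and family "f" are invented, and createTable is assumed to return a client Table, as in the HBase versions this patch targets):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // One master plus one regionserver/datanode; the overloads documented
    // above cover multi-master and multi-slave variants.
    util.startMiniCluster(1);
    try {
      Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("f"));
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    } finally {
      // The corrected @see tags point here: always pair the start call with
      // shutdownMiniCluster() so the ZK, DFS, and HBase threads are torn down.
      util.shutdownMiniCluster();
    }
  }
}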
@@ -2004,7 +2004,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** A tracker for tracking and validating table rows - * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])} + * generated with + * {@link org.apache.hadoop.hbase.HBaseTestingUtility#loadTable(org.apache.hadoop.hbase.client.Table, byte[])} */ public static class SeenRowTracker { int dim = 'z' - 'a' + 1; @@ -2240,7 +2241,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return digest.toString(); } - /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */ + /** All the row values for the data loaded by + * {@link org.apache.hadoop.hbase.HBaseTestingUtility#loadTable(org.apache.hadoop.hbase.client.Table, byte[])} */ public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB static { int i = 0; @@ -3014,7 +3016,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the * regions have been all assigned. - * @see #waitTableEnabled(Admin, byte[], long) + * @see #waitTableEnabled(TableName, long) * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked enabled. * @throws InterruptedException @@ -3203,9 +3205,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Uses directly the assignment manager to assign the region. * and waits until the specified region has completed assignment. - * @param tableName the table name + * @param regionInfo the region info instance * @throws IOException - * @throw InterruptedException + * @throws InterruptedException * @return true if the region is assigned false otherwise. */ public boolean assignRegion(final HRegionInfo regionInfo) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index e5aec57..ab3b649 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -145,7 +145,7 @@ public class HFilePerformanceEvaluation { } /** - * Write a test HFile with the given codec & cipher + * Write a test HFile with the given codec &amp; cipher * @param conf * @param fs * @param mf diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index cea10ebd..1f0d906 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -156,8 +156,8 @@ public abstract class MultithreadedTestUtil { * Used for unit tests that spawn threads. E.g., *

    * - * List<Future<Void>> results = Lists.newArrayList(); - * Future<Void> f = executor.submit(new Callable<Void> { + * List&lt;Future&lt;Void&gt;&gt; results = Lists.newArrayList(); + * Future&lt;Void&gt; f = executor.submit(new Callable&lt;Void&gt; { * public Void call() { * assertTrue(someMethod()); * } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index affa9b3..2318690 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -115,7 +115,7 @@ public class TestMetaTableAccessorNoCluster { /** * Test that MetaTableAccessor will ride over server throwing * "Server not running" IOEs. - * @see @link {https://issues.apache.org/jira/browse/HBASE-3446} + * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a> * @throws IOException * @throws InterruptedException */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java index ba6e1d4..6b0c235 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java @@ -301,12 +301,13 @@ public class TestMetaTableLocator { * want to pass a mocked HRS; can be null. * @param client A mocked ClientProtocol instance, can be null * @return Mock up a connection that returns a {@link Configuration} when - * {@link HConnection#getConfiguration()} is called, a 'location' when - * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called, + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getConfiguration()} is called, a + * 'location' when + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getRegionLocation(TableName, byte[], boolean)} is called, * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when - * {@link HConnection#getAdmin(ServerName)} is called, returns the passed + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when - * {@link HConnection#getClient(ServerName)} is called. + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getClient(ServerName)} is called.
* @throws IOException */ private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java index d4f7cdd..6008bb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java @@ -203,7 +203,7 @@ public class TestStochasticBalancerJmxMetrics extends BalancerTestBase { } /** - * Read the attributes from Hadoop->HBase->Master->Balancer in JMX + * Read the attributes from Hadoop-&gt;HBase-&gt;Master-&gt;Balancer in JMX * @throws IOException */ private Set readJmxMetrics() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index c8ccd2a..fa495c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -43,7 +43,7 @@ public class HConnectionTestingUtility { /** * Get a Mocked {@link ClusterConnection} that goes with the passed conf * configuration instance. Minimally the mock will return - * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked. + * <code>conf</code> when {@link ClusterConnection#getConfiguration()} is invoked. * Be sure to shutdown the connection when done by calling * {@link Connection#close()} else it will stick around; this is probably not what you want. * @param conf configuration @@ -141,8 +141,8 @@ public class HConnectionTestingUtility { * @param conf configuration * @return ClusterConnection object for conf * @throws ZooKeeperConnectionException - * @see @link - * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} + * @see + * <a href="http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)">spy(T)</a> */ public static ClusterConnection getSpiedConnection(final Configuration conf) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index ca4b609..36cafad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -2400,7 +2400,7 @@ public class TestFromClientSide { /** * HBASE-1182 - * Scan for columns > some timestamp + * Scan for columns &gt; some timestamp */ @Test public void testJiraTest1182() throws Exception { @@ -2847,7 +2847,7 @@ public class TestFromClientSide { /** * Verify a single column using gets. * Expects family and qualifier arrays to be valid for at least - * the range: idx-2 < idx < idx+2 + * the range: idx-2 &lt; idx &lt; idx+2 */ private void getVerifySingleColumn(Table ht, byte [][] ROWS, int ROWIDX, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 1b20b76..a192e93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -320,7 +320,7 @@ public class TestHCM { * individual timeout is fine.
We do that with: * - client side: an operation timeout of 30 seconds * - server side: we sleep 20 second at each attempt. The first work fails, the second one - * succeeds. But the client won't wait that much, because 20 + 20 > 30, so the client + * succeeds. But the client won't wait that much, because 20 + 20 &gt; 30, so the client * timeouted when the server answers. */ @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index 56f01c3..cd321b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -39,7 +39,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Run tests related to {@link TimestampsFilter} using HBase client APIs. + * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client APIs. * Sets up the HBase mini cluster once at start. Each creates a table * named for the method and does its stuff against that. */ @@ -401,7 +401,7 @@ public class TestMultipleTimestamps { /** * Assert that the passed in KeyValue has expected contents for the - * specified row, column & timestamp. + * specified row, column &amp; timestamp. */ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { @@ -427,7 +427,7 @@ public class TestMultipleTimestamps { /** * Uses the TimestampFilter on a Get to request a specified list of - * versions for the row/column specified by rowIdx & colIdx. + * versions for the row/column specified by rowIdx &amp; colIdx. * */ private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java index c803752..5265701 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java @@ -286,7 +286,7 @@ public class TestTimestampsFilter { /** * Assert that the passed in KeyValue has expected contents for the - * specified row, column & timestamp. + * specified row, column &amp; timestamp. */ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { @@ -312,7 +312,7 @@ public class TestTimestampsFilter { /** * Uses the TimestampFilter on a Get to request a specified list of - * versions for the row/column specified by rowIdx & colIdx. + * versions for the row/column specified by rowIdx &amp; colIdx. * */ private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java index 4fe0d23..9002a99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java @@ -184,7 +184,7 @@ public class TestAggregateProtocol { } /** - * This will test the row count with startrow > endrow. The result should be + * This will test the row count with startrow &gt; endrow. The result should be * -1.
* @throws Throwable */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 8da76a6..f41ec38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -531,7 +531,7 @@ public class TestWALObserver { * Copied from HRegion. * * @param familyMap - * map of family->edits + * map of family-&gt;edits * @param walEdit * the destination entry to append into */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java index 6642638..711f53b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestHttpServer.java @@ -338,7 +338,7 @@ public class TestHttpServer extends HttpServerFunctionalTest { } /** - * Custom user->group mapping service. + * Custom user-&gt;group mapping service. */ public static class MyGroupsProvider extends ShellBasedUnixGroupsMapping { static Map<String, List<String>> mapping = new HashMap<String, List<String>>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 0e5f08e..8ebde11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -65,9 +65,9 @@ public class TestHalfStoreFileReader { /** * Test the scanner and reseek of a half hfile scanner. The scanner API - * demands that seekTo and reseekTo() only return < 0 if the key lies + * demands that seekTo and reseekTo() only return &lt; 0 if the key lies * before the start of the file (with no position on the scanner). Returning - * 0 if perfect match (rare), and return > 1 if we got an imperfect match. + * 0 if perfect match (rare), and return &gt; 1 if we got an imperfect match. * * The latter case being the most common, we should generally be returning 1, * and if we do, there may or may not be a 'next' in the scanner/file. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 486c961..4d6b0a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -106,7 +106,7 @@ import org.junit.rules.TestRule; import org.mockito.Mockito; /** - * Simple test for {@link CellSortReducer} and {@link HFileOutputFormat2}. + * Simple test for {@link org.apache.hadoop.hbase.mapreduce.PutSortReducer} and {@link HFileOutputFormat2}. * Sets up and runs a mapreduce job that writes hfile output. * Creates a few inner classes to implement splits and an inputformat that * emits keys and values like those of {@link PerformanceEvaluation}. @@ -677,9 +677,9 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap - * (Configuration)}.
+ * Test for + * {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.HTableDescriptor)} + * and {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. * Tests that the compression map is correctly serialized into * and deserialized from configuration * @@ -748,9 +748,9 @@ public class TestHFileOutputFormat2 { /** - * Test for {@link HFileOutputFormat2#configureBloomType(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyBloomTypeMap - * (Configuration)}. + * Test for + * {@link HFileOutputFormat2#configureBloomType(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.conf.Configuration)} + * and {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. * Tests that the compression map is correctly serialized into * and deserialized from configuration * @@ -819,9 +819,8 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#configureBlockSize(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyBlockSizeMap - * (Configuration)}. + * Test for {@link HFileOutputFormat2#configureBlockSize(org.apache.hadoop.hbase.HTableDescriptor, org.apache.hadoop.conf.Configuration)} + * and {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. * Tests that the compression map is correctly serialized into * and deserialized from configuration * diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 50146fd..6f7d31d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -507,10 +507,9 @@ public class TestImportExport { /** * Count the number of keyvalues in the specified table for the given timerange - * @param start - * @param end * @param table - * @return + * @param filter + * @return count of number of key values * @throws IOException */ private int getCount(Table table, Filter filter) throws IOException { @@ -703,7 +702,10 @@ public class TestImportExport { } /** - * This listens to the {@link #visitLogEntryBeforeWrite(HTableDescriptor, WALKey, WALEdit)} to + * This listens to the + * + * + * {@link org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad.FindBulkHBaseListener#visitLogEntryBeforeWrite(org.apache.hadoop.hbase.wal.WALKey, org.apache.hadoop.hbase.regionserver.wal.WALEdit)} to * identify that an entry is written to the Write Ahead Log for the given table. 
*/ private static class TableWALActionListener extends WALActionsListener.Base { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java index 738ae5f..550399d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiHFileOutputFormat.java @@ -115,7 +115,8 @@ public class TestMultiHFileOutputFormat { } /** - * MR will output a 3 level directory, tableName->ColumnFamilyName->HFile this method to check the + * MR will output a 3 level directory, tableName-&gt;ColumnFamilyName-&gt;HFile this method to + * check the * created directory is correct or not A recursion method, the testDir had better be small size */ private boolean checkMROutput(FileSystem fs, Path testDir, int level) @@ -144,7 +145,7 @@ public class TestMultiHFileOutputFormat { } /** - * Simple mapper that makes <TableName, KeyValue> output. With no input data + * Simple mapper that makes &lt;TableName, KeyValue&gt; output. With no input data */ static class Random_TableKV_GeneratingMapper extends Mapper { @@ -195,8 +196,8 @@ public class TestMultiHFileOutputFormat { } /** - * Simple Reducer that have input <TableName, KeyValue>, with KeyValues have no order. and output - * <TableName, KeyValue>, with KeyValues are ordered + * Simple Reducer that have input &lt;TableName, KeyValue&gt;, with KeyValues have no order. + * and output &lt;TableName, KeyValue&gt;, with KeyValues are ordered */ static class Table_KeyValueSortReducer diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 2927023..568574b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -137,9 +137,9 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { private final Random random = new Random(); /** - * Map of regions to map of rows and {@link Results}. Used as data source when - * {@link MockRegionServer#get(byte[], Get)} is called. Because we have a byte - * key, need to use TreeMap and provide a Comparator. Use + * Map of regions to map of rows and {@link org.apache.hadoop.hbase.client.Result}s. Used as + * data source when {@link MockRegionServer#get(byte[], Get)} is called. Because we have a byte + * key, need to use TreeMap and provide a Comparator. Use * {@link #setGetResult(byte[], byte[], Result)} filling this map. */ private final Map<byte [], Map<byte [], Result>> gets = @@ -192,7 +192,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } /** - * Use this method filling the backing data source used by {@link #get(byte[], Get)} + * Use this method filling the backing data source used by {@link get(byte[], Get)} * @param regionName * @param row * @param r diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index e10ab2a..662bce4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -190,7 +190,7 @@ public class TestActiveMasterManager { /** * Assert there is an active master and that it has the specified address.
* @param zk - * @param thisMasterAddress + * @param expectedAddress * @throws KeeperException * @throws IOException */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index d7f0a32..d224d82 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -352,7 +352,7 @@ public class TestRegionPlacement { /** * Verify the meta has updated to the latest assignment plan - * @param plan + * @param expectedPlan * @throws IOException */ private void verifyMETAUpdated(FavoredNodesPlan expectedPlan) @@ -530,9 +530,8 @@ public class TestRegionPlacement { /** * Create a table with specified table name and region number. - * @param tablename + * @param tableName * @param regionNum - * @return * @throws IOException */ private static void createTable(TableName tableName, int regionNum) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index cb7337e..13a9f0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -46,7 +46,7 @@ import org.junit.After; import org.junit.Test; /** - * Run tests that use the HBase clients; {@link HTable}. + * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.HTable}. * Sets up the HBase mini cluster once at start and runs through all client tests. * Each creates a table named for the method and does its stuff against that. */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 7970d62..ddc630d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -282,10 +282,10 @@ public class TestPartitionedMobCompactor { /** * Creates store files. * @param basePath the path to create file - * @family the family name - * @qualifier the column qualifier - * @count the store file number - * @type the key type + * @param family the family name + * @param qualifier the column qualifier + * @param count the store file number + * @param type the key type */ private void createStoreFiles(Path basePath, String family, String qualifier, int count, Type type) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java index 0d31108..34d7efd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java @@ -224,7 +224,8 @@ public class TestProcedureMember { /** * Fail correctly if coordinator aborts the procedure. 
The subprocedure will not interrupt a - * running {@link Subprocedure#prepare} -- prepare needs to finish first, and the the abort + * running {@link org.apache.hadoop.hbase.procedure.Subprocedure} -- prepare needs to finish + * first, and the the abort * is checked. Thus, the {@link Subprocedure#prepare} should succeed but later get rolled back * via {@link Subprocedure#cleanup}. */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java index 211e9e6..5bc2c87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java @@ -330,7 +330,6 @@ public class TestZKProcedure { /** * Wait for the coordinator task to complete, and verify all the mocks - * @param task to wait on * @throws Exception on unexpected failure */ private void waitAndVerifyProc(Procedure proc, VerificationMode prepare, @@ -354,7 +353,6 @@ public class TestZKProcedure { /** * Wait for the coordinator task to complete, and verify all the mocks - * @param task to wait on * @throws Exception on unexpected failure */ private void waitAndVerifySubproc(Subprocedure op, VerificationMode prepare, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index d0c0089..55b5c34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -121,7 +121,7 @@ public class TestCompoundBloomFilter { private FileSystem fs; private BlockCache blockCache; - /** A message of the form "in test#<number>:" to include in logging. */ + /** A message of the form "in test#&lt;number&gt;:" to include in logging. */ private String testIdMsg; private static final int GENERATION_SEED = 2319; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 9aa3a9b..8e465c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -5231,7 +5231,7 @@ public class TestHRegion { /** * Assert that the passed in Cell has expected contents for the specified row, - * column & timestamp. + * column &amp; timestamp. */ private void checkOneCell(Cell kv, byte[] cf, int rowIdx, int colIdx, long ts) { String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index a3804dd..bad75b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -1436,7 +1436,7 @@ public class TestHRegionReplayEvents { } /** - * Paths can be qualified or not. This does the assertion using String->Path conversion. + * Paths can be qualified or not. This does the assertion using String-&gt;Path conversion.
*/ private void assertPathListsEqual(List list1, List list2) { List l1 = new ArrayList<>(list1.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java index 6a4aceb..03b9386 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java @@ -121,7 +121,6 @@ public class TestMobStoreScanner { * * @param reversed if true, scan will be backward order * @param mobScanRaw if true, scan will get the mob reference - * @return this */ public void setScan(Scan scan, boolean reversed, boolean mobScanRaw) { scan.setReversed(reversed); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java index 4d3a1c3..dfb0c47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java @@ -109,7 +109,7 @@ public class TestRowTooBig { * Usecase: * * - create a row with 1M cells, 10 bytes in each - * - flush & run major compaction + * - flush &amp; run major compaction * - try to Get whole row. * * OOME happened in StoreScanner.next(..). diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 1b42754..ea2ce77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -483,7 +483,8 @@ public class TestScanner { /** * Tests to do a concurrent flush (using a 2nd thread) while scanning. This tests both - * the StoreScanner update readers and the transition from memstore -> snapshot -> store file. + * the StoreScanner update readers and the transition from memstore -&gt; snapshot -&gt; store + * file. * * @throws Exception */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 96ec698..09cb66a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -511,7 +511,7 @@ public class TestSplitLogWorker { /** * Create a mocked region server service instance - * @param server + * @param name * @return */ private RegionServerServices getRegionServer(ServerName name) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index d8770e0..e981b88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -569,7 +569,7 @@ public class TestStripeCompactionPolicy { * @param count Expected # of resulting stripes, null if not checked. * @param size Expected target stripe size, null if not checked. * @param start Left boundary of the compaction.
- * @param righr Right boundary of the compaction. + * @param end Right boundary of the compaction. */ private void verifyCompaction(StripeCompactionPolicy policy, StripeInformationProvider si, Collection sfs, Boolean dropDeletes, Integer count, Long size, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index 101758e..56a02f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -45,12 +45,12 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.DefaultCodec; /** - * Implementation of {@link WALProvider.Writer} that delegates to + * Implementation of {@link org.apache.hadoop.hbase.wal.WALProvider.Writer} that delegates to * SequenceFile.Writer. Legacy implementation only used for compat tests. * * Note that because this class writes to the legacy hadoop-specific SequenceFile * format, users of it must write {@link HLogKey} keys and not arbitrary - * {@link WALKey}s because the latter are not Writables (nor made to work with + * {@link org.apache.hadoop.hbase.wal.WALKey}s because the latter are not Writables (nor made to work with * Hadoop serialization). */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index bf46b03..b8ed73f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -115,8 +115,8 @@ public class TestLogRolling extends AbstractTestLogRolling { } /** - * Tests that logs are rolled upon detecting datanode death Requires an HDFS jar with HDFS-826 & - * syncFs() support (HDFS-200) + * Tests that logs are rolled upon detecting datanode death Requires an HDFS jar with HDFS-826 + * &amp; syncFs() support (HDFS-200) */ @Test public void testLogRollOnDatanodeDeath() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index f77bafe..235a7c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -139,7 +139,7 @@ public class TestMasterReplication { } /** - * It tests the replication scenario involving 0 -> 1 -> 0. It does it by + * It tests the replication scenario involving 0 -&gt; 1 -&gt; 0. It does it by * adding and deleting a row to a table in each cluster, checking if it's * replicated. It also tests that the puts and deletes are not replicated back * to the originating cluster. @@ -170,7 +170,7 @@ public class TestMasterReplication { } /** - * It tests the replication scenario involving 0 -> 1 -> 0. It does it by bulk loading a set of + * It tests the replication scenario involving 0 -&gt; 1 -&gt; 0. It does it by bulk loading a set of * HFiles to a table in each cluster, checking if it's replicated.
*/ @Test(timeout = 300000) @@ -223,7 +223,7 @@ public class TestMasterReplication { } /** - * Tests the cyclic replication scenario of 0 -> 1 -> 2 -> 0 by adding and deleting rows to a + * Tests the cyclic replication scenario of 0 -&gt; 1 -&gt; 2 -&gt; 0 by adding and deleting rows to a * table in each clusters and ensuring that the each of these clusters get the appropriate * mutations. It also tests the grouping scenario where a cluster needs to replicate the edits * originating from itself and also the edits that it received using replication from a different @@ -278,7 +278,7 @@ public class TestMasterReplication { } /** - * It tests the multi slave hfile replication scenario involving 0 -> 1, 2. It does it by bulk + * It tests the multi slave hfile replication scenario involving 0 -&gt; 1, 2. It does it by bulk * loading a set of HFiles to a table in master cluster, checking if it's replicated in its peers. */ @Test(timeout = 300000) @@ -391,7 +391,7 @@ public class TestMasterReplication { } /** - * Tests cyclic replication scenario of 0 -> 1 -> 2 -> 1. + * Tests cyclic replication scenario of 0 -&gt; 1 -&gt; 2 -&gt; 1. */ @Test(timeout = 300000) public void testCyclicReplication3() throws Exception { diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala index 0c29f50..1a3c370 100644 --- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala +++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala @@ -26,7 +26,11 @@ import org.apache.hadoop.hbase.mapred.TableOutputFormat import org.apache.hadoop.hbase.spark.datasources._ import org.apache.hadoop.hbase.types._ import org.apache.hadoop.hbase.util.{Bytes, PositionedByteRange, SimplePositionedMutableByteRange} -import org.apache.hadoop.hbase._ +import org.apache.hadoop.hbase.HBaseConfiguration +import org.apache.hadoop.hbase.HTableDescriptor +import org.apache.hadoop.hbase.HColumnDescriptor +import org.apache.hadoop.hbase.TableName +import org.apache.hadoop.hbase.CellUtil import org.apache.hadoop.mapred.JobConf import org.apache.spark.Logging import org.apache.spark.rdd.RDD
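Taken together, most hunks in this patch apply one rule: javadoc is HTML, so bare <, > and & in prose must be escaped as entities (or moved inside an inline {@code} tag), and @see takes a plain reference or an <a> tag rather than a nested {@link}. A hypothetical doclint-clean example illustrating those conventions (this class is invented and is not part of the patch):

/** Hypothetical class showing the javadoc conventions this patch enforces. */
public final class JavadocEntityExample {
  /**
   * Returns -1 when startRow &gt;= endRow; entities keep the generated HTML
   * valid, while raw operators are legal inside an inline code tag, e.g.
   * {@code startRow >= endRow}.
   *
   * @param startRow inclusive lower bound
   * @param endRow exclusive upper bound
   * @return a row count, or -1 for an empty range
   * @see java.util.Map
   */
  public long countRows(byte[] startRow, byte[] endRow) {
    return -1L; // stub only; the body is illustration, not real logic
  }
}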