commit dd8c5627930c95a08397aa703f4e911fc0177197 Author: stack Date: Mon Aug 25 23:08:40 2014 -0700 Change EE#getCurrentTimeMills to EE#getCurrentTime diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index e9ca88b..c63e4c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -1411,7 +1411,7 @@ public class MetaTableAccessor { public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){ // using regionserver's local time as the timestamp of Put. // See: HBASE-11536 - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, Bytes.toBytes(sn.getHostAndPort())); p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 2768da0..34136e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -943,7 +943,7 @@ class AsyncProcess { * Starts waiting to issue replica calls on a different thread; or issues them immediately. */ private void startWaitingForReplicaCalls(List> actionsForReplicaThread) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); ReplicaCallIssuingRunnable replicaRunnable = new ReplicaCallIssuingRunnable( actionsForReplicaThread, startTime); if (primaryCallTimeoutMicroseconds == 0) { @@ -1421,10 +1421,10 @@ class AsyncProcess { private boolean waitUntilDone(long cutoff) throws InterruptedException { boolean hasWait = cutoff != Long.MAX_VALUE; - long lastLog = EnvironmentEdgeManager.currentTimeMillis(); + long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress; while (0 != (currentInProgress = actionsInProgress.get())) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (hasWait && (now * 1000L) > cutoff) { return false; } @@ -1504,11 +1504,11 @@ class AsyncProcess { /** Wait until the async does not have more than max tasks in progress. */ private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException { - long lastLog = EnvironmentEdgeManager.currentTimeMillis(); + long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress, oldInProgress = Long.MAX_VALUE; while ((currentInProgress = this.tasksInProgress.get()) > max) { if (oldInProgress != currentInProgress) { // Wait for in progress to change. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > lastLog + 10000) { lastLog = now; LOG.info("#" + id + ", waiting for some tasks to finish. 
Expected max=" diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 0813745..7c9c0b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -2513,7 +2513,7 @@ class ConnectionManager { public ServerErrorTracker(long timeout, int maxRetries) { this.maxRetries = maxRetries; - this.canRetryUntil = EnvironmentEdgeManager.currentTimeMillis() + timeout; + this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; this.startTrackingTime = new Date().getTime(); } @@ -2523,7 +2523,7 @@ class ConnectionManager { boolean canRetryMore(int numRetry) { // If there is a single try we must not take into account the time. return numRetry < maxRetries || (maxRetries > 1 && - EnvironmentEdgeManager.currentTimeMillis() < this.canRetryUntil); + EnvironmentEdgeManager.currentTime() < this.canRetryUntil); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index fe8fb31..5d25c0b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -822,7 +822,7 @@ public class HBaseAdmin implements Admin { */ private void waitUntilTableIsEnabled(final TableName tableName) throws IOException { boolean enabled = false; - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { try { enabled = isTableEnabled(tableName); @@ -848,7 +848,7 @@ public class HBaseAdmin implements Admin { } } if (!enabled) { - long msec = EnvironmentEdgeManager.currentTimeMillis() - start; + long msec = EnvironmentEdgeManager.currentTime() - start; throw new IOException("Table '" + tableName + "' not yet enabled, after " + msec + "ms."); } @@ -2802,7 +2802,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot) .build(); IsSnapshotDoneResponse done = null; - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); long max = response.getExpectedTimeout(); long maxPauseTime = max / this.numRetries; int tries = 0; @@ -2810,7 +2810,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. 
(max " + maxPauseTime + " ms per retry)"); while (tries == 0 - || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) { + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) { try { // sleep a backoff <= pauseTime amount long sleep = getPauseTime(tries++); @@ -3011,7 +3011,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName .replace("{snapshot.name}", snapshotName) .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) - .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); + .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); snapshot(failSafeSnapshotSnapshotName, tableName); } @@ -3185,7 +3185,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) } }); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); long max = response.getExpectedTimeout(); long maxPauseTime = max / this.numRetries; int tries = 0; @@ -3193,7 +3193,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)"); boolean done = false; while (tries == 0 - || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done)) { + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { try { // sleep a backoff <= pauseTime amount long sleep = getPauseTime(tries++); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 9d378ae..4c2c4d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -485,7 +485,7 @@ public class HTableMultiplexer { int failedCount = 0; while (true) { try { - start = elapsed = EnvironmentEdgeManager.currentTimeMillis(); + start = elapsed = EnvironmentEdgeManager.currentTime(); // Clear the processingList, putToStatusMap and failedCount processingList.clear(); @@ -545,7 +545,7 @@ public class HTableMultiplexer { // Update the totalFailedCount this.totalFailedPutCount.addAndGet(failedCount); - elapsed = EnvironmentEdgeManager.currentTimeMillis() - start; + elapsed = EnvironmentEdgeManager.currentTime() - start; // Update latency counters averageLatency.add(elapsed); if (elapsed > maxLatency.get()) { @@ -566,7 +566,7 @@ public class HTableMultiplexer { // Sleep for a while if (elapsed == start) { - elapsed = EnvironmentEdgeManager.currentTimeMillis() - start; + elapsed = EnvironmentEdgeManager.currentTime() - start; } if (elapsed < frequency) { try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java index 9e11a27..cf9a210 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java @@ -73,7 +73,7 @@ public class RpcRetryingCaller { } else { if (callTimeout == Integer.MAX_VALUE) return Integer.MAX_VALUE; int remainingTime = (int) (callTimeout - - (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime)); + 
(EnvironmentEdgeManager.currentTime() - this.globalStartTime)); if (remainingTime < MIN_RPC_TIMEOUT) { // If there is no time left, we're trying anyway. It's too late. // 0 means no timeout, and it's not the intent here. So we secure both cases by @@ -103,7 +103,7 @@ public class RpcRetryingCaller { throws IOException, RuntimeException { List exceptions = new ArrayList(); - this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); + this.globalStartTime = EnvironmentEdgeManager.currentTime(); for (int tries = 0;; tries++) { long expectedSleep; try { @@ -113,7 +113,7 @@ public class RpcRetryingCaller { ExceptionUtil.rethrowIfInterrupt(t); if (LOG.isTraceEnabled()) { LOG.trace("Call exception, tries=" + tries + ", retries=" + retries + ", started=" + - (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + " ms ago, " + (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + " ms ago, " + "cancelled=" + cancelled.get(), t); } @@ -122,7 +122,7 @@ public class RpcRetryingCaller { callable.throwable(t, retries != 1); RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTimeMillis(), toString()); + EnvironmentEdgeManager.currentTime(), toString()); exceptions.add(qt); if (tries >= retries - 1) { throw new RetriesExhaustedException(tries, exceptions); @@ -158,7 +158,7 @@ public class RpcRetryingCaller { * @return Calculate how long a single call took */ private long singleCallDuration(final long expectedSleep) { - return (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + expectedSleep; + return (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + expectedSleep; } /** @@ -173,7 +173,7 @@ public class RpcRetryingCaller { public T callWithoutRetries(RetryingCallable callable, int callTimeout) throws IOException, RuntimeException { // The code of this method should be shared with withRetries. - this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); + this.globalStartTime = EnvironmentEdgeManager.currentTime(); try { callable.prepare(false); return callable.call(callTimeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 6cd422f..f15ad02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -252,7 +252,7 @@ public class RpcRetryingCallerWithReadReplicas { RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTimeMillis(), null); + EnvironmentEdgeManager.currentTime(), null); List exceptions = Collections.singletonList(qt); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 0262462..ff5168d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -188,7 +188,7 @@ public class RpcClient { * Add an address to the list of the failed servers list. 
*/ public synchronized void addToFailedServers(InetSocketAddress address) { - final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout; + final long expiry = EnvironmentEdgeManager.currentTime() + recheckServersTimeout; failedServers.addFirst(new Pair(expiry, address.toString())); } @@ -203,7 +203,7 @@ public class RpcClient { } final String lookup = address.toString(); - final long now = EnvironmentEdgeManager.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); // iterate, looking for the search entry and cleaning expired entries Iterator> it = failedServers.iterator(); @@ -261,7 +261,7 @@ public class RpcClient { this.param = param; this.md = md; this.cells = cells; - this.startTime = EnvironmentEdgeManager.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTime(); this.responseDefaultType = responseDefaultType; this.id = callIdCnt.getAndIncrement(); this.timeout = timeout; @@ -277,7 +277,7 @@ public class RpcClient { return false; } - long waitTime = EnvironmentEdgeManager.currentTimeMillis() - getStartTime(); + long waitTime = EnvironmentEdgeManager.currentTime() - getStartTime(); if (waitTime >= timeout) { IOException ie = new CallTimeoutException("Call id=" + id + ", waitTime=" + waitTime + ", operationTimeout=" + timeout + " expired."); @@ -293,7 +293,7 @@ public class RpcClient { return Integer.MAX_VALUE; } - int remaining = timeout - (int) (EnvironmentEdgeManager.currentTimeMillis() - getStartTime()); + int remaining = timeout - (int) (EnvironmentEdgeManager.currentTime() - getStartTime()); return remaining > 0 ? remaining : 0; } @@ -731,9 +731,9 @@ public class RpcClient { protected synchronized boolean waitForWork() throws InterruptedException { // beware of the concurrent access to the calls list: we can add calls, but as well // remove them. - long waitUntil = EnvironmentEdgeManager.currentTimeMillis() + minIdleTimeBeforeClose; + long waitUntil = EnvironmentEdgeManager.currentTime() + minIdleTimeBeforeClose; while (!shouldCloseConnection.get() && running.get() && - EnvironmentEdgeManager.currentTimeMillis() < waitUntil && calls.isEmpty()) { + EnvironmentEdgeManager.currentTime() < waitUntil && calls.isEmpty()) { wait(Math.min(minIdleTimeBeforeClose, 1000)); } @@ -752,7 +752,7 @@ public class RpcClient { return true; } - if (EnvironmentEdgeManager.currentTimeMillis() >= waitUntil) { + if (EnvironmentEdgeManager.currentTime() >= waitUntil) { // Connection is idle. // We expect the number of calls to be zero here, but actually someone can // adds a call at the any moment, as there is no synchronization between this task @@ -1249,7 +1249,7 @@ public class RpcClient { // To catch the calls without timeout that were cancelled. itor.remove(); } else if (allCalls) { - long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime(); + long waitTime = EnvironmentEdgeManager.currentTime() - c.getStartTime(); IOException ie = new IOException("Connection to " + getRemoteAddress() + " is closing. 
Call id=" + c.id + ", waitTime=" + waitTime); c.setException(ie); @@ -1673,7 +1673,7 @@ public class RpcClient { throws ServiceException { long startTime = 0; if (LOG.isTraceEnabled()) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); } int callTimeout = 0; CellScanner cells = null; @@ -1695,7 +1695,7 @@ public class RpcClient { } if (LOG.isTraceEnabled()) { - long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long callTime = EnvironmentEdgeManager.currentTime() - startTime; LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms"); } return val.getFirst(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java index e8eab93..b8461f2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java @@ -25,15 +25,13 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public class DefaultEnvironmentEdge implements EnvironmentEdge { - - /** * {@inheritDoc} *
<p>
* This implementation returns {@link System#currentTimeMillis()} */ @Override - public long currentTimeMillis() { + public long currentTime() { return System.currentTimeMillis(); } -} +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java index a43fa66..ee8c00a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java @@ -28,11 +28,10 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public interface EnvironmentEdge { - /** - * Returns the currentTimeMillis. + * Returns the currentTime. * - * @return currentTimeMillis. + * @return Current time. */ - long currentTimeMillis(); -} + long currentTime(); +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java index c7d4b25..809bbe3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java @@ -67,11 +67,11 @@ public class EnvironmentEdgeManager { /** * Defers to the delegate and calls the - * {@link EnvironmentEdge#currentTimeMillis()} method. + * {@link EnvironmentEdge#currentTime()} method. * * @return current time in millis according to the delegate. */ - public static long currentTimeMillis() { - return getDelegate().currentTimeMillis(); + public static long currentTime() { + return getDelegate().currentTime(); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java index c1ce25c..18a258d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java @@ -50,7 +50,7 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge { * method is called. The first value is 1. 
*/ @Override - public synchronized long currentTimeMillis() { + public synchronized long currentTime() { return timeIncrement++; } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java index 13e42fb..bd9efb5 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java @@ -58,11 +58,9 @@ public class TestEnvironmentEdgeManager { EnvironmentEdge mock = mock(EnvironmentEdge.class); EnvironmentEdgeManager.injectEdge(mock); long expectation = 3456; - when(mock.currentTimeMillis()).thenReturn(expectation); - long result = EnvironmentEdgeManager.currentTimeMillis(); - verify(mock).currentTimeMillis(); + when(mock.currentTime()).thenReturn(expectation); + long result = EnvironmentEdgeManager.currentTime(); + verify(mock).currentTime(); assertEquals(expectation, result); } - -} - +} \ No newline at end of file diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java index 1180249..8b6f975 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java @@ -98,7 +98,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION") public byte[] getData() { // try at most twice/minute - if (needSetup && EnvironmentEdgeManager.currentTimeMillis() > lastSetupTry + 30000) { + if (needSetup && EnvironmentEdgeManager.currentTime() > lastSetupTry + 30000) { synchronized (this) { // make sure only one thread tries to reconnect if (needSetup) { @@ -112,7 +112,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { try { LOG.debug("Connecting to ZK"); // record this attempt - lastSetupTry = EnvironmentEdgeManager.currentTimeMillis(); + lastSetupTry = EnvironmentEdgeManager.currentTime(); if (zk.exists(node, false) != null) { data = zk.getData(node, this, null); LOG.debug("Read synchronously: "+(data == null ? 
"null" : Bytes.toLong(data))); @@ -186,7 +186,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { if (oldSI.getTtl() == Long.MAX_VALUE) { return null; } - long ttl = Math.max(EnvironmentEdgeManager.currentTimeMillis() - + long ttl = Math.max(EnvironmentEdgeManager.currentTime() - Bytes.toLong(data), oldSI.getTtl()); return new ScanInfo(store.getFamily(), ttl, oldSI.getTimeToPurgeDeletes(), oldSI.getComparator()); diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index 4ab53c7..824910a 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -75,7 +75,7 @@ public class TestZooKeeperScanPolicyObserver { desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "test", null); ZooKeeper zk = zkw.getRecoverableZooKeeper().getZooKeeper(); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java index 377bbdd..a1e306d 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java @@ -235,7 +235,7 @@ public class IntegrationTestLazyCfLoading { writer.start(1, keysToWrite, WRITER_THREADS); // Now, do scans. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); long timeLimit = now + (maxRuntime * 60000); boolean isWriterDone = false; while (now < timeLimit && !isWriterDone) { @@ -255,7 +255,7 @@ public class IntegrationTestLazyCfLoading { // Not a strict lower bound - writer knows nothing about filters, so we report // this from generator. Writer might have generated the value but not put it yet. long onesGennedBeforeScan = dataGen.getExpectedNumberOfKeys(); - long startTs = EnvironmentEdgeManager.currentTimeMillis(); + long startTs = EnvironmentEdgeManager.currentTime(); ResultScanner results = table.getScanner(scan); long resultCount = 0; Result result = null; @@ -265,7 +265,7 @@ public class IntegrationTestLazyCfLoading { Assert.assertTrue("Failed to verify [" + Bytes.toString(result.getRow())+ "]", isOk); ++resultCount; } - long timeTaken = EnvironmentEdgeManager.currentTimeMillis() - startTs; + long timeTaken = EnvironmentEdgeManager.currentTime() - startTs; // Verify the result count. 
long onesGennedAfterScan = dataGen.getExpectedNumberOfKeys(); Assert.assertTrue("Read " + resultCount + " keys when at most " + onesGennedAfterScan @@ -280,7 +280,7 @@ public class IntegrationTestLazyCfLoading { LOG.info("Scan took " + timeTaken + "ms"); if (!isWriterDone) { Thread.sleep(WAIT_BETWEEN_SCANS_MS); - now = EnvironmentEdgeManager.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTime(); } } Assert.assertEquals("There are write failures", 0, writer.getNumWriteFailures()); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java index 1e2203a..f7f8727 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java @@ -246,7 +246,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { private void runLinkedListMRJob(int iteration) throws Exception { String jobName = IntegrationTestBulkLoad.class.getSimpleName() + " - " + - EnvironmentEdgeManager.currentTimeMillis(); + EnvironmentEdgeManager.currentTime(); Configuration conf = new Configuration(util.getConfiguration()); Path p = util.getDataTestDirOnTestFS(getTablename() + "-" + iteration); HTable table = new HTable(conf, getTablename()); @@ -639,7 +639,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { private void runCheck() throws IOException, ClassNotFoundException, InterruptedException { LOG.info("Running check"); Configuration conf = getConf(); - String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTimeMillis(); + String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime(); Path p = util.getDataTestDirOnTestFS(jobName); Job job = new Job(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index f9cc60f..a04cb88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -255,7 +255,7 @@ public class HFileArchiver { } // do the actual archive - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); File file = new FileablePath(fs, storeFile); if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { throw new IOException("Failed to archive/delete the file for region:" @@ -280,7 +280,7 @@ public class HFileArchiver { private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir, Collection toArchive) throws IOException { if (LOG.isTraceEnabled()) LOG.trace("Starting to archive " + toArchive); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); List failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start); // notify that some files were not archived. 
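Every hunk in this patch is the same mechanical substitution, because all of these call sites read the clock through a single injectable delegate instead of calling System.currentTimeMillis() directly. Below is a minimal sketch of that pattern, assuming only the renamed EnvironmentEdge#currentTime() and the EnvironmentEdgeManager.injectEdge()/currentTime() calls that appear in this patch; the FixedEnvironmentEdge class is a hypothetical stand-in for HBase's own edges such as DefaultEnvironmentEdge and IncrementingEnvironmentEdge.

// Illustrative sketch only -- FixedEnvironmentEdge is hypothetical; the
// injectEdge() and currentTime() calls are the ones this patch exercises.
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class ClockInjectionSketch {

  /** A clock frozen at a fixed instant, handy for deterministic tests. */
  static class FixedEnvironmentEdge implements EnvironmentEdge {
    private final long fixedTime;

    FixedEnvironmentEdge(long fixedTime) {
      this.fixedTime = fixedTime;
    }

    @Override
    public long currentTime() {   // was currentTimeMillis() before this commit
      return fixedTime;
    }
  }

  public static void main(String[] args) {
    // Swap the process-wide clock; every call site in this patch now sees it.
    EnvironmentEdgeManager.injectEdge(new FixedEnvironmentEdge(3456L));
    long now = EnvironmentEdgeManager.currentTime();
    System.out.println(now);   // prints 3456
  }
}

This is why the rename is safe despite touching dozens of files: only the delegate's method name changes, the return value is still milliseconds (per the EnvironmentEdgeManager javadoc above), and tests such as TestEnvironmentEdgeManager can pin or advance the clock without any call-site behavior changing.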
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java index 79bfcde..ba25ac6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java @@ -63,7 +63,7 @@ public class TimeoutExceptionInjector { // mark the task is run, to avoid repeats TimeoutExceptionInjector.this.complete = true; } - long end = EnvironmentEdgeManager.currentTimeMillis(); + long end = EnvironmentEdgeManager.currentTime(); TimeoutException tee = new TimeoutException( "Timeout caused Foreign Exception", start, end, maxTime); String source = "timer-" + timer; @@ -107,7 +107,7 @@ public class TimeoutExceptionInjector { } LOG.debug("Scheduling process timer to run in: " + maxTime + " ms"); timer.schedule(timerTask, maxTime); - this.start = EnvironmentEdgeManager.currentTimeMillis(); + this.start = EnvironmentEdgeManager.currentTime(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 86c8944..2bb0b8c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -918,7 +918,7 @@ public class BucketCache implements BlockCache, HeapSize { * cache */ private void checkIOErrorIsTolerated() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (this.ioErrorStartTime > 0) { if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) { LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 5a290bf..3069dc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -32,7 +32,7 @@ public class BucketCacheStats extends CacheStats { private final AtomicLong ioHitCount = new AtomicLong(0); private final AtomicLong ioHitTime = new AtomicLong(0); private final static int nanoTime = 1000000; - private long lastLogTime = EnvironmentEdgeManager.currentTimeMillis(); + private long lastLogTime = EnvironmentEdgeManager.currentTime(); BucketCacheStats() { super("BucketCache"); @@ -50,7 +50,7 @@ public class BucketCacheStats extends CacheStats { } public long getIOHitsPerSecond() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); long took = (now - lastLogTime) / 1000; lastLogTime = now; return took == 0? 
0: ioHitCount.get() / took; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 53f159a..be481b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -670,7 +670,7 @@ public class AssignmentManager { */ boolean assign(final ServerName destination, final List regions) throws InterruptedException { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { int regionCount = regions.size(); if (regionCount == 0) { @@ -822,7 +822,7 @@ public class AssignmentManager { LOG.debug("Bulk assigning done for " + destination); return true; } finally { - metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime); } } @@ -884,11 +884,11 @@ public class AssignmentManager { } else { if (maxWaitTime < 0) { maxWaitTime = - EnvironmentEdgeManager.currentTimeMillis() + EnvironmentEdgeManager.currentTime() + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME, DEFAULT_ALREADY_IN_TRANSITION_WAITTIME); } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now < maxWaitTime) { LOG.debug("Region is already in transition; " + "waiting up to " + (maxWaitTime - now) + "ms", t); @@ -980,7 +980,7 @@ public class AssignmentManager { * @param forceNewPlan */ private void assign(RegionState state, boolean forceNewPlan) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { Configuration conf = server.getConfiguration(); RegionPlan plan = null; @@ -1083,18 +1083,18 @@ public class AssignmentManager { if (maxWaitTime < 0) { if (t instanceof RegionAlreadyInTransitionException) { - maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() + maxWaitTime = EnvironmentEdgeManager.currentTime() + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME, DEFAULT_ALREADY_IN_TRANSITION_WAITTIME); } else { - maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() + maxWaitTime = EnvironmentEdgeManager.currentTime() + this.server.getConfiguration().getLong( "hbase.regionserver.rpc.startup.waittime", 60000); } } try { needNewPlan = false; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now < maxWaitTime) { LOG.debug("Server is not yet up or region is already in transition; " + "waiting up to " + (maxWaitTime - now) + "ms", t); @@ -1175,7 +1175,7 @@ public class AssignmentManager { // Run out of attempts regionStates.updateRegionState(region, State.FAILED_OPEN); } finally { - metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime); } } @@ -1922,7 +1922,7 @@ public class AssignmentManager { public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut) throws InterruptedException { if (!regionStates.isRegionInTransition(hri)) return true; - long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis() + long end = (timeOut <= 0) ? 
Long.MAX_VALUE : EnvironmentEdgeManager.currentTime() + timeOut; // There is already a timeout monitor on regions in transition so I // should not have to have one here too? @@ -1930,7 +1930,7 @@ public class AssignmentManager { " to leave regions-in-transition, timeOut=" + timeOut + " ms."); while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) { regionStates.waitForUpdate(100); - if (EnvironmentEdgeManager.currentTimeMillis() > end) { + if (EnvironmentEdgeManager.currentTime() > end) { LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned."); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index 6fe190f..85ec8cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -139,7 +139,7 @@ public class ClusterStatusPublisher extends Chore { return; } - final long curTime = EnvironmentEdgeManager.currentTimeMillis(); + final long curTime = EnvironmentEdgeManager.currentTime(); if (lastMessageTime > curTime - messagePeriod) { // We already sent something less than 10 second ago. Done. return; @@ -177,7 +177,7 @@ public class ClusterStatusPublisher extends Chore { */ protected List generateDeadServersListToSend() { // We're getting the message sent since last time, and add them to the list - long since = EnvironmentEdgeManager.currentTimeMillis() - messagePeriod * 2; + long since = EnvironmentEdgeManager.currentTime() - messagePeriod * 2; for (Pair dead : getDeadServers(since)) { lastSent.putIfAbsent(dead.getFirst(), 0); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index 48b6ccf..5c232a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -110,7 +110,7 @@ public class DeadServer { public synchronized void add(ServerName sn) { this.numProcessing++; if (!deadServers.containsKey(sn)){ - deadServers.put(sn, EnvironmentEdgeManager.currentTimeMillis()); + deadServers.put(sn, EnvironmentEdgeManager.currentTime()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 9b3a948..87717ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -383,9 +383,9 @@ public class MasterFileSystem { List logDirs = getLogDirs(serverNames); splitLogManager.handleDeadWorkers(serverNames); - splitTime = EnvironmentEdgeManager.currentTimeMillis(); + splitTime = EnvironmentEdgeManager.currentTime(); splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter); - splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime; + splitTime = EnvironmentEdgeManager.currentTime() - splitTime; if (this.metricsMasterFilesystem != null) { if (filter == META_FILTER) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index b9414cd..6a68593 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -314,7 +314,7 @@ public class SplitLogManager extends ZooKeeperListener { LOG.debug("Scheduling batch of logs to split"); SplitLogCounters.tot_mgr_log_split_batch_start.incrementAndGet(); LOG.info("started splitting " + logfiles.length + " logs in " + logDirs); - long t = EnvironmentEdgeManager.currentTimeMillis(); + long t = EnvironmentEdgeManager.currentTime(); long totalSize = 0; TaskBatch batch = new TaskBatch(); Boolean isMetaRecovery = (filter == null) ? null : false; @@ -368,7 +368,7 @@ public class SplitLogManager extends ZooKeeperListener { } String msg = "finished splitting (more than or equal to) " + totalSize + " bytes in " + batch.installed + " log files in " + logDirs + " in " + - (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms"; + (EnvironmentEdgeManager.currentTime() - t) + "ms"; status.markComplete(msg); LOG.info(msg); return totalSize; @@ -386,7 +386,7 @@ public class SplitLogManager extends ZooKeeperListener { // This is a znode path under the splitlog dir with the rest of the path made up of an // url encoding of the passed in log to split. String path = ZKSplitLog.getEncodedNodeName(watcher, taskname); - lastTaskCreateTime = EnvironmentEdgeManager.currentTimeMillis(); + lastTaskCreateTime = EnvironmentEdgeManager.currentTime(); Task oldtask = createTaskIfAbsent(path, batch); if (oldtask == null) { // publish the task in zk @@ -808,7 +808,7 @@ public class SplitLogManager extends ZooKeeperListener { if (task.isUnassigned()) { LOG.info("task " + path + " acquired by " + workerName); } - task.heartbeat(EnvironmentEdgeManager.currentTimeMillis(), new_version, workerName); + task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName); SplitLogCounters.tot_mgr_heartbeat.incrementAndGet(); } else { // duplicate heartbeats - heartbeats w/o zk node version @@ -831,7 +831,7 @@ public class SplitLogManager extends ZooKeeperListener { // 2) after a configurable timeout if the server is not marked as dead but has still not // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. - final long time = EnvironmentEdgeManager.currentTimeMillis() - task.last_update; + final long time = EnvironmentEdgeManager.currentTime() - task.last_update; final boolean alive = master.getServerManager() != null ? 
master.getServerManager().isServerOnline(task.cur_worker_name) : true; if (alive && time < timeout) { @@ -863,7 +863,7 @@ public class SplitLogManager extends ZooKeeperListener { if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { LOG.debug("failed to resubmit task " + path + " version changed"); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } } catch (NoNodeException e) { @@ -873,13 +873,13 @@ public class SplitLogManager extends ZooKeeperListener { getDataSetWatchSuccess(path, null, Integer.MIN_VALUE); } catch (DeserializationException e1) { LOG.debug("Failed to re-resubmit task " + path + " because of deserialization issue", e1); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } return false; } catch (KeeperException.BadVersionException e) { LOG.debug("failed to resubmit task " + path + " version changed"); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } catch (KeeperException e) { SplitLogCounters.tot_mgr_resubmit_failed.incrementAndGet(); @@ -951,7 +951,7 @@ public class SplitLogManager extends ZooKeeperListener { // might miss the watch-trigger that creation of RESCAN node provides. // Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks // therefore this behavior is safe. - lastTaskCreateTime = EnvironmentEdgeManager.currentTimeMillis(); + lastTaskCreateTime = EnvironmentEdgeManager.currentTime(); SplitLogTask slt = new SplitLogTask.Done(this.serverName, this.recoveryMode); this.watcher.getRecoverableZooKeeper().getZooKeeper(). 
create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), @@ -1051,7 +1051,7 @@ public class SplitLogManager extends ZooKeeperListener { task = tasks.get(path); if (task != null || ZKSplitLog.isRescanNode(watcher, path)) { if (task != null) { - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); } getDataSetWatch(path, zkretries); } @@ -1107,7 +1107,7 @@ public class SplitLogManager extends ZooKeeperListener { try { this.recoveringRegionLock.lock(); // mark that we're creating recovering znodes - this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTime(); for (HRegionInfo region : userRegions) { String regionEncodeName = region.getEncodedName(); @@ -1497,7 +1497,7 @@ public class SplitLogManager extends ZooKeeperListener { } } if (tot > 0) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > lastLog + 5000) { lastLog = now; LOG.info("total tasks = " + tot + " unassigned = " + unassigned + " tasks=" + tasks); @@ -1516,7 +1516,7 @@ public class SplitLogManager extends ZooKeeperListener { // master should spawn both a manager and a worker thread to guarantee // that there is always one worker in the system if (tot > 0 && !found_assigned_task && - ((EnvironmentEdgeManager.currentTimeMillis() - lastTaskCreateTime) > + ((EnvironmentEdgeManager.currentTime() - lastTaskCreateTime) > unassignedTimeout)) { for (Map.Entry e : tasks.entrySet()) { String path = e.getKey(); @@ -1546,7 +1546,7 @@ public class SplitLogManager extends ZooKeeperListener { } // Garbage collect left-over /hbase/recovering-regions/... znode - long timeInterval = EnvironmentEdgeManager.currentTimeMillis() + long timeInterval = EnvironmentEdgeManager.currentTime() - lastRecoveringNodeCreationTime; if (!failedRecoveringRegionDeletions.isEmpty() || (tot == 0 && tasks.size() == 0 && (timeInterval > checkRecoveringTimeThreshold))) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java index faa11e3..f111f4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java @@ -326,7 +326,7 @@ public abstract class TableLockManager { .setThreadId(Thread.currentThread().getId()) .setPurpose(purpose) .setIsShared(isShared) - .setCreateTime(EnvironmentEdgeManager.currentTimeMillis()).build(); + .setCreateTime(EnvironmentEdgeManager.currentTime()).build(); byte[] lockMetadata = toBytes(data); InterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(zkWatcher, tableLockZNode, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 99d794d..6a83dc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -90,10 +90,10 @@ public class TableNamespaceManager { // Wait for the namespace table to be assigned. // If timed out, we will move ahead without initializing it. // So that it should be initialized later on lazily. 
- long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT); while (!isTableAssigned()) { - if (EnvironmentEdgeManager.currentTimeMillis() - startTime + 100 > timeout) { + if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) { LOG.warn("Timedout waiting for namespace table to be assigned."); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index ae59f26..79fd21e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -144,7 +144,7 @@ public class FavoredNodeAssignmentHelper { put = MetaTableAccessor.makePutFromRegionInfo(regionInfo); byte[] favoredNodes = getFavoredNodes(favoredNodeList); put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER, - EnvironmentEdgeManager.currentTimeMillis(), favoredNodes); + EnvironmentEdgeManager.currentTime(), favoredNodes); LOG.info("Create the region " + regionInfo.getRegionNameAsString() + " with favored nodes " + Bytes.toString(favoredNodes)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index e353316..8f6314f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -220,7 +220,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { return null; } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); initCosts(cluster); @@ -259,13 +259,13 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { updateCostsWithAction(cluster, undoAction); } - if (EnvironmentEdgeManager.currentTimeMillis() - startTime > + if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } - long endTime = EnvironmentEdgeManager.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); metricsBalancer.balanceCluster(endTime - startTime); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java index b65fbf9..46fe971 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java @@ -46,7 +46,7 @@ public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate { @Override public boolean isFileDeletable(FileStatus fStat) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); long time = fStat.getModificationTime(); long life = currentTime - time; if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java index 66b0423..f021954 100644 
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java @@ -37,7 +37,7 @@ public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate { @Override public boolean isLogDeletable(FileStatus fStat) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); long time = fStat.getModificationTime(); long life = currentTime - time; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java index d09ea64..53f4108 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java @@ -92,7 +92,7 @@ public class DispatchMergingRegionHandler extends EventHandler { .getEncodedName()) + " is not online now"); return; } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); boolean onSameRS = region_a_location.equals(region_b_location); // Make sure regions are on the same regionserver before send merge @@ -134,7 +134,7 @@ public class DispatchMergingRegionHandler extends EventHandler { // RegionInTransition any more break; } - if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) break; + if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) break; } catch (InterruptedException e) { InterruptedIOException iioe = new InterruptedIOException(); iioe.initCause(e); @@ -144,7 +144,7 @@ public class DispatchMergingRegionHandler extends EventHandler { } if (onSameRS) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); while (!masterServices.isStopped()) { try { masterServices.getServerManager().sendRegionsMerge(region_a_location, @@ -153,7 +153,7 @@ public class DispatchMergingRegionHandler extends EventHandler { region_a.getEncodedName() + "," + region_b.getEncodedName() + ", focible=" + forcible); break; } catch (RegionOpeningException roe) { - if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) { + if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) { LOG.warn("Failed sending merge to " + region_a_location + " after " + timeout + "ms", roe); break; @@ -170,7 +170,7 @@ public class DispatchMergingRegionHandler extends EventHandler { LOG.info("Cancel merging regions " + region_a.getRegionNameAsString() + ", " + region_b.getRegionNameAsString() + ", because can't move them together after " - + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms"); + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 3bf704a..31ec098 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -866,7 +866,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param sentinels map of sentinels to clean */ private synchronized void 
cleanupSentinels(final Map sentinels) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); Iterator> it = sentinels.entrySet().iterator(); while (it.hasNext()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index d9ddba3..f2c6db7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -474,9 +474,9 @@ public class CompactSplitThread implements CompactionRequestor { try { // Note: please don't put single-compaction logic here; // put it into region/store/etc. This is CST logic. - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); boolean completed = region.compact(compaction, store); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); if (completed) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 5a4c904..4417bd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -360,7 +360,7 @@ public class CompactionTool extends Configured implements Tool { Path stagingDir = JobUtil.getStagingDir(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTimeMillis()); + Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); CompactionInputFormat.createInputFile(fs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index d90357b..759f842 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -150,7 +150,7 @@ public class DefaultMemStore implements MemStore { LOG.warn("Snapshot called again without clearing previous. " + "Doing nothing. 
Another ongoing flush or did we fail last attempt?"); } else { - this.snapshotId = EnvironmentEdgeManager.currentTimeMillis(); + this.snapshotId = EnvironmentEdgeManager.currentTime(); this.snapshotSize = keySize(); if (!this.kvset.isEmpty()) { this.snapshot = this.kvset; @@ -239,7 +239,7 @@ public class DefaultMemStore implements MemStore { void setOldestEditTimeToNow() { if (timeOfOldestEdit == Long.MAX_VALUE) { - timeOfOldestEdit = EnvironmentEdgeManager.currentTimeMillis(); + timeOfOldestEdit = EnvironmentEdgeManager.currentTime(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 274c1b3..2a8b237 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -754,7 +754,7 @@ public class HRegion implements HeapSize { // , Writable{ // Initialize split policy this.splitPolicy = RegionSplitPolicy.create(this, conf); - this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastFlushTime = EnvironmentEdgeManager.currentTime(); // Use maximum of log sequenceid or that which was found in stores // (particularly if no recovered edits, seqid will be -1). long nextSeqid = maxSeqId + 1; @@ -1685,7 +1685,7 @@ public class HRegion implements HeapSize { // , Writable{ if (flushCheckInterval <= 0) { //disabled return false; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); //if we flushed in the recent past, we don't need to do again now if ((now - getLastFlushTime() < flushCheckInterval)) { return false; @@ -1736,7 +1736,7 @@ public class HRegion implements HeapSize { // , Writable{ // Don't flush when server aborting, it's unsafe throw new IOException("Aborting flush because server is aborted..."); } - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); // If nothing to flush, return, but we need to safely update the region sequence id if (this.memstoreSize.get() <= 0) { // Take an update lock because am about to change the sequence id and we want the sequence id @@ -1948,7 +1948,7 @@ public class HRegion implements HeapSize { // , Writable{ } // Record latest flush time - this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastFlushTime = EnvironmentEdgeManager.currentTime(); // Update the last flushed sequence id for region. TODO: This is dup'd inside the WAL/FSHlog. this.lastFlushSeqId = flushSeqId; @@ -1959,7 +1959,7 @@ public class HRegion implements HeapSize { // , Writable{ notifyAll(); // FindBugs NN_NAKED_NOTIFY } - long time = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long time = EnvironmentEdgeManager.currentTime() - startTime; long memstoresize = this.memstoreSize.get(); String msg = "Finished memstore flush of ~" + StringUtils.byteDesc(totalFlushableSize) + "/" + totalFlushableSize + @@ -2503,7 +2503,7 @@ public class HRegion implements HeapSize { // , Writable{ // we acquire at least one. 
// ---------------------------------- int numReadyToWrite = 0; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); while (lastIndexExclusive < batchOp.operations.length) { Mutation mutation = batchOp.getMutation(lastIndexExclusive); boolean isPutMutation = mutation instanceof Put; @@ -2588,7 +2588,7 @@ public class HRegion implements HeapSize { // , Writable{ // we should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes are not guaranteed to have a newer timestamp - now = EnvironmentEdgeManager.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTime(); byte[] byteNow = Bytes.toBytes(now); // Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily? @@ -3358,7 +3358,7 @@ public class HRegion implements HeapSize { // , Writable{ 2000); // How often to send a progress report (default 1/2 master timeout) int period = this.conf.getInt("hbase.hstore.report.period", 300000); - long lastReport = EnvironmentEdgeManager.currentTimeMillis(); + long lastReport = EnvironmentEdgeManager.currentTime(); while ((entry = reader.next()) != null) { HLogKey key = entry.getKey(); @@ -3373,7 +3373,7 @@ public class HRegion implements HeapSize { // , Writable{ if (intervalEdits >= interval) { // Number of edits interval reached intervalEdits = 0; - long cur = EnvironmentEdgeManager.currentTimeMillis(); + long cur = EnvironmentEdgeManager.currentTime(); if (lastReport + period <= cur) { status.setStatus("Replaying edits..." + " skipped=" + skippedEdits + @@ -4714,7 +4714,7 @@ public class HRegion implements HeapSize { // , Writable{ meta.checkResources(); // The row key is the region name byte[] row = r.getRegionName(); - final long now = EnvironmentEdgeManager.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, @@ -5013,7 +5013,7 @@ public class HRegion implements HeapSize { // , Writable{ // Short circuit the read only case if (processor.readOnly()) { try { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); doProcessRowWithTimeout( processor, now, this, null, null, timeout); processor.postProcess(this, walEdit, true); @@ -5048,7 +5048,7 @@ public class HRegion implements HeapSize { // , Writable{ // Get a mvcc write number mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { // 4. 
Let the processor scan the rows, generate mutations and add // waledits @@ -5249,7 +5249,7 @@ public class HRegion implements HeapSize { // , Writable{ // now start my own transaction mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Process each family for (Map.Entry> family : append.getFamilyCellMap().entrySet()) { @@ -5467,7 +5467,7 @@ public class HRegion implements HeapSize { // , Writable{ // now start my own transaction mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Process each family for (Map.Entry> family: increment.getFamilyCellMap().entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8e62620..4ce6173 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -2037,7 +2037,7 @@ public class HRegionServer extends HasThread implements rpcServices.requestCount.set(0); LOG.info("reportForDuty to master=" + masterServerName + " with port=" + rpcServices.isa.getPort() + ", startcode=" + this.startcode); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); int port = rpcServices.isa.getPort(); RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); request.setPort(port); @@ -2714,7 +2714,7 @@ public class HRegionServer extends HasThread implements public MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - ts = EnvironmentEdgeManager.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); } public ServerName getServerName() { @@ -2756,7 +2756,7 @@ public class HRegionServer extends HasThread implements private MovedRegionInfo getMovedRegion(final String encodedRegionName) { MovedRegionInfo dest = movedRegions.get(encodedRegionName); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (dest != null) { if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) { return dest; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index dc593ac..859db17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -769,7 +769,7 @@ public class HStore implements Store { + " into store " + this + " (new location: " + dstPath + ")"); if (LOG.isTraceEnabled()) { String traceMessage = "BULK LOAD time,size,store size,store files [" - + EnvironmentEdgeManager.currentTimeMillis() + "," + r.length() + "," + storeSize + + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]"; LOG.trace(traceMessage); } @@ -1013,7 +1013,7 @@ public class HStore implements Store { totalSize += sf.getReader().length(); } String traceMessage = 
"FLUSH time,count,size,store size,store files [" - + EnvironmentEdgeManager.currentTimeMillis() + "," + sfs.size() + "," + totalSize + + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]"; LOG.trace(traceMessage); } @@ -1142,7 +1142,7 @@ public class HStore implements Store { + " into tmpdir=" + fs.getTempDir() + ", totalSize=" + StringUtils.humanReadableInt(cr.getSize())); - long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis(); + long compactionStartTime = EnvironmentEdgeManager.currentTime(); List sfs = null; try { // Commence the compaction. @@ -1246,7 +1246,7 @@ public class HStore implements Store { */ private void logCompactionEndMessage( CompactionRequest cr, List sfs, long compactionStartTime) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); StringBuilder message = new StringBuilder( "Completed" + (cr.isMajor() ? " major" : "") + " compaction of " + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in " @@ -1518,7 +1518,7 @@ public class HStore implements Store { long cfTtl = getStoreFileTtl(); if (cfTtl != Long.MAX_VALUE) { delSfs = storeEngine.getStoreFileManager().getUnneededFiles( - EnvironmentEdgeManager.currentTimeMillis() - cfTtl, filesCompacting); + EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting); addToCompactingFiles(delSfs); } } @@ -2017,7 +2017,7 @@ public class HStore implements Store { this.lock.readLock().lock(); try { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); return this.memstore.updateColumnValue(row, f, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index 9ff7741..4673d0d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -290,7 +290,7 @@ public class Leases extends HasThread { } public long getDelay(TimeUnit unit) { - return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTimeMillis(), + return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTime(), TimeUnit.MILLISECONDS); } @@ -305,7 +305,7 @@ public class Leases extends HasThread { * Resets the expiration time of the lease. 
*/ public void resetExpirationTime() { - this.expirationTime = EnvironmentEdgeManager.currentTimeMillis() + this.leaseTimeoutPeriod; + this.expirationTime = EnvironmentEdgeManager.currentTime() + this.leaseTimeoutPeriod; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index eed98e5..310cd07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -375,7 +375,7 @@ class MemStoreFlusher implements FlushRequester { if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) { if (fqe.isMaximumWait(this.blockingWaitTime)) { - LOG.info("Waited " + (EnvironmentEdgeManager.currentTimeMillis() - fqe.createTime) + + LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) + "ms on a compaction to clean up 'too many store files'; waited " + "long enough... proceeding with flush of " + region.getRegionNameAsString()); @@ -504,7 +504,7 @@ class MemStoreFlusher implements FlushRequester { if (Trace.isTracing()) { scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark."); } - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); synchronized (this.blockSignal) { boolean blocked = false; long startTime = 0; @@ -512,7 +512,7 @@ class MemStoreFlusher implements FlushRequester { try { while (isAboveHighWaterMark() && !server.isStopped()) { if (!blocked) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); LOG.info("Blocking updates on " + server.toString() + ": the global memstore size " + StringUtils.humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreSize()) + @@ -529,7 +529,7 @@ class MemStoreFlusher implements FlushRequester { LOG.warn("Interrupted while waiting"); interrupted = true; } - long took = EnvironmentEdgeManager.currentTimeMillis() - start; + long took = EnvironmentEdgeManager.currentTime() - start; LOG.warn("Memstore is above high water mark and block " + took + "ms"); } } finally { @@ -539,7 +539,7 @@ class MemStoreFlusher implements FlushRequester { } if(blocked){ - final long totalTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + final long totalTime = EnvironmentEdgeManager.currentTime() - startTime; if(totalTime > 0){ this.updatesBlockedMsHighWater.add(totalTime); } @@ -643,7 +643,7 @@ class MemStoreFlusher implements FlushRequester { FlushRegionEntry(final HRegion r) { this.region = r; - this.createTime = EnvironmentEdgeManager.currentTimeMillis(); + this.createTime = EnvironmentEdgeManager.currentTime(); this.whenToExpire = this.createTime; } @@ -652,7 +652,7 @@ class MemStoreFlusher implements FlushRequester { * @return True if we have been delayed > maximumWait milliseconds. */ public boolean isMaximumWait(final long maximumWait) { - return (EnvironmentEdgeManager.currentTimeMillis() - this.createTime) > maximumWait; + return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait; } /** @@ -665,19 +665,19 @@ class MemStoreFlusher implements FlushRequester { /** * @param when When to expire, when to come up out of the queue. - * Specify in milliseconds. This method adds EnvironmentEdgeManager.currentTimeMillis() + * Specify in milliseconds. 
This method adds EnvironmentEdgeManager.currentTime() * to whatever you pass. * @return This. */ public FlushRegionEntry requeue(final long when) { - this.whenToExpire = EnvironmentEdgeManager.currentTimeMillis() + when; + this.whenToExpire = EnvironmentEdgeManager.currentTime() + when; this.requeueCount++; return this; } @Override public long getDelay(TimeUnit unit) { - return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTimeMillis(), + return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTime(), TimeUnit.MILLISECONDS); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 5da1ea1..ff39a1e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -460,7 +460,7 @@ class MetricsRegionServerWrapperImpl //Compute the number of requests per second - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); // assume that it took PERIOD seconds to start the executor. // this is a guess but it's a pretty good one. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 0f89a8b..bde18d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -394,7 +394,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private Result append(final HRegion region, final MutationProto m, final CellScanner cellScanner, long nonceGroup) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); Append append = ProtobufUtil.toAppend(m, cellScanner); Result r = null; if (region.getCoprocessorHost() != null) { @@ -415,7 +415,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateAppend( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } return r; } @@ -430,7 +430,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private Result increment(final HRegion region, final MutationProto mutation, final CellScanner cells, long nonceGroup) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); Increment increment = ProtobufUtil.toIncrement(mutation, cells); Result r = null; if (region.getCoprocessorHost() != null) { @@ -451,7 +451,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateIncrement( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } return r; } @@ -569,7 +569,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region, final List mutations, final CellScanner cells) { Mutation[] mArray = new Mutation[mutations.size()]; - long before = 
EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; try { int i = 0; @@ -622,7 +622,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTimeMillis(); + long after = EnvironmentEdgeManager.currentTime(); if (batchContainsPuts) { regionServer.metricsRegionServer.updatePut(after - before); } @@ -645,7 +645,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private OperationStatus [] doReplayBatchOp(final HRegion region, final List mutations, long replaySeqId) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; try { for (Iterator it = mutations.iterator(); it.hasNext();) { @@ -677,7 +677,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, new HLogSplitter.MutationReplay[mutations.size()]), replaySeqId); } finally { if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTimeMillis(); + long after = EnvironmentEdgeManager.currentTime(); if (batchContainsPuts) { regionServer.metricsRegionServer.updatePut(after - before); } @@ -1351,7 +1351,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, @QosPriority(priority = HConstants.REPLAY_QOS) public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); CellScanner cells = ((PayloadCarryingRpcController) controller).cellScanner(); try { checkOpen(); @@ -1428,7 +1428,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } finally { if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateReplay( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } } } @@ -1621,7 +1621,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, @Override public GetResponse get(final RpcController controller, final GetRequest request) throws ServiceException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); try { checkOpen(); requestCount.increment(); @@ -1671,7 +1671,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } finally { if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateGet( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java index a826675..9dd6c0f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java @@ -64,7 +64,7 @@ class RegionMergeRequest implements Runnable { return; } try { - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, forcible); @@ -116,7 +116,7 @@ class RegionMergeRequest 
implements Runnable { + region_a + ", region_b=" + region_b + ",merged region=" + mt.getMergedRegionInfo().getRegionNameAsString() + ". Region merge took " - + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime)); + + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime)); } catch (IOException ex) { ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex; LOG.error("Merge failed " + this, ex); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index cb28c9a..fd5fff5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -442,7 +442,7 @@ public class RegionMergeTransaction { */ public static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) { - long rid = EnvironmentEdgeManager.currentTimeMillis(); + long rid = EnvironmentEdgeManager.currentTime(); // Regionid is timestamp. Merged region's id can't be less than that of // merging regions else will insert at wrong location in hbase:meta if (rid < a.getRegionId() || rid < b.getRegionId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 205c7f8..f2e8bfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -337,7 +337,7 @@ public class ScanQueryMatcher { if ((!isUserScan) && timeToPurgeDeletes > 0 - && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) + && (EnvironmentEdgeManager.currentTime() - timestamp) <= timeToPurgeDeletes) { return MatchCode.INCLUDE; } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index faf9138..421f54e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -95,7 +95,7 @@ public class ServerNonceManager { } public void reportActivity() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); this.data = (this.data & ALL_FLAG_BITS) | (now << 3); } @@ -237,7 +237,7 @@ public class ServerNonceManager { public void reportOperationFromWal(long group, long nonce, long writeTime) { if (nonce == HConstants.NO_NONCE) return; // Give the write time some slack in case the clocks are not synchronized. 
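// Illustrative sketch (not part of this patch): the skew-tolerant staleness check
// applied just below. A WAL-recorded nonce is ignored when its write time is more
// than 1.5x the delete-nonce grace period in the past on the edge clock; the 1.5
// factor is the slack for unsynchronized server clocks. "graceMs" is a stand-in
// for deleteNonceGracePeriod.
static boolean isStaleWalNonce(long writeTimeMs, long graceMs) {
  long now = org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime();
  return now > writeTimeMs + (long) (graceMs * 1.5);
}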
- long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > writeTime + (deleteNonceGracePeriod * 1.5)) return; OperationContext newResult = new OperationContext(); newResult.setState(OperationContext.DONT_PROCEED); @@ -267,7 +267,7 @@ public class ServerNonceManager { } private void cleanUpOldNonces() { - long cutoff = EnvironmentEdgeManager.currentTimeMillis() - deleteNonceGracePeriod; + long cutoff = EnvironmentEdgeManager.currentTime() - deleteNonceGracePeriod; for (Map.Entry entry : nonces.entrySet()) { OperationContext oc = entry.getValue(); if (!oc.isExpired(cutoff)) continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 6ade099..c2270e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -454,7 +454,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable { @Override public boolean progress() { - long t = EnvironmentEdgeManager.currentTimeMillis(); + long t = EnvironmentEdgeManager.currentTime(); if ((t - last_report_at) > reportPeriod) { last_report_at = t; int latestZKVersion = attemptToOwnTask(false, watcher, serverName, curTask, mode, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 30b55dd..2eb01f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -176,7 +176,7 @@ public class SplitTransaction { * @return Daughter region id (timestamp) to use. */ private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { - long rid = EnvironmentEdgeManager.currentTimeMillis(); + long rid = EnvironmentEdgeManager.currentTime(); // Regionid is timestamp. Can't be less than that of parent else will insert // at wrong location in hbase:meta (See HBASE-710). 
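// Sketch of the clamp performed below (see HBASE-710): the daughter region id is
// the current edge-clock timestamp, bumped past the parent's id if the clock reads
// earlier, so daughters never sort before their parent in hbase:meta. The "+ 1"
// adjustment mirrors what the surrounding method does; the helper name is ours.
static long daughterRegionId(long parentRegionId) {
  long rid = org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime();
  if (rid < parentRegionId) {
    rid = parentRegionId + 1;  // clock behind the parent's id: bump just past it
  }
  return rid;
}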
if (rid < hri.getRegionId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 1ef3e91..7160b30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -122,7 +122,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner explicitColumnQuery = numCol > 0; this.scan = scan; this.columns = columns; - oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttl; + oldestUnexpiredTS = EnvironmentEdgeManager.currentTime() - ttl; this.minVersions = minVersions; if (store != null && ((HStore)store).getHRegion() != null diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 1665713..92a86e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -79,7 +79,7 @@ public class StorefileRefresherChore extends Chore { continue; } String encodedName = r.getRegionInfo().getEncodedName(); - long time = EnvironmentEdgeManager.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); if (!lastRefreshTimes.containsKey(encodedName)) { lastRefreshTimes.put(encodedName, time); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 5c9a233..0fc64d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -63,7 +63,7 @@ public class CompactionRequest implements Comparable { * This ctor should be used by coprocessors that want to subclass CompactionRequest. */ public CompactionRequest() { - this.selectionTime = EnvironmentEdgeManager.currentTimeMillis(); + this.selectionTime = EnvironmentEdgeManager.currentTime(); this.timeInNanos = System.nanoTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 22697e9..9edb317 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -303,7 +303,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { if (cfTtl == Long.MAX_VALUE) { return null; // minversion might be set, cannot delete old files } - long timestampCutoff = EnvironmentEdgeManager.currentTimeMillis() - cfTtl; + long timestampCutoff = EnvironmentEdgeManager.currentTime() - cfTtl; // Merge the longest sequence of stripes where all files have expired, if any. 
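// Simplified sketch (not the real stripe-policy code) of the scan that follows:
// find the longest run of consecutive stripes whose files have all expired, i.e.
// whose newest cell timestamp is older than the TTL cutoff computed above.
static int[] longestExpiredRun(long[] stripeNewestTs, long timestampCutoff) {
  int bestStart = -1, bestLength = 0, start = -1, length = 0;
  for (int i = 0; i < stripeNewestTs.length; i++) {
    if (stripeNewestTs[i] < timestampCutoff) {   // entire stripe expired
      if (start < 0) start = i;
      length++;
      if (length > bestLength) { bestLength = length; bestStart = start; }
    } else {
      start = -1;                                // run broken by a live stripe
      length = 0;
    }
  }
  return new int[] { bestStart, bestLength };    // {-1, 0} when nothing expired
}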
int start = -1, bestStart = -1, length = 0, bestLength = 0; ArrayList> stripes = si.getStripes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 8474836..99c6254 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1919,7 +1919,7 @@ class FSHLog implements HLog, Syncable { // TODO: WORK ON MAKING THIS APPEND FASTER. DOING WAY TOO MUCH WORK WITH CPs, PBing, etc. atHeadOfRingBufferEventHandlerAppend(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); byte [] encodedRegionName = entry.getKey().getEncodedRegionName(); long regionSequenceId = HLog.NO_SEQUENCE_ID; try { @@ -1962,7 +1962,7 @@ class FSHLog implements HLog, Syncable { coprocessorHost.postWALWrite(entry.getHRegionInfo(), entry.getKey(), entry.getEdit()); // Update metrics. - postAppend(entry, EnvironmentEdgeManager.currentTimeMillis() - start); + postAppend(entry, EnvironmentEdgeManager.currentTime() - start); } catch (Exception e) { LOG.fatal("Could not append. Requesting close of hlog", e); requestLogRoll(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java index 276e16c..41d0910 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java @@ -103,7 +103,7 @@ public class HLogFactory { // A hlog file could be under recovery, so it may take several // tries to get it open. Instead of claiming it is corrupted, retry // to open it up to 5 minutes by default. 
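// Sketch of the deadline pattern used in the retry loop that follows (names are
// illustrative, not the actual HLogFactory code): the configured timeout is turned
// once into an absolute deadline on the edge clock, and each failed open attempt
// is retried until that deadline passes. "tryOpen" is a hypothetical open attempt;
// this stricter variant gives up at the deadline, whereas the real loop only logs.
static <T> T openWithDeadline(java.util.concurrent.Callable<T> tryOpen, long timeoutMs)
    throws Exception {
  final long deadline =
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime() + timeoutMs;
  while (true) {
    try {
      return tryOpen.call();
    } catch (java.io.IOException e) {
      if (org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime() > deadline) {
        throw e;               // past the deadline: surface the last failure
      }
      Thread.sleep(1000);      // brief pause before the next attempt
    }
  }
}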
- long startWaiting = EnvironmentEdgeManager.currentTimeMillis(); + long startWaiting = EnvironmentEdgeManager.currentTime(); long openTimeout = conf.getInt("hbase.hlog.open.timeout", 300000) + startWaiting; int nbAttempt = 0; while (true) { @@ -138,9 +138,9 @@ public class HLogFactory { if (reporter != null && !reporter.progress()) { throw new InterruptedIOException("Operation is cancelled"); } - if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTimeMillis()) { + if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); } else { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java index cc10cb2..60eda98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java @@ -209,7 +209,7 @@ public class HLogKey implements WritableComparable, SequenceNumber { */ public HLogKey(final byte [] encodedRegionName, final TableName tablename, long logSeqNum, long nonceGroup, long nonce) { - init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTimeMillis(), + init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTime(), EMPTY_UUIDS, nonceGroup, nonce); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 2df9f50..111efef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -1693,14 +1693,14 @@ public class HLogSplitter { private HRegionLocation waitUntilRegionOnline(HRegionLocation loc, byte[] row, final long timeout, AtomicBoolean isRecovering) throws IOException { - final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout; + final long endTime = EnvironmentEdgeManager.currentTime() + timeout; final long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); boolean reloadLocation = false; TableName tableName = loc.getRegionInfo().getTable(); int tries = 0; Throwable cause = null; - while (endTime > EnvironmentEdgeManager.currentTimeMillis()) { + while (endTime > EnvironmentEdgeManager.currentTime()) { try { // Try and get regioninfo from the hosting server. 
HConnection hconn = getConnectionByTableName(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 4b38027..9ba3353 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -268,7 +268,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createFlushWALEdit(HRegionInfo hri, FlushDescriptor f) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, FLUSH, - EnvironmentEdgeManager.currentTimeMillis(), f.toByteArray()); + EnvironmentEdgeManager.currentTime(), f.toByteArray()); return new WALEdit().add(kv); } @@ -282,7 +282,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createRegionEventWALEdit(HRegionInfo hri, RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, REGION_EVENT, - EnvironmentEdgeManager.currentTimeMillis(), regionEventDesc.toByteArray()); + EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); return new WALEdit().add(kv); } @@ -301,7 +301,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createCompaction(final HRegionInfo hri, final CompactionDescriptor c) { byte [] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, - EnvironmentEdgeManager.currentTimeMillis(), pbbytes); + EnvironmentEdgeManager.currentTime(), pbbytes); return new WALEdit().add(kv); //replication scope null so that this won't be replicated } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 97e9b86..58bbe83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -120,7 +120,7 @@ public class WALEditsReplaySink { regionEntries.add(entry); } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); // replaying edits by region for (Map.Entry> _entry : entriesByRegion.entrySet()) { @@ -139,7 +139,7 @@ public class WALEditsReplaySink { } } - long endTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long endTime = EnvironmentEdgeManager.currentTime() - startTime; LOG.debug("number of rows:" + entries.size() + " are sent by batch! 
spent " + endTime + "(ms)!"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 94dec7c..eadaead 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -83,7 +83,7 @@ public class MetricsSource { * @param timestamp write time of the edit */ public void setAgeOfLastShippedOp(long timestamp) { - long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp; + long age = EnvironmentEdgeManager.currentTime() - timestamp; rms.setGauge(ageOfLastShippedOpKey, age); rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age); this.lastTimestamp = timestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java index 0aba8a6..93c2ee8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java @@ -42,7 +42,7 @@ public class ReplicationThrottler { this.enabled = this.bandwidth > 0; if (this.enabled) { this.cyclePushSize = 0; - this.cycleStartTick = EnvironmentEdgeManager.currentTimeMillis(); + this.cycleStartTick = EnvironmentEdgeManager.currentTime(); } } @@ -67,7 +67,7 @@ public class ReplicationThrottler { } long sleepTicks = 0; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // 1. if cyclePushSize exceeds bandwidth, we need to sleep some // following cycles to amortize, this case can occur when a single push // exceeds the bandwidth @@ -115,7 +115,7 @@ public class ReplicationThrottler { */ public void resetStartTick() { if (this.enabled) { - this.cycleStartTick = EnvironmentEdgeManager.currentTimeMillis(); + this.cycleStartTick = EnvironmentEdgeManager.currentTime(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 53d3f35..bda02b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -657,7 +657,7 @@ public class AccessController extends BaseMasterAndRegionObserver // any cells found there inclusively. long latestTs = Math.max(opTs, latestCellTs); if (latestTs == 0 || latestTs == HConstants.LATEST_TIMESTAMP) { - latestTs = EnvironmentEdgeManager.currentTimeMillis(); + latestTs = EnvironmentEdgeManager.currentTime(); } get.setTimeRange(0, latestTs + 1); // In case of Put operation we set to read all versions. 
This was done to consider the case diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 2892c51..5cdddb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -126,7 +126,7 @@ public class AuthenticationTokenSecretManager @Override protected byte[] createPassword(AuthenticationTokenIdentifier identifier) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey secretKey = currentKey; identifier.setKeyId(secretKey.getKeyId()); identifier.setIssueDate(now); @@ -139,7 +139,7 @@ public class AuthenticationTokenSecretManager @Override public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) throws InvalidToken { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (identifier.getExpirationDate() < now) { throw new InvalidToken("Token has expired"); } @@ -223,7 +223,7 @@ public class AuthenticationTokenSecretManager return; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Iterator iter = allKeys.values().iterator(); while (iter.hasNext()) { AuthenticationKey key = iter.next(); @@ -247,7 +247,7 @@ public class AuthenticationTokenSecretManager return; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey prev = currentKey; AuthenticationKey newKey = new AuthenticationKey(++idSeq, Long.MAX_VALUE, // don't allow to expire until it's replaced by a new key @@ -314,7 +314,7 @@ public class AuthenticationTokenSecretManager isMaster = true; while (!stopped) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // clear any expired removeExpiredKeys(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 203f6de..03ba8b8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -231,9 +231,9 @@ public class SnapshotDescriptionUtils { // set the creation time, if one hasn't been set long time = snapshot.getCreationTime(); if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { - time = EnvironmentEdgeManager.currentTimeMillis(); + time = EnvironmentEdgeManager.currentTime(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" - + EnvironmentEdgeManager.currentTimeMillis() + ")."); + + EnvironmentEdgeManager.currentTime() + ")."); SnapshotDescription.Builder builder = snapshot.toBuilder(); builder.setCreationTime(time); snapshot = builder.build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 3df9e8b..da76251 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -179,7 +179,7 @@ public class ConnectionCache { private boolean closed; ConnectionInfo(HConnection conn, String user) { - lastAccessTime = EnvironmentEdgeManager.currentTimeMillis(); + lastAccessTime = EnvironmentEdgeManager.currentTime(); connection = conn; closed = false; userName = user; @@ -194,13 +194,13 @@ public class ConnectionCache { connections.remove(userName); return false; } - lastAccessTime = EnvironmentEdgeManager.currentTimeMillis(); + lastAccessTime = EnvironmentEdgeManager.currentTime(); return true; } synchronized boolean timedOut(int maxIdleTime) { long timeoutTime = lastAccessTime + maxIdleTime; - if (EnvironmentEdgeManager.currentTimeMillis() > timeoutTime) { + if (EnvironmentEdgeManager.currentTime() > timeoutTime) { connections.remove(userName); closed = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java index acdbd08..cb8f751 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java @@ -171,7 +171,7 @@ public class FSHDFSUtils extends FSUtils { final Configuration conf, final CancelableProgressable reporter) throws IOException { LOG.info("Recovering lease on dfs file " + p); - long startWaiting = EnvironmentEdgeManager.currentTimeMillis(); + long startWaiting = EnvironmentEdgeManager.currentTime(); // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves // beyond that limit 'to be safe'. @@ -200,8 +200,8 @@ public class FSHDFSUtils extends FSUtils { } else { // Cycle here until subsequentPause elapses. While spinning, check isFileClosed if // available (should be in hadoop 2.0.5... not in hadoop 1 though. 
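// Sketch of the pause-and-poll pattern used below: spin until "pauseMs" has
// elapsed on the edge clock, sleeping about a second per iteration and probing an
// isFileClosed-style check so recovery can finish early. "fileClosed" is a
// hypothetical probe standing in for the reflective isFileClosed call.
static boolean waitOutPause(long pauseMs, java.util.concurrent.Callable<Boolean> fileClosed)
    throws Exception {
  long localStartWaiting = org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime();
  while ((org.apache.hadoop.hbase.util.EnvironmentEdgeManager.currentTime() - localStartWaiting)
      < pauseMs) {
    Thread.sleep(1000);                  // cf. hbase.lease.recovery.pause
    if (Boolean.TRUE.equals(fileClosed.call())) {
      return true;                       // file already closed; stop waiting early
    }
  }
  return false;                          // pause elapsed; caller retries recoverLease
}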
- long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis(); - while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) < + long localStartWaiting = EnvironmentEdgeManager.currentTime(); + while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPause) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { @@ -232,7 +232,7 @@ public class FSHDFSUtils extends FSUtils { boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt, final Path p, final long startWaiting) { - if (recoveryTimeout < EnvironmentEdgeManager.currentTimeMillis()) { + if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { LOG.warn("Cannot recoverLease after trying for " + conf.getInt("hbase.lease.recovery.timeout", 900000) + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + @@ -279,7 +279,7 @@ public class FSHDFSUtils extends FSUtils { */ private String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) { return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms"; + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 0f21b72..32817df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1731,7 +1731,7 @@ public abstract class FSUtils { public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest) throws IOException { // set the modify time for TimeToLive Cleaner - fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1); + fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1); return fs.rename(src, dest); } @@ -1810,7 +1810,7 @@ public abstract class FSUtils { throws IOException { FileSystem fs = FileSystem.get(conf); Path rootPath = FSUtils.getRootDir(conf); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); Path queryPath; // The table files are in ${hbase.rootdir}/data///* if (null == desiredTable) { @@ -1901,7 +1901,7 @@ public abstract class FSUtils { } } - long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long overhead = EnvironmentEdgeManager.currentTime() - startTime; String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms"; LOG.info(overheadMsg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java index e33bf0c..dcb072e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java @@ -39,7 +39,7 @@ public class ManualEnvironmentEdge implements EnvironmentEdge { } @Override - public long currentTimeMillis() { + public long currentTime() { return this.value; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java index a7afc10..3e069d9 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java @@ -50,7 +50,7 @@ public class TableLockChecker { public void checkTableLocks() throws IOException { TableLockManager tableLockManager = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null); - final long expireDate = EnvironmentEdgeManager.currentTimeMillis() - expireTimeout; + final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout; MetadataHandler handler = new MetadataHandler() { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java index c666c14..7e49df5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java @@ -167,7 +167,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock { throws IOException, InterruptedException { boolean hasTimeout = timeoutMs != -1; long waitUntilMs = - hasTimeout ?EnvironmentEdgeManager.currentTimeMillis() + timeoutMs : -1; + hasTimeout ?EnvironmentEdgeManager.currentTime() + timeoutMs : -1; String createdZNode; try { createdZNode = createLockZNode(); @@ -196,7 +196,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock { if (ZKUtil.setWatchIfNodeExists(zkWatcher, zkPathToWatch)) { // Wait for the watcher to fire if (hasTimeout) { - long remainingMs = waitUntilMs - EnvironmentEdgeManager.currentTimeMillis(); + long remainingMs = waitUntilMs - EnvironmentEdgeManager.currentTime(); if (remainingMs < 0 || !deletedLatch.await(remainingMs, TimeUnit.MILLISECONDS)) { LOG.warn("Unable to acquire the lock in " + timeoutMs + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index b59db4e..34bd90b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4659,7 +4659,7 @@ public class TestFromClientSide { HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, Integer.MAX_VALUE); - final long ts = EnvironmentEdgeManager.currentTimeMillis(); + final long ts = EnvironmentEdgeManager.currentTime(); Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); @@ -4696,7 +4696,7 @@ public class TestFromClientSide { final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3); - final long ts = EnvironmentEdgeManager.currentTimeMillis(); + final long ts = EnvironmentEdgeManager.currentTime(); final Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index a7b3319..11a1857 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -472,14 +472,14 @@ public class TestScannersFromClientSide { HRegionServer rs = 
cluster.getRegionServer(i); ProtobufUtil.closeRegion( rs.getRSRpcServices(), rs.getServerName(), regionName); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long timeOut = 300000; while (true) { if (rs.getOnlineRegion(regionName) == null) { break; } assertTrue("Timed out in closing the testing region", - EnvironmentEdgeManager.currentTimeMillis() < startTime + timeOut); + EnvironmentEdgeManager.currentTime() < startTime + timeOut); Thread.sleep(500); } @@ -489,13 +489,13 @@ public class TestScannersFromClientSide { states.regionOffline(hri); states.updateRegionState(hri, State.OPENING); ProtobufUtil.openRegion(rs.getRSRpcServices(), rs.getServerName(), hri); - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); while (true) { if (rs.getOnlineRegion(regionName) != null) { break; } assertTrue("Timed out in open the testing region", - EnvironmentEdgeManager.currentTimeMillis() < startTime + timeOut); + EnvironmentEdgeManager.currentTime() < startTime + timeOut); Thread.sleep(500); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 27b807f..b7319b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -473,12 +473,12 @@ public class TestRegionObserverInterface { @Override public void postCompact(ObserverContext e, Store store, StoreFile resultFile) { - lastCompaction = EnvironmentEdgeManager.currentTimeMillis(); + lastCompaction = EnvironmentEdgeManager.currentTime(); } @Override public void postFlush(ObserverContext e) { - lastFlush = EnvironmentEdgeManager.currentTimeMillis(); + lastFlush = EnvironmentEdgeManager.currentTime(); } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 0264d76..05d86b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -187,7 +187,7 @@ public class TestWALObserver { assertFalse(modifiedFamily1); // it's where WAL write cp should occur. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); log.append(hri, hri.getTable(), edit, now, htd, sequenceId); // the edit shall have been change now by the coprocessor. 
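The test-side counterpart of this rename is ManualEnvironmentEdge#currentTime(), changed near the end of this patch. A minimal sketch of how a test pins the clock, assuming the injectEdge/reset hooks on EnvironmentEdgeManager and the setValue/incrementValue setters that HBase's test utilities provide:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class ManualClockExample {
  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1000L);                        // pin "now" at t=1000ms
    EnvironmentEdgeManager.injectEdge(clock);
    try {
      assert EnvironmentEdgeManager.currentTime() == 1000L;
      clock.incrementValue(5000L);                // advance 5s without sleeping
      assert EnvironmentEdgeManager.currentTime() == 6000L;
    } finally {
      EnvironmentEdgeManager.reset();             // restore the default edge
    }
  }
}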
@@ -242,7 +242,7 @@ public class TestWALObserver { HLog wal = createWAL(this.conf); // Put p = creatPutWith2Families(TEST_ROW); WALEdit edit = new WALEdit(); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // addFamilyMapToWALEdit(p.getFamilyMap(), edit); final int countPerFamily = 1000; // for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 50a9b9f5..c7050c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -375,7 +375,7 @@ public class TestCacheOnWrite { .setDataBlockEncoding(encoder.getDataBlockEncoding()) ); int rowIdx = 0; - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); for (int iFile = 0; iFile < 5; ++iFile) { for (int iRow = 0; iRow < 500; ++iRow) { String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 243ff8d..af982d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -111,7 +111,7 @@ public class TestScannerSelectionUsingTTL { HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); long version = 0; //make sure each new set of Put's have a new ts for (int iFile = 0; iFile < totalNumFiles; ++iFile) { if (iFile == NUM_EXPIRED_FILES) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 02847d1..17199aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -698,11 +698,11 @@ public class TestAssignmentManagerOnCluster { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); // Region will be opened, but it won't complete master.assignRegion(hri); - long end = EnvironmentEdgeManager.currentTimeMillis() + 20000; + long end = EnvironmentEdgeManager.currentTime() + 20000; // Wait till postOpen is called while (!MyRegionObserver.postOpenCalled ) { assertFalse("Timed out waiting for postOpen to be called", - EnvironmentEdgeManager.currentTimeMillis() > end); + EnvironmentEdgeManager.currentTime() > end); Thread.sleep(300); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java index c9fde2d..299fb06 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java @@ -61,7 +61,7 @@ public class TestClusterStatusPublisher { @Override protected List> 
getDeadServers(long since) { List<Pair<ServerName, Long>> res = new ArrayList<Pair<ServerName, Long>>(); - switch ((int) EnvironmentEdgeManager.currentTimeMillis()) { + switch ((int) EnvironmentEdgeManager.currentTime()) { case 2: res.add(new Pair<ServerName, Long>(ServerName.valueOf("hn", 10, 10), 1L)); break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 289b630..b57adcc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -1013,18 +1013,18 @@ public class TestDistributedLogSplitting { rsts.get(1).getRegionServer().abort("testing"); rsts.get(2).getRegionServer().abort("testing"); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (cluster.getLiveRegionServerThreads().size() > (NUM_RS - 3)) { - if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) { + if (EnvironmentEdgeManager.currentTime() - start > 60000) { assertTrue(false); } Thread.sleep(200); } - start = EnvironmentEdgeManager.currentTimeMillis(); + start = EnvironmentEdgeManager.currentTime(); while (HBaseTestingUtility.getAllOnlineRegions(cluster).size() < (NUM_REGIONS_TO_CREATE + 1)) { - if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) { + if (EnvironmentEdgeManager.currentTime() - start > 60000) { assertTrue("Timed out", false); } Thread.sleep(200); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 16321b4..9d2dda2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -182,7 +182,7 @@ public class TestSnapshotFromMaster { Mockito.when(mockHandler.getSnapshot()).thenReturn(desc); Mockito.when(mockHandler.isFinished()).thenReturn(new Boolean(true)); Mockito.when(mockHandler.getCompletionTimestamp()) - .thenReturn(EnvironmentEdgeManager.currentTimeMillis()); + .thenReturn(EnvironmentEdgeManager.currentTime()); master.getSnapshotManagerForTesting() .setSnapshotHandlerForTesting(TABLE_NAME, mockHandler); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index 4f8794c..52f261a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -74,7 +74,7 @@ public class TestKeepDeletes { * compact timestamps are tracked. Otherwise, forced major compaction will not purge * Deletes having the same timestamp. See ScanQueryMatcher.match(): * if (retainDeletesInOutput - * || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) + * || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp) * <= timeToPurgeDeletes) ...
) * */ @@ -99,7 +99,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -196,7 +196,7 @@ public class TestKeepDeletes { HConstants.FOREVER, false); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -241,7 +241,7 @@ public class TestKeepDeletes { HConstants.FOREVER, false); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -309,7 +309,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -399,7 +399,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Delete d = new Delete(T1, ts); d.deleteColumns(c0, c0, ts); @@ -442,7 +442,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -505,7 +505,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); p.add(c0, c1, T1); @@ -587,7 +587,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -679,7 +679,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -730,7 +730,7 @@ public class TestKeepDeletes { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; // 2s in the past + long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past Put p = new Put(T1, ts); p.add(c0, c0, T3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 73a712f..cbb9018 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -69,7 +69,7 @@ public class TestMinVersions { try { // 2s in the
past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -116,7 +116,7 @@ public class TestMinVersions { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, false); HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-1); @@ -171,7 +171,7 @@ public class TestMinVersions { HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-2); @@ -229,7 +229,7 @@ public class TestMinVersions { HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { // 2nd version @@ -305,7 +305,7 @@ public class TestMinVersions { try { // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; // 1st version Put p = new Put(T1, ts-3); @@ -396,7 +396,7 @@ public class TestMinVersions { final byte [] c1 = COLUMNS[1]; // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 4945ad1..0465b93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -94,7 +94,7 @@ public class TestQueryMatcher extends HBaseTestCase { // 2,4,5 ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, ttl, false, 0, rowComparator), get.getFamilyMap().get(fam2), - EnvironmentEdgeManager.currentTimeMillis() - ttl); + EnvironmentEdgeManager.currentTime() - ttl); List<KeyValue> memstore = new ArrayList<KeyValue>(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -176,7 +176,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, ttl, false, 0, rowComparator), null, - EnvironmentEdgeManager.currentTimeMillis() - ttl); + EnvironmentEdgeManager.currentTime() - ttl); List<KeyValue> memstore = new ArrayList<KeyValue>(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -227,7 +227,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher.MatchCode.DONE }; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), get.getFamilyMap().get(fam2), now - testTTL); @@ -281,7 +281,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher.MatchCode.DONE }; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), null, now - testTTL); @@ -336,7 +336,7 @@ public class TestQueryMatcher extends HBaseTestCase {
private void testDropDeletes( byte[] from, byte[] to, byte[][] rows, MatchCode... expected) throws IOException { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Set time to purge deletes to negative value to avoid it ever happening. ScanInfo scanInfo = new ScanInfo(fam2, 0, 1, ttl, false, -1L, rowComparator); NavigableSet<byte[]> cols = get.getFamilyMap().get(fam2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index db083ee..c9ad8bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -133,10 +133,10 @@ public class TestRegionMergeTransactionOnCluster { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); AssignmentManager am = cluster.getMaster().getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (!regionStates.isRegionInState(hri, State.MERGED)) { assertFalse("Timed out waiting for the merged region to be in state MERGED", - EnvironmentEdgeManager.currentTimeMillis() - start > 60000); + EnvironmentEdgeManager.currentTime() - start > 60000); Thread.sleep(500); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 1758316..3acabf2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -799,10 +799,10 @@ public class TestSplitTransactionOnCluster { hri = region.getRegionInfo(); // split parent AssignmentManager am = cluster.getMaster().getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (!regionStates.isRegionInState(hri, State.SPLIT)) { assertFalse("Timed out waiting for the split parent to be in state SPLIT", - EnvironmentEdgeManager.currentTimeMillis() - start > 60000); + EnvironmentEdgeManager.currentTime() - start > 60000); Thread.sleep(500); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 9600932..123ff0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -297,7 +297,7 @@ public class TestStore { // store files will be (this.store.ttl / storeFileNum) for (int i = 1; i <= storeFileNum; i++) { LOG.info("Adding some data for the store file #" + i); - timeStamp = EnvironmentEdgeManager.currentTimeMillis(); + timeStamp = EnvironmentEdgeManager.currentTime(); this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null)); this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null)); this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null)); @@ -661,7 +661,7 @@ public class TestStore { long
oldValue = 1L; long newValue = 3L; this.store.add(new KeyValue(row, family, qf1, - EnvironmentEdgeManager.currentTimeMillis(), + EnvironmentEdgeManager.currentTime(), Bytes.toBytes(oldValue))); // snapshot the store. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index f67ab1f..68a99aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -116,7 +116,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { final byte[] v3 = Bytes.toBytes("v3"); htable1 = new HTable(conf1, tableName); - long t = EnvironmentEdgeManager.currentTimeMillis(); + long t = EnvironmentEdgeManager.currentTime(); // create three versions for "row" Put put = new Put(row); put.add(famName, row, t, v1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index e3ea397..fd8c9a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -367,7 +367,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { try { // Store read only ACL at a future time Put p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, - EnvironmentEdgeManager.currentTimeMillis() + 1000000, + EnvironmentEdgeManager.currentTime() + 1000000, ZERO); p.setACL(USER_OTHER.getShortName(), new Permission(Permission.Action.READ)); t.put(p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 8f8d92d..1773027 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -117,7 +117,7 @@ public class TestTokenAuthentication { public TokenServer(Configuration conf) throws IOException { this.conf = conf; - this.startcode = EnvironmentEdgeManager.currentTimeMillis(); + this.startcode = EnvironmentEdgeManager.currentTime(); // Server to handle client requests.
String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost("default", "default")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java index d471c39..b22a713 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java @@ -155,7 +155,7 @@ public class TestZKSecretWatcher { LOG.debug("Master new current key: "+key3.getKeyId()); // force expire the original key - key1.setExpiration(EnvironmentEdgeManager.currentTimeMillis() - 1000); + key1.setExpiration(EnvironmentEdgeManager.currentTime() - 1000); KEY_MASTER.removeExpiredKeys(); // verify removed from master assertNull(KEY_MASTER.getKey(key1.getKeyId())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index dd2bfd4..dba1bba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -115,7 +115,7 @@ public class TestCoprocessorScanPolicy { p.add(F, tableName.getName(), Bytes.toBytes(2)); t.put(p); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // insert 2 versions p = new Put(R); @@ -165,7 +165,7 @@ public class TestCoprocessorScanPolicy { desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ManualEnvironmentEdge me = new ManualEnvironmentEdge(); me.setValue(now); EnvironmentEdgeManagerTestHelper.injectEdge(me); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java index 5c9208a..d82db0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java @@ -53,7 +53,7 @@ public class TestFSHDFSUtils { @Before public void setup() { - this.startTime = EnvironmentEdgeManager.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTime(); } /** @@ -73,7 +73,7 @@ public class TestFSHDFSUtils { Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... then we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) > + assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); }
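The TestCoprocessorScanPolicy hunk above shows why every call site goes through EnvironmentEdgeManager rather than System.currentTimeMillis(): tests can inject a ManualEnvironmentEdge and dictate what currentTime() returns. A minimal sketch of that injection pattern, assuming JUnit 4; the class name and the literal timestamps are illustrative (EnvironmentEdgeManagerTestHelper, used in the hunk above, delegates to the same injectEdge call):

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.junit.After;
import org.junit.Test;

public class ManualClockExample {
  @Test
  public void testCurrentTimeIsInjectable() {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1000L); // freeze "now" at an arbitrary instant
    EnvironmentEdgeManager.injectEdge(clock);
    assertEquals(1000L, EnvironmentEdgeManager.currentTime());
    clock.setValue(6000L); // advance the clock without sleeping
    assertEquals(6000L, EnvironmentEdgeManager.currentTime());
  }

  @After
  public void tearDown() {
    EnvironmentEdgeManager.reset(); // restore the default system-clock edge
  }
}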