Index: hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
*
@@ -160,7 +161,7 @@
long run() throws Exception {
long elapsedTime;
setUp();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
for (int i = 0; i < totalRows; i++) {
if (i > 0 && i % getReportingPeriod() == 0) {
@@ -168,7 +169,7 @@
}
doRow(i);
}
- elapsedTime = System.currentTimeMillis() - startTime;
+ elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
} finally {
tearDown();
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java (working copy)
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.DataInputBuffer;
import org.junit.Test;
@@ -156,7 +157,7 @@
byte[] qf7 = "qf7".getBytes();
byte[] qf8 = "qf8".getBytes();
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
byte[] val = "val".getBytes();
Put put = new Put(row);
@@ -226,7 +227,7 @@
byte[] fam = "fam".getBytes();
byte[] qf1 = "qf1".getBytes();
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Delete delete = new Delete(row);
delete.deleteColumn(fam, qf1, ts);
@@ -253,7 +254,7 @@
byte[] fam = "fam".getBytes();
byte[] qf1 = "qf1".getBytes();
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
int maxVersions = 2;
long lockid = 5;
RowLock rowLock = new RowLock(lockid);
@@ -296,7 +297,7 @@
byte[] fam = "fam".getBytes();
byte[] qf1 = "qf1".getBytes();
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
int maxVersions = 2;
Scan scan = new Scan(startRow, stopRow);
@@ -530,7 +531,7 @@
byte[] row = name.getBytes();
byte[] fam = "fam".getBytes();
byte[] qf = "qf".getBytes();
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
byte[] val = "val".getBytes();
KeyValue kv = new KeyValue(row, fam, qf, ts, val);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
public abstract class MultithreadedTestUtil {
@@ -61,9 +62,9 @@
}
public void waitFor(long millis) throws Exception {
- long endTime = System.currentTimeMillis() + millis;
+ long endTime = EnvironmentEdgeManager.currentTimeMillis() + millis;
while (!stopped) {
- long left = endTime - System.currentTimeMillis();
+ long left = endTime - EnvironmentEdgeManager.currentTimeMillis();
if (left <= 0) break;
synchronized (this) {
checkException();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java (working copy)
@@ -43,6 +43,7 @@
import org.apache.hadoop.hbase.rest.model.TableListModel;
import org.apache.hadoop.hbase.rest.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import static org.junit.Assert.*;
@@ -108,8 +109,8 @@
admin.split(TABLE);
// give some time for the split to happen
- long timeout = System.currentTimeMillis() + (15 * 1000);
- while (System.currentTimeMillis() < timeout && m.size()!=2){
+ long timeout = EnvironmentEdgeManager.currentTimeMillis() + (15 * 1000);
+ while (EnvironmentEdgeManager.currentTimeMillis() < timeout && m.size()!=2){
try {
Thread.sleep(250);
} catch (InterruptedException e) {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java (working copy)
@@ -61,6 +61,7 @@
import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
import org.apache.hadoop.hbase.util.Pair;
@@ -675,7 +676,7 @@
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed =
- new Random(System.currentTimeMillis());
+ new Random(EnvironmentEdgeManager.currentTimeMillis());
private static long nextRandomSeed() {
return randomSeed.nextLong();
}
@@ -730,10 +731,10 @@
long test() throws IOException {
long elapsedTime;
testSetup();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
testTimed();
- elapsedTime = System.currentTimeMillis() - startTime;
+ elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
} finally {
testTakedown();
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java (working copy)
@@ -40,6 +40,7 @@
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import static org.junit.Assert.*;
import org.junit.AfterClass;
@@ -64,7 +65,7 @@
private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
private static final long ONE_HOUR = 60 * 60 * 1000;
- private static final long TS_2 = System.currentTimeMillis();
+ private static final long TS_2 = EnvironmentEdgeManager.currentTimeMillis();
private static final long TS_1 = TS_2 - ONE_HOUR;
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java (working copy)
@@ -56,6 +56,7 @@
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.hadoop.hbase.thrift2.generated.TScan;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsUtil;
@@ -292,14 +293,14 @@
List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
TColumnValue columnValueA = new TColumnValue(ByteBuffer.wrap(familyAname), ByteBuffer.wrap(qualifierAname),
ByteBuffer.wrap(valueAname));
- columnValueA.setTimestamp(System.currentTimeMillis() - 10);
+ columnValueA.setTimestamp(EnvironmentEdgeManager.currentTimeMillis() - 10);
columnValues.add(columnValueA);
TPut put = new TPut(ByteBuffer.wrap(rowName), columnValues);
put.setColumnValues(columnValues);
handler.put(table, put);
- columnValueA.setTimestamp(System.currentTimeMillis());
+ columnValueA.setTimestamp(EnvironmentEdgeManager.currentTimeMillis());
handler.put(table, put);
TGet get = new TGet(ByteBuffer.wrap(rowName));
@@ -329,8 +330,8 @@
byte[] rowName = "testDeleteSingleTimestamp".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname);
- long timestamp1 = System.currentTimeMillis() - 10;
- long timestamp2 = System.currentTimeMillis();
+ long timestamp1 = EnvironmentEdgeManager.currentTimeMillis() - 10;
+ long timestamp2 = EnvironmentEdgeManager.currentTimeMillis();
List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
TColumnValue columnValueA = new TColumnValue(ByteBuffer.wrap(familyAname), ByteBuffer.wrap(qualifierAname),
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (working copy)
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.balancer.DefaultLoadBalancer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -158,8 +159,8 @@
connectionZK.getRecoverableZooKeeper().getSessionId()));
// It's asynchronous, so we may have to wait a little...
- final long limit1 = System.currentTimeMillis() + 3000;
- while (System.currentTimeMillis() < limit1 && state != States.CLOSED){
+ final long limit1 = EnvironmentEdgeManager.currentTimeMillis() + 3000;
+ while (EnvironmentEdgeManager.currentTimeMillis() < limit1 && state != States.CLOSED){
state = connectionZK.getRecoverableZooKeeper().getState();
}
LOG.info("After using zkw loop=" + state);
@@ -180,8 +181,8 @@
// As it's an asynchronous event we may got the same ZKW, if it's not
// yet invalidated. Hence this loop.
- final long limit2 = System.currentTimeMillis() + 3000;
- while (System.currentTimeMillis() < limit2 &&
+ final long limit2 = EnvironmentEdgeManager.currentTimeMillis() + 3000;
+ while (EnvironmentEdgeManager.currentTimeMillis() < limit2 &&
state2 != States.CONNECTED && state2 != States.CONNECTING) {
newConnectionZK = getZooKeeperWatcher(connection);
@@ -234,7 +235,7 @@
public void testSanity() throws Exception{
HBaseAdmin admin =
new HBaseAdmin(TEST_UTIL.getConfiguration());
- String tableName = "test"+System.currentTimeMillis();
+ String tableName = "test"+EnvironmentEdgeManager.currentTimeMillis();
HTableDescriptor desc = new HTableDescriptor(tableName);
HColumnDescriptor family = new HColumnDescriptor("fam");
desc.addFamily(family);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.KeyValue.MetaComparator;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.WritableUtils;
import org.junit.experimental.categories.Category;
@@ -133,7 +134,7 @@
public void testMoreComparisons() throws Exception {
// Root compares
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now);
KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now);
KVComparator c = new KeyValue.RootComparator();
@@ -172,7 +173,7 @@
public void testBadMetaCompareSingleDelim() {
MetaComparator c = new KeyValue.MetaComparator();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// meta keys values are not quite right. A users can enter illegal values
// from shell when scanning meta.
KeyValue a = new KeyValue(Bytes.toBytes("table,a1"), now);
@@ -189,7 +190,7 @@
public void testMetaComparatorTableKeysWithCommaOk() {
MetaComparator c = new KeyValue.MetaComparator();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// meta keys values are not quite right. A users can enter illegal values
// from shell when scanning meta.
KeyValue a = new KeyValue(Bytes.toBytes("table,key,with,commas1,1234"), now);
@@ -225,7 +226,7 @@
}
private void metacomparisons(final KeyValue.MetaComparator c) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now),
new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0);
KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now);
@@ -236,7 +237,7 @@
}
private void comparisons(final KeyValue.KVComparator c) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
new KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0);
assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now),
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/MapFilePerformanceEvaluation.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.WritableComparable;
/**
@@ -158,7 +159,7 @@
long run() throws Exception {
long elapsedTime;
setUp();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
for (int i = 0; i < totalRows; i++) {
if (i > 0 && i % getReportingPeriod() == 0) {
@@ -166,7 +167,7 @@
}
doRow(i);
}
- elapsedTime = System.currentTimeMillis() - startTime;
+ elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
} finally {
tearDown();
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (working copy)
@@ -61,6 +61,7 @@
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
@@ -793,7 +794,7 @@
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed =
- new Random(System.currentTimeMillis());
+ new Random(EnvironmentEdgeManager.currentTimeMillis());
private static long nextRandomSeed() {
return randomSeed.nextLong();
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java (working copy)
@@ -232,10 +232,10 @@
new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
FileStatus future =
new FileStatus(0, false, 0, 0, -1,
- new Path("/tmp/tablinfo." + System.currentTimeMillis()));
+ new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTimeMillis()));
FileStatus farFuture =
new FileStatus(0, false, 0, 0, -1,
- new Path("/tmp/tablinfo." + System.currentTimeMillis() + 1000));
+ new Path("/tmp/tablinfo." + EnvironmentEdgeManager.currentTimeMillis() + 1000));
FileStatus [] alist = {bare, future, farFuture};
FileStatus [] blist = {bare, farFuture, future};
FileStatus [] clist = {farFuture, bare, future};
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIdLock.java (working copy)
@@ -64,8 +64,8 @@
public Boolean call() throws Exception {
Thread.currentThread().setName(clientId);
Random rand = new Random();
- long endTime = System.currentTimeMillis() + NUM_SECONDS * 1000;
- while (System.currentTimeMillis() < endTime) {
+ long endTime = EnvironmentEdgeManager.currentTimeMillis() + NUM_SECONDS * 1000;
+ while (EnvironmentEdgeManager.currentTimeMillis() < endTime) {
long id = rand.nextInt(NUM_IDS);
IdLock.Entry lockEntry = idLock.getLockEntry(id);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java (working copy)
@@ -49,7 +49,7 @@
private static final byte [] VALUE;
static {
// We will use the same value for the rows as that is not really important here
- String partialValue = String.valueOf(System.currentTimeMillis());
+ String partialValue = String.valueOf(EnvironmentEdgeManager.currentTimeMillis());
StringBuilder val = new StringBuilder();
while (val.length() < 1024) {
val.append(partialValue);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java (working copy)
@@ -148,7 +148,7 @@
LOG.info("Started thread #" + readerId + " for reads...");
}
- startTimeMs = System.currentTimeMillis();
+ startTimeMs = EnvironmentEdgeManager.currentTimeMillis();
curKey = startKey;
while (curKey < endKey && !aborted) {
long k = getNextKeyToRead();
@@ -236,7 +236,7 @@
numReadFailures.addAndGet(1);
LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
+ ", time from start: "
- + (System.currentTimeMillis() - startTimeMs) + " ms");
+ + (EnvironmentEdgeManager.currentTimeMillis() - startTimeMs) + " ms");
}
return get;
}
@@ -245,9 +245,9 @@
String rowKey = Bytes.toString(get.getRow());
// read the data
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
Result result = table.get(get);
- totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+ totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTimeMillis() - start);
numKeys.addAndGet(1);
// if we got no data report error
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java (working copy)
@@ -167,10 +167,10 @@
put.add(columnFamily, Bytes.toBytes(colAsStr),
dataGenerator.generateRandomSizeValue(rowKey, colAsStr));
try {
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
table.put(put);
numCols.addAndGet(1);
- totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+ totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTimeMillis() - start);
} catch (IOException e) {
failedKeySet.add(rowKey);
LOG.error("Failed to insert: " + rowKey);
@@ -200,11 +200,11 @@
}
try {
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
table.put(put);
numCols.addAndGet(endCol - startCol);
totalOpTimeMs.addAndGet(
- System.currentTimeMillis() - start);
+ EnvironmentEdgeManager.currentTimeMillis() - start);
} catch (IOException e) {
failedKeySet.add(rowKey);
e.printStackTrace();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java (working copy)
@@ -118,7 +118,7 @@
// given the default replication factor is 3, the same as the number of
// datanodes; the locality index for each host should be 100%,
// or getWeight for each host should be the same as getUniqueBlocksWeights
- final long maxTime = System.currentTimeMillis() + 2000;
+ final long maxTime = EnvironmentEdgeManager.currentTimeMillis() + 2000;
boolean ok;
do {
ok = true;
@@ -131,7 +131,7 @@
long weight = blocksDistribution.getWeight(host);
ok = (ok && uniqueBlocksTotalWeight == weight);
}
- } while (!ok && System.currentTimeMillis() < maxTime);
+ } while (!ok && EnvironmentEdgeManager.currentTimeMillis() < maxTime);
assertTrue(ok);
} finally {
htu.shutdownMiniDFSCluster();
@@ -152,7 +152,7 @@
// given the default replication factor is 3, we will have total of 9
// replica of blocks; thus the host with the highest weight should have
// weight == 3 * DEFAULT_BLOCK_SIZE
- final long maxTime = System.currentTimeMillis() + 2000;
+ final long maxTime = EnvironmentEdgeManager.currentTimeMillis() + 2000;
long weight;
long uniqueBlocksTotalWeight;
do {
@@ -165,7 +165,7 @@
weight = blocksDistribution.getWeight(tophost);
// NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
- } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
+ } while (uniqueBlocksTotalWeight != weight && EnvironmentEdgeManager.currentTimeMillis() < maxTime);
assertTrue(uniqueBlocksTotalWeight == weight);
} finally {
@@ -186,14 +186,14 @@
// given the default replication factor is 3, we will have total of 3
// replica of blocks; thus there is one host without weight
- final long maxTime = System.currentTimeMillis() + 2000;
+ final long maxTime = EnvironmentEdgeManager.currentTimeMillis() + 2000;
HDFSBlocksDistribution blocksDistribution;
do {
FileStatus status = fs.getFileStatus(testFile);
blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
// NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
}
- while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
+ while (blocksDistribution.getTopHosts().size() != 3 && EnvironmentEdgeManager.currentTimeMillis() < maxTime);
assertEquals("Wrong number of hosts distributing blocks.", 3,
blocksDistribution.getTopHosts().size());
} finally {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java (working copy)
@@ -48,7 +48,7 @@
});
LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)");
sleeper.start();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
LOG.debug("Main thread: sleeping for 500 ms");
Threads.sleep(500);
@@ -67,7 +67,7 @@
assertTrue("sleepWithoutInterrupt did not preserve the thread's " +
"interrupted status", wasInterrupted);
- long timeElapsed = System.currentTimeMillis() - startTime;
+ long timeElapsed = EnvironmentEdgeManager.currentTimeMillis() - startTime;
assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " +
"range of the sleep time " + SLEEP_TIME_MS,
Math.abs(timeElapsed - SLEEP_TIME_MS) < TOLERANCE_MS);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestByteBloomFilter.java (working copy)
@@ -108,23 +108,23 @@
float err = (float)0.01;
ByteBloomFilter b = new ByteBloomFilter(10*1000*1000, (float)err, Hash.MURMUR_HASH, 3);
b.allocBloom();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long origSize = b.getByteSize();
for (int i = 0; i < 1*1000*1000; ++i) {
b.add(Bytes.toBytes(i));
}
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
System.out.println("Total Add time = " + (endTime - startTime) + "ms");
// fold
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
b.compactBloom();
- endTime = System.currentTimeMillis();
+ endTime = EnvironmentEdgeManager.currentTimeMillis();
System.out.println("Total Fold time = " + (endTime - startTime) + "ms");
assertTrue(origSize >= b.getByteSize()<<3);
// test
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
int falsePositives = 0;
for (int i = 0; i < 2*1000*1000; ++i) {
@@ -134,7 +134,7 @@
assertFalse(i < 1*1000*1000);
}
}
- endTime = System.currentTimeMillis();
+ endTime = EnvironmentEdgeManager.currentTimeMillis();
System.out.println("Total Contains time = " + (endTime - startTime) + "ms");
System.out.println("False Positive = " + falsePositives);
assertTrue(falsePositives <= (1*1000*1000)*err);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java (working copy)
@@ -99,7 +99,7 @@
@Override
public void run() {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long priorNumKeys = 0;
long priorCumulativeOpTime = 0;
int priorAverageKeysPerSecond = 0;
@@ -114,7 +114,7 @@
LOG.info(threadsLeft + "Number of keys = 0");
} else {
long numKeys = MultiThreadedAction.this.numKeys.get();
- long time = System.currentTimeMillis() - startTime;
+ long time = EnvironmentEdgeManager.currentTimeMillis() - startTime;
long totalOpTime = totalOpTimeMs.get();
long numKeysDelta = numKeys - priorNumKeys;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (working copy)
@@ -264,7 +264,7 @@
// Create a log that we can reuse when we need to open regions
Path logPath = new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
- System.currentTimeMillis());
+ EnvironmentEdgeManager.currentTimeMillis());
LOG.info("Creating log " + logPath.toString());
Path oldLogDir = new Path("/tmp", HConstants.HREGION_OLDLOGDIR_NAME);
HLog log = new HLog(this.fs, logPath, oldLogDir, this.conf);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java (working copy)
@@ -36,7 +36,7 @@
@Test
public void testGetCurrentTimeUsesSystemClock() {
DefaultEnvironmentEdge edge = new DefaultEnvironmentEdge();
- long systemTime = System.currentTimeMillis();
+ long systemTime = EnvironmentEdgeManager.currentTimeMillis();
long edgeTime = edge.currentTimeMillis();
assertTrue("System time must be either the same or less than the edge time",
systemTime < edgeTime || systemTime == edgeTime);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy)
@@ -70,6 +70,7 @@
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.io.DataInputBuffer;
import org.junit.After;
@@ -154,7 +155,7 @@
Configuration c = TEST_UTIL.getConfiguration();
HTable h = new HTable(c, TABLENAME);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(FAMILY, C0, T1);
h.put(p);
@@ -4671,13 +4672,13 @@
private void waitForStoreFileCount(Store store, int count, int timeout)
throws InterruptedException {
- long start = System.currentTimeMillis();
- while (start + timeout > System.currentTimeMillis() &&
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ while (start + timeout > EnvironmentEdgeManager.currentTimeMillis() &&
store.getNumberOfStoreFiles() != count) {
Thread.sleep(100);
}
System.out.println("start=" + start + ", now=" +
- System.currentTimeMillis() + ", cur=" + store.getNumberOfStoreFiles());
+ EnvironmentEdgeManager.currentTimeMillis() + ", cur=" + store.getNumberOfStoreFiles());
assertEquals(count, store.getNumberOfStoreFiles());
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (working copy)
@@ -52,6 +52,7 @@
import org.apache.hadoop.hbase.regionserver.wal.HLogUtilsForTests;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.*;
@@ -1287,8 +1288,8 @@
}
boolean isInList = ProtobufUtil.getOnlineRegions(rs).contains(info);
- long timeout = System.currentTimeMillis() + 2000;
- while ((System.currentTimeMillis() < timeout) && (isInList)) {
+ long timeout = EnvironmentEdgeManager.currentTimeMillis() + 2000;
+ while ((EnvironmentEdgeManager.currentTimeMillis() < timeout) && (isInList)) {
Thread.sleep(100);
isInList = ProtobufUtil.getOnlineRegions(rs).contains(info);
}
@@ -1570,7 +1571,7 @@
int initialCount = HConnectionTestingUtility.getConnectionCount();
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
try {
HBaseAdmin.checkHBaseAvailable(conf);
assertTrue(false);
@@ -1578,7 +1579,7 @@
} catch (ZooKeeperConnectionException ignored) {
} catch (ServiceException ignored) {
}
- long end = System.currentTimeMillis();
+ long end = EnvironmentEdgeManager.currentTimeMillis();
int finalCount = HConnectionTestingUtility.getConnectionCount();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy)
@@ -77,6 +77,7 @@
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.RegionSplitter;
@@ -1650,10 +1651,10 @@
public void waitTableAvailable(byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
- long startWait = System.currentTimeMillis();
+ long startWait = EnvironmentEdgeManager.currentTimeMillis();
while (!getHBaseAdmin().isTableAvailable(table)) {
assertTrue("Timed out waiting for table " + Bytes.toStringBinary(table),
- System.currentTimeMillis() - startWait < timeoutMillis);
+ EnvironmentEdgeManager.currentTimeMillis() - startWait < timeoutMillis);
Thread.sleep(200);
}
}
@@ -2135,11 +2136,11 @@
public void assertRegionOnServer(
final HRegionInfo hri, final ServerName server,
final long timeout) throws IOException, InterruptedException {
- long timeoutTime = System.currentTimeMillis() + timeout;
+ long timeoutTime = EnvironmentEdgeManager.currentTimeMillis() + timeout;
while (true) {
List<HRegionInfo> regions = getHBaseAdmin().getOnlineRegions(server);
if (regions.contains(hri)) return;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now > timeoutTime) break;
Thread.sleep(10);
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java (working copy)
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.thrift.generated.TRegionInfo;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
@@ -375,13 +376,13 @@
handler.createTable(tableAname, getColumnDescriptors());
// Apply timestamped Mutations to rowA
- long time1 = System.currentTimeMillis();
+ long time1 = EnvironmentEdgeManager.currentTimeMillis();
handler.mutateRowTs(tableAname, rowAname, getMutations(), time1, null);
Thread.sleep(1000);
// Apply timestamped BatchMutations for rowA and rowB
- long time2 = System.currentTimeMillis();
+ long time2 = EnvironmentEdgeManager.currentTimeMillis();
handler.mutateRowsTs(tableAname, getBatchMutations(), time2, null);
// Apply an overlapping timestamped mutation to rowB
@@ -454,7 +455,7 @@
handler.createTable(tableAname, getColumnDescriptors());
// Apply timestamped Mutations to rowA
- long time1 = System.currentTimeMillis();
+ long time1 = EnvironmentEdgeManager.currentTimeMillis();
handler.mutateRowTs(tableAname, rowAname, getMutations(), time1, null);
// Sleep to assure that 'time1' and 'time2' will be different even with a
@@ -462,7 +463,7 @@
Thread.sleep(1000);
// Apply timestamped BatchMutations for rowA and rowB
- long time2 = System.currentTimeMillis();
+ long time2 = EnvironmentEdgeManager.currentTimeMillis();
handler.mutateRowsTs(tableAname, getBatchMutations(), time2, null);
time1 += 1;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
@@ -56,7 +57,7 @@
public static void concurrentReads(final Runnable r) {
final int count = 1;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
List<Thread> threads = new ArrayList<Thread>(count);
for (int i = 0; i < count; i++) {
Thread t = new Thread(r);
@@ -73,6 +74,6 @@
e.printStackTrace();
}
}
- LOG.info("Test took " + (System.currentTimeMillis() - now));
+ LOG.info("Test took " + (EnvironmentEdgeManager.currentTimeMillis() - now));
}
}
\ No newline at end of file
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (working copy)
@@ -62,6 +62,7 @@
import org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.util.StringUtils;
@@ -581,7 +582,7 @@
Random rand = new Random();
StoreFileScanner scanner = reader.getStoreFileScanner(true, pread);
- while (System.currentTimeMillis() < endTime) {
+ while (EnvironmentEdgeManager.currentTimeMillis() < endTime) {
byte[] row = createRandomRow(rand, firstRow, lastRow);
KeyValue kvToSeek = new KeyValue(row, family,
createRandomQualifier(rand));
@@ -654,9 +655,9 @@
LOG.info("Starting statistics printer");
thread = Thread.currentThread();
thread.setName(StatisticsPrinter.class.getSimpleName());
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long curTime;
- while ((curTime = System.currentTimeMillis()) < endTime &&
+ while ((curTime = EnvironmentEdgeManager.currentTimeMillis()) < endTime &&
!stopRequested) {
long elapsedTime = curTime - startTime;
printStats(elapsedTime);
@@ -768,7 +769,7 @@
try {
ExecutorCompletionService<Boolean> ecs =
new ExecutorCompletionService<Boolean>(exec);
- endTime = System.currentTimeMillis() + 1000 * durationSec;
+ endTime = EnvironmentEdgeManager.currentTimeMillis() + 1000 * durationSec;
boolean pread = true;
for (int i = 0; i < numReadThreads; ++i)
ecs.submit(new RandomReader(i, reader, pread));
@@ -776,7 +777,7 @@
Future<Boolean> result;
while (true) {
try {
- result = ecs.poll(endTime + 1000 - System.currentTimeMillis(),
+ result = ecs.poll(endTime + 1000 - EnvironmentEdgeManager.currentTimeMillis(),
TimeUnit.MILLISECONDS);
if (result == null)
break;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java (working copy)
@@ -42,6 +42,7 @@
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -95,10 +96,10 @@
HLog hlog = region.getLog();
try {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < numIterations; ++i) {
Put put = setupPut(rand, key, value, numFamilies);
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
WALEdit walEdit = new WALEdit();
addFamilyMapToWALEdit(put.getFamilyMap(), walEdit);
HRegionInfo hri = region.getRegionInfo();
@@ -109,7 +110,7 @@
hlog.append(hri, hri.getTableName(), walEdit, now, htd);
}
}
- long totalTime = (System.currentTimeMillis() - startTime);
+ long totalTime = (EnvironmentEdgeManager.currentTimeMillis() - startTime);
logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
} catch (Exception e) {
LOG.error(getClass().getSimpleName() + " Thread failed", e);
@@ -338,13 +339,13 @@
private long runBenchmark(Runnable runnable, final int numThreads) throws InterruptedException {
Thread[] threads = new Thread[numThreads];
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < numThreads; ++i) {
threads[i] = new Thread(runnable);
threads[i].start();
}
for (Thread t : threads) t.join();
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
return(endTime - startTime);
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java (working copy)
@@ -57,6 +57,7 @@
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.ipc.RemoteException;
@@ -169,7 +170,7 @@
Path regiondir = new Path(tdir,
HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
fs.mkdirs(regiondir);
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
HLog.Entry entry =
new HLog.Entry(new HLogKey(encoded,
HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID),
@@ -856,7 +857,7 @@
kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("column"));
- log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
+ log.append(regioninfo, tableName, kvs, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
// Send the data to HDFS datanodes and close the HDFS writer
log.sync();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -111,7 +112,7 @@
this.log.info(getName() +" started");
try {
for (int i = 0; i < this.count; i++) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// Roll every ten edits if the log has anything in it.
if (i % 10 == 0 && this.wal.getNumEntries() > 0) {
this.wal.rollWriter();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (working copy)
@@ -248,7 +248,7 @@
put.add(HConstants.CATALOG_FAMILY, null, value);
table.put(put);
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long remaining = timeout;
while (remaining > 0) {
if (log.isLowReplicationRollEnabled() == expect) {
@@ -259,7 +259,7 @@
} catch (InterruptedException e) {
// continue
}
- remaining = timeout - (System.currentTimeMillis() - startTime);
+ remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
}
@@ -342,7 +342,7 @@
writeData(table, 2);
- long curTime = System.currentTimeMillis();
+ long curTime = EnvironmentEdgeManager.currentTimeMillis();
long oldFilenum = log.getFilenum();
assertTrue("Log should have a timestamp older than now",
curTime > oldFilenum && oldFilenum != -1);
@@ -454,7 +454,7 @@
table.setAutoFlush(true);
- long curTime = System.currentTimeMillis();
+ long curTime = EnvironmentEdgeManager.currentTimeMillis();
long oldFilenum = log.getFilenum();
assertTrue("Log should have a timestamp older than now",
curTime > oldFilenum && oldFilenum != -1);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.EntryBuffers;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.RegionEntryBuffer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -70,7 +71,7 @@
String last = HLogSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
createFile(fs, recoverededits, last);
createFile(fs, recoverededits,
- Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());
+ Long.toString(Long.MAX_VALUE) + "." + EnvironmentEdgeManager.currentTimeMillis());
NavigableSet<Path> files = HLog.getSplitEditFilesSorted(fs, regiondir);
assertEquals(7, files.size());
assertEquals(files.pollFirst().getName(), first);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (working copy)
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -183,10 +184,10 @@
byte [] qualifier = Bytes.toBytes(Integer.toString(j));
byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
edit.add(new KeyValue(rowName, family, qualifier,
- System.currentTimeMillis(), column));
+ EnvironmentEdgeManager.currentTimeMillis(), column));
LOG.info("Region " + i + ": " + edit);
log.append(infos[i], tableName, edit,
- System.currentTimeMillis(), htd);
+ EnvironmentEdgeManager.currentTimeMillis(), htd);
}
}
log.rollWriter();
@@ -249,7 +250,7 @@
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ wal.append(info, bytes, kvs, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
// Now call sync and try reading. Opening a Reader before you sync just
// gives you EOFE.
@@ -267,7 +268,7 @@
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ wal.append(info, bytes, kvs, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
reader = HLog.getReader(fs, walPath, conf);
count = 0;
@@ -286,7 +287,7 @@
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
- wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
+ wal.append(info, bytes, kvs, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
// Now I should have written out lots of blocks. Sync then read.
wal.sync();
@@ -388,7 +389,7 @@
for (int i = 0; i < total; i++) {
WALEdit kvs = new WALEdit();
kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
- wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
+ wal.append(regioninfo, tableName, kvs, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
// Now call sync to send the data to HDFS datanodes
wal.sync();
@@ -503,7 +504,7 @@
log = new HLog(fs, dir, oldLogDir, conf);
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
WALEdit cols = new WALEdit();
for (int i = 0; i < COL_COUNT; i++) {
cols.add(new KeyValue(row, Bytes.toBytes("column"),
@@ -515,7 +516,7 @@
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor("column"));
- log.append(info, tableName, cols, System.currentTimeMillis(), htd);
+ log.append(info, tableName, cols, EnvironmentEdgeManager.currentTimeMillis(), htd);
long logSeqId = log.startCacheFlush(info.getEncodedNameAsBytes());
log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
info.isMetaRegion());
@@ -575,7 +576,7 @@
try {
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
WALEdit cols = new WALEdit();
for (int i = 0; i < COL_COUNT; i++) {
cols.add(new KeyValue(row, Bytes.toBytes("column"),
@@ -586,7 +587,7 @@
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor("column"));
- log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+ log.append(hri, tableName, cols, EnvironmentEdgeManager.currentTimeMillis(), htd);
long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
log.close();
@@ -643,7 +644,7 @@
try {
DumbWALActionsListener visitor = new DumbWALActionsListener();
log.registerWALActionsListener(visitor);
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
HTableDescriptor htd = new HTableDescriptor();
htd.addFamily(new HColumnDescriptor("column"));
@@ -654,7 +655,7 @@
cols.add(new KeyValue(row, Bytes.toBytes("column"),
Bytes.toBytes(Integer.toString(i)),
timestamp, new byte[]{(byte) (i + '0')}));
- log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+ log.append(hri, tableName, cols, EnvironmentEdgeManager.currentTimeMillis(), htd);
}
assertEquals(COL_COUNT, visitor.increments);
log.unregisterWALActionsListener(visitor);
@@ -662,7 +663,7 @@
cols.add(new KeyValue(row, Bytes.toBytes("column"),
Bytes.toBytes(Integer.toString(11)),
timestamp, new byte[]{(byte) (11 + '0')}));
- log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
+ log.append(hri, tableName, cols, EnvironmentEdgeManager.currentTimeMillis(), htd);
assertEquals(COL_COUNT, visitor.increments);
} finally {
if (log != null) log.closeAndDelete();
@@ -743,7 +744,7 @@
final byte [] row = Bytes.toBytes("row");
for (int i = 0; i < times; i++) {
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
WALEdit cols = new WALEdit();
cols.add(new KeyValue(row, row, row, timestamp, row));
log.append(hri, tableName, cols, timestamp, htd);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
@@ -137,7 +138,7 @@
}
memstorescanners = this.memstore.getScanners();
// Assert that new values are seen in kvset as we scan.
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
count = 0;
int snapshotIndex = 5;
@@ -501,12 +502,12 @@
byte [] row =
Bytes.toBytes(".META.,table," + Bytes.toString(kk) + ",1," + k);
KeyValue key = new KeyValue(row, CONTENTS, BASIC,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
(CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
mc.add(key);
System.out.println(key);
// key = new KeyValue(row, Bytes.toBytes(ANCHORNUM + k),
-// System.currentTimeMillis(),
+// EnvironmentEdgeManager.currentTimeMillis(),
// (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
// mc.add(key);
// System.out.println(key);
@@ -539,15 +540,15 @@
addRows(this.memstore);
KeyValue closestToEmpty = this.memstore.getNextRow(KeyValue.LOWESTKEY);
assertTrue(KeyValue.COMPARATOR.compareRows(closestToEmpty,
- new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
+ new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTimeMillis())) == 0);
for (int i = 0; i < ROW_COUNT; i++) {
KeyValue nr = this.memstore.getNextRow(new KeyValue(Bytes.toBytes(i),
- System.currentTimeMillis()));
+ EnvironmentEdgeManager.currentTimeMillis()));
if (i + 1 == ROW_COUNT) {
assertEquals(nr, null);
} else {
assertTrue(KeyValue.COMPARATOR.compareRows(nr,
- new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
+ new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTimeMillis())) == 0);
}
}
//starting from each row, validate results should contain the starting row
@@ -868,7 +869,7 @@
private int addRows(final MemStore hmc, final long ts) {
for (int i = 0; i < ROW_COUNT; i++) {
long timestamp = ts == HConstants.LATEST_TIMESTAMP?
- System.currentTimeMillis(): ts;
+ EnvironmentEdgeManager.currentTimeMillis(): ts;
for (int ii = 0; ii < QUALIFIER_COUNT; ii++) {
byte [] row = Bytes.toBytes(i);
byte [] qf = makeQualifier(i, ii);
@@ -923,7 +924,7 @@
System.out.println(i + " Took for 1k usec: " + (System.nanoTime() - nanos)/1000);
nanos = System.nanoTime();
}
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
for (int ii = 0; ii < QUALIFIER_COUNT ; ii++) {
byte [] row = Bytes.toBytes(i);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressManager.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -76,7 +77,7 @@
// Create the master node with a dummy address
String host = "localhost";
int port = 1234;
- ServerName sn = new ServerName(host, port, System.currentTimeMillis());
+ ServerName sn = new ServerName(host, port, EnvironmentEdgeManager.currentTimeMillis());
LOG.info("Creating master node");
MasterAddressTracker.setMasterAddress(zk, zk.getMasterAddressZNode(), sn);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java (working copy)
@@ -44,6 +44,7 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.Test;
@@ -91,7 +92,7 @@
.withCompression(COMPRESSION)
.withComparator(KeyValue.KEY_COMPARATOR)
.create();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
try {
// subtract 2 since iterateOnSplits doesn't include boundary keys
for (int i = 0; i < numRows; i++) {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.experimental.categories.Category;
@@ -65,7 +66,7 @@
for (char c = 'a'; c <= 'c'; c++) {
byte[] row = Bytes.toBytes("ab" + c);
int i, j;
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
for (i = 0; i < 100; i++) {
byte[] b = Bytes.toBytes(String.format("%10d", i));
for (j = 0; j < 100; j++) {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (working copy)
@@ -56,6 +56,7 @@
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
@@ -318,11 +319,11 @@
/** blocks until the region split is complete in META and the region server opens the daughters */
void blockUntilRegionSplit(long timeout, final byte[] regionName, boolean waitForDaughters)
throws IOException, InterruptedException {
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
log("blocking until region is split:" + Bytes.toStringBinary(regionName));
HRegionInfo daughterA = null, daughterB = null;
- while (System.currentTimeMillis() - start < timeout) {
+ while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
Result result = getRegionRow(regionName);
if (result == null) {
break;
@@ -341,16 +342,16 @@
//if we are here, this means the region split is complete or timed out
if (waitForDaughters) {
- long rem = timeout - (System.currentTimeMillis() - start);
+ long rem = timeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
blockUntilRegionIsInMeta(rem, daughterA.getRegionName());
- rem = timeout - (System.currentTimeMillis() - start);
+ rem = timeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
blockUntilRegionIsInMeta(rem, daughterB.getRegionName());
- rem = timeout - (System.currentTimeMillis() - start);
+ rem = timeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
blockUntilRegionIsOpenedByRS(rem, daughterA.getRegionName());
- rem = timeout - (System.currentTimeMillis() - start);
+ rem = timeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
blockUntilRegionIsOpenedByRS(rem, daughterB.getRegionName());
}
}
@@ -363,8 +364,8 @@
void blockUntilRegionIsInMeta(long timeout, byte[] regionName)
throws IOException, InterruptedException {
log("blocking until region is in META: " + Bytes.toStringBinary(regionName));
- long start = System.currentTimeMillis();
- while (System.currentTimeMillis() - start < timeout) {
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
Result result = getRegionRow(regionName);
if (result != null) {
HRegionInfo info = MetaEditor.getHRegionInfo(result);
@@ -380,8 +381,8 @@
void blockUntilRegionIsOpenedByRS(long timeout, byte[] regionName)
throws IOException, InterruptedException {
log("blocking until region is opened by region server: " + Bytes.toStringBinary(regionName));
- long start = System.currentTimeMillis();
- while (System.currentTimeMillis() - start < timeout) {
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
List<HRegion> regions = rs.getOnlineRegions(tableName);
for (HRegion region : regions) {
if (Bytes.compareTo(region.getRegionName(), regionName) == 0) {
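All of the blockUntil* helpers above share one elapsed-time pattern, which is why routing them through the managed clock matters: with an adjustable edge installed, a test can step time past a deadline instead of sleeping through it. A sketch, assuming the ManualEnvironmentEdge utility (with setValue/incValue) from org.apache.hadoop.hbase.util:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class SteppedClockSketch {
      public static void main(String[] args) {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        clock.setValue(1000L);  // arbitrary starting instant
        EnvironmentEdgeManager.injectEdge(clock);
        try {
          long start = EnvironmentEdgeManager.currentTimeMillis();  // 1000
          long timeout = 60000;
          clock.incValue(timeout + 1);  // jump straight past the deadline
          // A guard like "currentTimeMillis() - start < timeout" is now false
          // on its first evaluation, so the wait loop exits without sleeping:
          System.out.println(
              EnvironmentEdgeManager.currentTimeMillis() - start < timeout);  // false
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }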
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java (working copy)
@@ -46,6 +46,7 @@
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.experimental.categories.Category;
@@ -239,7 +240,7 @@
// Write information to the meta table
- Put put = new Put(ROW_KEY, System.currentTimeMillis(), null);
+ Put put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTimeMillis(), null);
ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
DataOutputStream s = new DataOutputStream(byteStream);
@@ -269,7 +270,7 @@
String address = HConstants.LOCALHOST_IP + ":" + HBaseTestingUtility.randomFreePort();
- put = new Put(ROW_KEY, System.currentTimeMillis(), null);
+ put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTimeMillis(), null);
put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
Bytes.toBytes(address));
@@ -307,7 +308,7 @@
address = "bar.foo.com:4321";
- put = new Put(ROW_KEY, System.currentTimeMillis(), null);
+ put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTimeMillis(), null);
put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
Bytes.toBytes(address));
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.junit.experimental.categories.Category;
@@ -228,7 +229,7 @@
}
public void testDeleteVersionMaskingMultiplePuts() throws IOException {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue [] kvs1 = new KeyValue[] {
KeyValueTestUtil.create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care")
@@ -251,7 +252,7 @@
assertEquals(kvs2[1], results.get(0));
}
public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue [] kvs1 = new KeyValue[] {
KeyValueTestUtil.create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care")
@@ -405,7 +406,7 @@
* a column family (as should be triggered in a major compaction).
*/
public void testWildCardTtlScan() throws IOException {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
KeyValueTestUtil.create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
@@ -479,7 +480,7 @@
* Ensure that expired delete family markers don't override valid puts
*/
public void testExpiredDeleteFamily() throws Exception {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
KeyValue.Type.DeleteFamily),
@@ -507,7 +508,7 @@
public void testDeleteMarkerLongevity() throws Exception {
try {
- final long now = System.currentTimeMillis();
+ final long now = EnvironmentEdgeManager.currentTimeMillis();
EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
public long currentTimeMillis() {
return now;
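The testDeleteMarkerLongevity hunk shows the idiom this patch enables end to end: pin a value of now through the manager first, then install an edge that keeps returning it, so every TTL and delete-marker comparison in the scanner sees one stable instant. Roughly (a sketch of the idiom, not the test's actual body):

    import org.apache.hadoop.hbase.util.EnvironmentEdge;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;

    public class PinnedNowSketch {
      public static void main(String[] args) {
        final long now = EnvironmentEdgeManager.currentTimeMillis();
        EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
          public long currentTimeMillis() {
            return now;  // time stands still for the rest of the test
          }
        });
        try {
          // Build KeyValues stamped relative to 'now' and scan them; expiry
          // math such as "now - 1000 exceeds the TTL" becomes deterministic.
        } finally {
          EnvironmentEdgeManagerTestHelper.reset();
        }
      }
    }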
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -65,7 +66,7 @@
private boolean waitForCounterBoolean(AtomicLong ctr, long oldval, long newval,
long timems) {
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
long endt = curt + timems;
while (curt < endt) {
if (ctr.get() == oldval) {
@@ -73,7 +74,7 @@
Thread.sleep(10);
} catch (InterruptedException e) {
}
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
} else {
assertEquals(newval, ctr.get());
return true;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
@@ -55,7 +56,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -151,7 +152,7 @@
HConstants.FOREVER, false);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -195,7 +196,7 @@
HConstants.FOREVER, false);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -261,7 +262,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -307,7 +308,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Delete d = new Delete(T1, ts, null);
d.deleteColumns(c0, c0, ts);
@@ -349,7 +350,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
@@ -411,7 +412,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
p.add(c0, c1, T1);
@@ -492,7 +493,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -584,7 +585,7 @@
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
@@ -634,7 +635,7 @@
HTableDescriptor htd = createTableDescriptor(getName(), 3, 1000, 1, true);
HRegion region = createNewHRegion(htd, null, null);
- long ts = System.currentTimeMillis() - 2000; // 2s in the past
+ long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; // 2s in the past
Put p = new Put(T1, ts);
p.add(c0, c0, T3);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (working copy)
@@ -640,10 +640,10 @@
ctx.startThreads();
LOG.info("...waiting for put thread to sync first time");
- long startWait = System.currentTimeMillis();
+ long startWait = EnvironmentEdgeManager.currentTimeMillis();
while (HLog.getSyncTime().count == 0) {
Thread.sleep(100);
- if (System.currentTimeMillis() - startWait > 10000) {
+ if (EnvironmentEdgeManager.currentTimeMillis() - startWait > 10000) {
fail("Timed out waiting for thread to sync first minibatch");
}
}
@@ -858,7 +858,7 @@
region.put(put);
//Creating put to add
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2);
put = new Put(row1);
put.add(kv);
@@ -1242,7 +1242,7 @@
.toBytes("value")), false);
// TS out of range. should error
region.put(new Put(row).add(fam, Bytes.toBytes("qual"),
- System.currentTimeMillis() + 2000,
+ EnvironmentEdgeManager.currentTimeMillis() + 2000,
Bytes.toBytes("value")), false);
fail("Expected IOE for TS out of configured timerange");
} catch (DoNotRetryIOException ioe) {
@@ -1378,7 +1378,7 @@
// extract the key values out the memstore:
// This is kinda hacky, but better than nothing...
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue firstKv = region.getStore(fam1).memstore.kvset.first();
assertTrue(firstKv.getTimestamp() <= now);
now = firstKv.getTimestamp();
@@ -1669,10 +1669,10 @@
Path oldRegionPath = region.getRegionDir();
Path oldRegion1 = subregions[0].getRegionDir();
Path oldRegion2 = subregions[1].getRegionDir();
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
LOG.info("Merge regions elapsed time: " +
- ((System.currentTimeMillis() - startTime) / 1000.0));
+ ((EnvironmentEdgeManager.currentTimeMillis() - startTime) / 1000.0));
fs.delete(oldRegion1, true);
fs.delete(oldRegion2, true);
fs.delete(oldRegionPath, true);
@@ -1870,7 +1870,7 @@
byte [] fam4 = Bytes.toBytes("fam4");
byte [][] families = {fam1, fam2, fam3, fam4};
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
//Setting up region
String method = this.getName();
@@ -1935,7 +1935,7 @@
byte [] fam1 = Bytes.toBytes("fam1");
byte [][] families = {fam1};
- long ts1 = System.currentTimeMillis();
+ long ts1 = EnvironmentEdgeManager.currentTimeMillis();
long ts2 = ts1 + 1;
long ts3 = ts1 + 2;
@@ -1995,7 +1995,7 @@
byte [] fam1 = Bytes.toBytes("fam1");
byte [][] families = {fam1};
- long ts1 = 1; //System.currentTimeMillis();
+ long ts1 = 1; //EnvironmentEdgeManager.currentTimeMillis();
long ts2 = ts1 + 1;
long ts3 = ts1 + 2;
@@ -2142,7 +2142,7 @@
byte [] fam1 = Bytes.toBytes("fam1");
byte [][] families = {fam1};
- long ts1 = System.currentTimeMillis();
+ long ts1 = EnvironmentEdgeManager.currentTimeMillis();
long ts2 = ts1 + 1;
long ts3 = ts1 + 2;
@@ -2203,7 +2203,7 @@
byte [] qf2 = Bytes.toBytes("qualifier2");
byte [] fam1 = Bytes.toBytes("fam1");
- long ts1 = 1; //System.currentTimeMillis();
+ long ts1 = 1; //EnvironmentEdgeManager.currentTimeMillis();
long ts2 = ts1 + 1;
long ts3 = ts1 + 2;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (working copy)
@@ -54,6 +54,7 @@
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.experimental.categories.Category;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -245,7 +246,7 @@
// should result in a compacted store file that has no references to the
// deleted row.
LOG.debug("Adding deletes to memstore and flushing");
- Delete delete = new Delete(secondRowBytes, System.currentTimeMillis(), null);
+ Delete delete = new Delete(secondRowBytes, EnvironmentEdgeManager.currentTimeMillis(), null);
byte [][] famAndQf = {COLUMN_FAMILY, null};
delete.deleteFamily(famAndQf[0]);
r.delete(delete, null, true);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (working copy)
@@ -93,7 +93,7 @@
List<KeyValue> expected = new ArrayList<KeyValue>();
List<KeyValue> result = new ArrayList<KeyValue>();
- long id = System.currentTimeMillis();
+ long id = EnvironmentEdgeManager.currentTimeMillis();
Get get = new Get(row);
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -412,7 +412,7 @@
long oldValue = 1L;
long newValue = 3L;
this.store.add(new KeyValue(row, family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(oldValue)));
// snapshot the store.
@@ -420,7 +420,7 @@
// add other things:
this.store.add(new KeyValue(row, family, qf2,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(oldValue)));
// update during the snapshot.
@@ -471,19 +471,19 @@
size += this.store.add(new KeyValue(Bytes.toBytes("200909091000"), family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091200"), family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091300"), family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091400"), family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091500"), family, qf1,
- System.currentTimeMillis(),
+ EnvironmentEdgeManager.currentTimeMillis(),
Bytes.toBytes(newValue)));
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (working copy)
@@ -53,6 +53,7 @@
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@@ -116,7 +117,7 @@
*/
public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier)
throws IOException {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
try {
for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
@@ -349,7 +350,7 @@
float err = conf.getFloat(
BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
Path f = writer.getPath();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
String row = String.format(localFormatter, i);
KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
@@ -429,7 +430,7 @@
.build();
// add delete family
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
String row = String.format(localFormatter, i);
KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
@@ -499,7 +500,7 @@
.withBytesPerChecksum(CKBYTES)
.build();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < rowCount*2; i += 2) { // rows
for (int j = 0; j < colCount*2; j += 2) { // column qualifiers
String row = String.format(localFormatter, i);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -97,14 +98,14 @@
} else {
admin.majorCompact(table);
}
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
long waitTime = 5000;
long endt = curt + waitTime;
CompactionState state = admin.getCompactionState(table);
while (state == CompactionState.NONE && curt < endt) {
Thread.sleep(10);
state = admin.getCompactionState(table);
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
}
// Now, should have the right compaction state,
// otherwise, the compaction should have already been done
@@ -114,14 +115,14 @@
assertEquals(CompactionState.NONE, state);
}
} else {
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
waitTime = 20000;
endt = curt + waitTime;
state = admin.getCompactionState(table);
while (state != CompactionState.NONE && curt < endt) {
Thread.sleep(10);
state = admin.getCompactionState(table);
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
}
// Now, compaction should be done.
assertEquals(CompactionState.NONE, state);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnRangeFilter.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Test;
import org.junit.After;
import org.junit.AfterClass;
@@ -217,7 +218,7 @@
ResultScanner scanner = ht.getScanner(scan);
List<KeyValue> results = new ArrayList<KeyValue>();
LOG.info("scan column range: " + s.toString());
- long timeBeforeScan = System.currentTimeMillis();
+ long timeBeforeScan = EnvironmentEdgeManager.currentTimeMillis();
Result result;
while ((result = scanner.next()) != null) {
@@ -225,7 +226,7 @@
results.add(kv);
}
}
- long scanTime = System.currentTimeMillis() - timeBeforeScan;
+ long scanTime = EnvironmentEdgeManager.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.Before;
@@ -53,7 +54,7 @@
private static final byte[][] FAMILIES = {
Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo")
};
- private static final long STAMP_BASE = System.currentTimeMillis();
+ private static final long STAMP_BASE = EnvironmentEdgeManager.currentTimeMillis();
private static final long[] STAMPS = {
STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300
};
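One caveat with this particular hunk: STAMP_BASE is a static field, so it is evaluated once at class-load time through whichever edge happens to be installed then; an edge injected later (for example in a @Before method) will not move it. A test that needs the stamps to track an injected clock would have to read the clock lazily, along these lines (a hypothetical refactoring, not part of this patch):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class LazyStamps {
      private static long stampBase = -1L;

      // Defer the clock read until first use, so it happens under the edge
      // the running test has injected rather than at class-load time.
      static synchronized long stampBase() {
        if (stampBase < 0L) {
          stampBase = EnvironmentEdgeManager.currentTimeMillis();
        }
        return stampBase;
      }

      static long[] stamps() {
        long base = stampBase();
        return new long[] { base - 100, base - 200, base - 300 };
      }
    }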
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Test;
@@ -102,10 +103,10 @@
flush(r, server);
}
LOG.info("Post flush on " + server.getServerName());
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
long timeout = now + 1000;
while(server.getRegionServerAccounting().getGlobalMemstoreSize() != 0 &&
- timeout < System.currentTimeMillis()) {
+ EnvironmentEdgeManager.currentTimeMillis() < timeout) { // keep waiting only until the deadline
Threads.sleep(10);
}
long size = server.getRegionServerAccounting().getGlobalMemstoreSize();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (working copy)
@@ -67,6 +67,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;
@@ -356,7 +357,7 @@
Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
htd.getColumnFamilies()[0].getName());
Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
FileSystem fs = services.getMasterFileSystem().getFileSystem();
@@ -776,7 +777,7 @@
htd.getColumnFamilies()[0].getName());
Reference ref =
top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// Reference name has this format: StoreFile#REF_NAME_PARSER
Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
FileSystem fs = services.getMasterFileSystem().getFileSystem();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -109,12 +110,12 @@
TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
admin.offline(hri.getRegionName());
- long timeoutTime = System.currentTimeMillis() + 800;
+ long timeoutTime = EnvironmentEdgeManager.currentTimeMillis() + 800;
while (true) {
List<HRegionInfo> regions =
regionStates.getRegionsOfTable(Bytes.toBytes(table));
if (!regions.contains(hri)) break;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now > timeoutTime) {
fail("Failed to offline the region in time");
break;
@@ -153,14 +154,14 @@
TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
Bytes.toBytes(destServerName.getServerName()));
- long timeoutTime = System.currentTimeMillis() + 800;
+ long timeoutTime = EnvironmentEdgeManager.currentTimeMillis() + 800;
while (true) {
ServerName sn = regionStates.getRegionServerOfRegion(hri);
if (sn != null && sn.equals(destServerName)) {
TEST_UTIL.assertRegionOnServer(hri, sn, 200);
break;
}
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now > timeoutTime) {
fail("Failed to move the region in time");
}
@@ -180,14 +181,14 @@
// wait till the table is assigned
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
- long timeoutTime = System.currentTimeMillis() + 100;
+ long timeoutTime = EnvironmentEdgeManager.currentTimeMillis() + 100;
while (true) {
List<HRegionInfo> regions = master.getAssignmentManager().
getRegionStates().getRegionsOfTable(Bytes.toBytes(tableName));
if (regions.size() > 3) {
return regions.get(2);
}
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now > timeoutTime) {
fail("Could not find an online region");
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -209,11 +210,11 @@
waitForRSShutdownToStartAndFinish(activeMaster,
metaServer.getRegionServer().getServerName());
log("Waiting for no more RIT");
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
do {
blockUntilNoRIT(zkw, master);
} while (getNumberOfOnlineRegions(cluster) < numRegions
- && System.currentTimeMillis()-start < 60000);
+ && EnvironmentEdgeManager.currentTimeMillis()-start < 60000);
log("Verifying there are " + numRegions + " assigned on cluster");
assertRegionsAssigned(cluster, regions);
assertEquals(expectedNumRS, cluster.getRegionServerThreads().size());
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.MockServer;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
@@ -128,7 +129,7 @@
when(rsm.isRegionInTransition(hri)).thenReturn(false);
when(rsm.getRegionState(hri)).thenReturn(
new RegionState(region.getRegionInfo(), RegionState.State.OPEN,
- System.currentTimeMillis(), server.getServerName()));
+ EnvironmentEdgeManager.currentTimeMillis(), server.getServerName()));
// create a node with OPENED state
zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
region, server.getServerName());
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (working copy)
@@ -206,7 +206,7 @@
TaskBatch batch = new TaskBatch();
slm.enqueueSplitTask(logfiles[0].getPath().toString(), batch);
//waitForCounter but for one of the 2 counters
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
long waitTime = 80000;
long endt = curt + waitTime;
while (curt < endt) {
@@ -214,7 +214,7 @@
tot_wkr_final_transistion_failed.get() + tot_wkr_task_done.get() +
tot_wkr_preempt_task.get()) == 0) {
Thread.yield();
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
} else {
assertEquals(1, (tot_wkr_task_resigned.get() + tot_wkr_task_err.get() +
tot_wkr_final_transistion_failed.get() + tot_wkr_task_done.get() +
@@ -416,9 +416,9 @@
byte [] family = Bytes.toBytes("f");
byte [] qualifier = Bytes.toBytes("c" + Integer.toString(i));
e.add(new KeyValue(row, family, qualifier,
- System.currentTimeMillis(), value));
+ EnvironmentEdgeManager.currentTimeMillis(), value));
j++;
- log.append(hris.get(j % n), table, e, System.currentTimeMillis(), htd);
+ log.append(hris.get(j % n), table, e, EnvironmentEdgeManager.currentTimeMillis(), htd);
counts[j % n] += 1;
}
}
@@ -472,12 +472,12 @@
private void waitForCounter(AtomicLong ctr, long oldval, long newval,
long timems) {
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
long endt = curt + timems;
while (curt < endt) {
if (ctr.get() == oldval) {
Thread.yield();
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
} else {
assertEquals(newval, ctr.get());
return;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -74,7 +75,7 @@
} catch(KeeperException.NoNodeException nne) {}
// Create the master node with a dummy address
- ServerName master = new ServerName("localhost", 1, System.currentTimeMillis());
+ ServerName master = new ServerName("localhost", 1, EnvironmentEdgeManager.currentTimeMillis());
// Should not have a master yet
DummyMaster dummyMaster = new DummyMaster(zk,master);
ClusterStatusTracker clusterStatusTracker =
@@ -117,9 +118,9 @@
// Create the master node with a dummy address
ServerName firstMasterAddress =
- new ServerName("localhost", 1, System.currentTimeMillis());
+ new ServerName("localhost", 1, EnvironmentEdgeManager.currentTimeMillis());
ServerName secondMasterAddress =
- new ServerName("localhost", 2, System.currentTimeMillis());
+ new ServerName("localhost", 2, EnvironmentEdgeManager.currentTimeMillis());
// Should not have a master yet
DummyMaster ms1 = new DummyMaster(zk,firstMasterAddress);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
@@ -803,13 +804,13 @@
log("Waiting for no more RIT");
ZKAssign.blockUntilNoRIT(zkw);
log("No more RIT in ZK");
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
final long maxTime = 120000;
boolean done = master.assignmentManager.waitUntilNoRegionsInTransition(maxTime);
if (!done) {
LOG.info("rit=" + master.getAssignmentManager().getRegionStates().getRegionsInTransition());
}
- long elapsed = System.currentTimeMillis() - now;
+ long elapsed = EnvironmentEdgeManager.currentTimeMillis() - now;
assertTrue("Elapsed=" + elapsed + ", maxTime=" + maxTime + ", done=" + done,
elapsed < maxTime);
log("No more RIT in RIT map, doing final test verification");
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java (working copy)
@@ -30,6 +30,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -82,7 +83,7 @@
LOG.debug("regionServerStartup 1");
InetAddress ia1 = InetAddress.getLocalHost();
- sm.regionServerStartup(ia1, 1234, -1, System.currentTimeMillis());
+ sm.regionServerStartup(ia1, 1234, -1, EnvironmentEdgeManager.currentTimeMillis());
final Configuration c = HBaseConfiguration.create();
long maxSkew = c.getLong("hbase.master.maxclockskew", 30000);
@@ -91,7 +92,7 @@
try {
LOG.debug("regionServerStartup 2");
InetAddress ia2 = InetAddress.getLocalHost();
- sm.regionServerStartup(ia2, 1235, -1, System.currentTimeMillis() - maxSkew * 2);
+ sm.regionServerStartup(ia2, 1235, -1, EnvironmentEdgeManager.currentTimeMillis() - maxSkew * 2);
fail("HMaster should have thrown an ClockOutOfSyncException but didn't.");
} catch(ClockOutOfSyncException e) {
//we want an exception
@@ -101,7 +102,7 @@
// make sure values above warning threshold but below max threshold don't kill
LOG.debug("regionServerStartup 3");
InetAddress ia3 = InetAddress.getLocalHost();
- sm.regionServerStartup(ia3, 1236, -1, System.currentTimeMillis() - warningSkew * 2);
+ sm.regionServerStartup(ia3, 1236, -1, EnvironmentEdgeManager.currentTimeMillis() - warningSkew * 2);
}
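Converting this test pays off in a specific way: the skew the master computes is the gap between two reads of the same clock source, so with a frozen injected edge the margin under test is exact rather than drifting between the two reads. A sketch of constructing an exact skew (values illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdge;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class ExactSkewSketch {
      public static void main(String[] args) {
        final long masterNow = 1000000L;  // illustrative frozen master clock
        EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
          @Override
          public long currentTimeMillis() {
            return masterNow;
          }
        });
        try {
          long maxSkew = 30000;
          // With a live clock, time elapses between building this timestamp and
          // the master's check, so the observed skew is only approximate; with
          // the frozen edge it is exactly 2 * maxSkew.
          long serverTime = EnvironmentEdgeManager.currentTimeMillis() - maxSkew * 2;
          System.out.println(masterNow - serverTime == 2 * maxSkew);  // true
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }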
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -67,7 +68,7 @@
String unassignedZNode = zooKeeper.assignmentZNode;
ZKUtil.createAndFailSilent(zooKeeper, unassignedZNode);
- ServerName sn = new ServerName(HMaster.MASTER, -1, System.currentTimeMillis());
+ ServerName sn = new ServerName(HMaster.MASTER, -1, EnvironmentEdgeManager.currentTimeMillis());
ZKAssign.createNodeOffline(zooKeeper, HRegionInfo.ROOT_REGIONINFO, sn);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java (working copy)
@@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.master.SplitLogManager.Task;
import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
import org.apache.hadoop.hbase.regionserver.TestMasterAddressManager.NodeCreationListener;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -154,7 +155,7 @@
private void waitForCounter(Expr e, long oldval, long newval,
long timems) {
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
long endt = curt + timems;
while (curt < endt) {
if (e.eval() == oldval) {
@@ -162,7 +163,7 @@
Thread.sleep(10);
} catch (InterruptedException eintr) {
}
- curt = System.currentTimeMillis();
+ curt = EnvironmentEdgeManager.currentTimeMillis();
} else {
assertEquals(newval, e.eval());
return;
@@ -229,7 +230,7 @@
assertTrue(task.isOrphan());
waitForCounter(tot_mgr_heartbeat, 0, 1, 100);
assertFalse(task.isUnassigned());
- long curt = System.currentTimeMillis();
+ long curt = EnvironmentEdgeManager.currentTimeMillis();
assertTrue((task.last_update <= curt) &&
(task.last_update > (curt - 1000)));
LOG.info("waiting for manager to resubmit the orphan task");
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java (working copy)
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
@@ -54,7 +55,7 @@
HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
// Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
fs.delete(archivedHfileDir, true);
fs.mkdirs(archivedHfileDir);
// Case 1: 1 invalid file, which would be deleted directly
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -80,7 +81,7 @@
LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
// Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
fs.delete(oldLogDir, true);
fs.mkdirs(oldLogDir);
// Case 1: 2 invalid files, which would be deleted directly
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -135,7 +136,7 @@
@Test
public void testFailover()
throws IOException, KeeperException, InterruptedException, ServiceException {
- final long now = System.currentTimeMillis();
+ final long now = EnvironmentEdgeManager.currentTimeMillis();
// Names for our three servers. Make the port numbers match hostname.
// Will come in use down in the server when we need to figure how to respond.
final ServerName sn0 = new ServerName("0.example.org", 0, now);
@@ -249,7 +250,7 @@
conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);
conf.setInt("hbase.master.wait.on.regionservers.maxtostart", 1);
- final long now = System.currentTimeMillis();
+ final long now = EnvironmentEdgeManager.currentTimeMillis();
// Name for our single mocked up regionserver.
final ServerName sn = new ServerName("0.example.org", 0, now);
// Here is our mocked up regionserver. Create it now. Need it setting up
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.client.coprocessor.Exec;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
@@ -89,8 +90,8 @@
}
// sleep here is an ugly hack to allow region transitions to finish
- long timeout = System.currentTimeMillis() + (15 * 1000);
- while ((System.currentTimeMillis() < timeout) &&
+ long timeout = EnvironmentEdgeManager.currentTimeMillis() + (15 * 1000);
+ while ((EnvironmentEdgeManager.currentTimeMillis() < timeout) &&
(table.getRegionsInfo().size() != 2)) {
Thread.sleep(250);
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java (working copy)
@@ -411,7 +411,7 @@
EvenOnlyCompactor compactor = (EvenOnlyCompactor)cp;
// force a compaction
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
admin.flush(compactTable);
// wait for flush
for (int i=0; i<10; i++) {
@@ -519,7 +519,7 @@
.withPath(fs, path)
.withComparator(KeyValue.KEY_COMPARATOR)
.create();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
try {
for (int i =1;i<=9;i++) {
KeyValue kv = new KeyValue(Bytes.toBytes(i+""), family, qualifier, now, Bytes.toBytes(i+""));
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
@@ -53,7 +54,7 @@
final Put put, final WALEdit edit,
final boolean writeToWAL)
throws IOException {
- id = System.currentTimeMillis();
+ id = EnvironmentEdgeManager.currentTimeMillis();
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
@@ -68,7 +69,7 @@
final Put put, final WALEdit edit,
final boolean writeToWAL)
throws IOException {
- id = System.currentTimeMillis();
+ id = EnvironmentEdgeManager.currentTimeMillis();
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
@@ -84,7 +85,7 @@
final Put put, final WALEdit edit,
final boolean writeToWAL)
throws IOException {
- id = System.currentTimeMillis();
+ id = EnvironmentEdgeManager.currentTimeMillis();
try {
Thread.sleep(10);
} catch (InterruptedException ex) {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
@@ -108,7 +109,7 @@
@Test
public void testPartialRead() throws Exception {
HLog log = new HLog(fs, logDir, oldLogDir, conf);
- long ts = System.currentTimeMillis();
+ long ts = EnvironmentEdgeManager.currentTimeMillis();
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
ts, value));
@@ -122,7 +123,7 @@
log.rollWriter();
Thread.sleep(1);
- long ts1 = System.currentTimeMillis();
+ long ts1 = EnvironmentEdgeManager.currentTimeMillis();
edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"),
@@ -167,21 +168,21 @@
byte [] value = Bytes.toBytes("value");
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
- System.currentTimeMillis(), value));
+ EnvironmentEdgeManager.currentTimeMillis(), value));
log.append(info, tableName, edit,
- System.currentTimeMillis(), htd);
+ EnvironmentEdgeManager.currentTimeMillis(), htd);
Thread.sleep(1); // make sure 2nd log gets a later timestamp
- long secondTs = System.currentTimeMillis();
+ long secondTs = EnvironmentEdgeManager.currentTimeMillis();
log.rollWriter();
edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
- System.currentTimeMillis(), value));
+ EnvironmentEdgeManager.currentTimeMillis(), value));
log.append(info, tableName, edit,
- System.currentTimeMillis(), htd);
+ EnvironmentEdgeManager.currentTimeMillis(), htd);
log.close();
- long thirdTs = System.currentTimeMillis();
+ long thirdTs = EnvironmentEdgeManager.currentTimeMillis();
// should have 2 log files now
HLogInputFormat input = new HLogInputFormat();
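One thing to watch in hunks like the one above: the test still relies on Thread.sleep(1) to guarantee that successive clock reads differ. A frozen injected edge would break that assumption, so tests that need timestamps to keep advancing should pair this conversion with an auto-advancing edge, for example the IncrementingEnvironmentEdge utility (assumed available in this branch's util package):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

    public class AdvancingClockSketch {
      public static void main(String[] args) {
        // Every call returns a strictly larger value, so "later" timestamps are
        // guaranteed without any real sleeping.
        EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
        try {
          long first = EnvironmentEdgeManager.currentTimeMillis();
          long second = EnvironmentEdgeManager.currentTimeMillis();
          System.out.println(second > first);  // true; no Thread.sleep(1) needed
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }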
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
import org.junit.After;
@@ -59,7 +60,7 @@
private static final String OUTPUT_DIR = "outputdir";
private static MiniHBaseCluster cluster;
- private static long now = System.currentTimeMillis();
+ private static long now = EnvironmentEdgeManager.currentTimeMillis();
@BeforeClass
public static void beforeClass() throws Exception {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.*;
import org.junit.experimental.categories.Category;
@@ -213,7 +214,7 @@
.withCompression(COMPRESSION)
.withComparator(KeyValue.KEY_COMPARATOR)
.create();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
try {
// subtract 2 since iterateOnSplits doesn't include boundary keys
for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java (working copy)
@@ -58,6 +58,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.Compressor;
@@ -638,11 +639,11 @@
@Override
public Boolean call() throws Exception {
Random rand = new Random(clientId.hashCode());
- long endTime = System.currentTimeMillis() + 10000;
+ long endTime = EnvironmentEdgeManager.currentTimeMillis() + 10000;
int numBlocksRead = 0;
int numPositionalRead = 0;
int numWithOnDiskSize = 0;
- while (System.currentTimeMillis() < endTime) {
+ while (EnvironmentEdgeManager.currentTimeMillis() < endTime) {
int blockId = rand.nextInt(NUM_TEST_BLOCKS);
long offset = offsets.get(blockId);
boolean pread = rand.nextBoolean();
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -65,12 +66,12 @@
}
public void startTime() {
- startTimeEpoch = System.currentTimeMillis();
+ startTimeEpoch = EnvironmentEdgeManager.currentTimeMillis();
System.out.println(formatTime() + " Started timing.");
}
public void stopTime() {
- finishTimeEpoch = System.currentTimeMillis();
+ finishTimeEpoch = EnvironmentEdgeManager.currentTimeMillis();
System.out.println(formatTime() + " Stopped timing.");
}
@@ -90,7 +91,7 @@
}
public String formatTime(){
- return formatTime(System.currentTimeMillis());
+ return formatTime(EnvironmentEdgeManager.currentTimeMillis());
}
private FSDataOutputStream createFSOutput(Path name) throws IOException {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Random seek test.
@@ -65,7 +66,7 @@
LocalFileSystem lfs = new LocalFileSystem(rlfs);
Path path = new Path("/Users/ryan/rfile.big.txt");
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
SimpleBlockCache cache = new SimpleBlockCache();
CacheConfig cacheConf = new CacheConfig(cache, true, false, false, false,
false, false, false);
@@ -73,7 +74,7 @@
Reader reader = HFile.createReader(lfs, path, cacheConf);
reader.loadFileInfo();
System.out.println(reader.getTrailer());
- long end = System.currentTimeMillis();
+ long end = EnvironmentEdgeManager.currentTimeMillis();
System.out.println("Index read time: " + (end - start));
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java (working copy)
@@ -759,11 +759,11 @@
int lastCount = 0;
- final long start = System.currentTimeMillis();
+ final long start = EnvironmentEdgeManager.currentTimeMillis();
for (int i = 0; i < NB_RETRIES; i++) {
if (i==NB_RETRIES-1) {
fail("Waited too much time for queueFailover replication. " +
- "Waited "+(System.currentTimeMillis() - start)+"ms.");
+ "Waited "+(EnvironmentEdgeManager.currentTimeMillis() - start)+"ms.");
}
Scan scan2 = new Scan();
ResultScanner scanner2 = htable2.getScanner(scan2);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java (working copy)
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
@@ -180,7 +181,7 @@
}
LOG.info(i);
HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
- System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
+ EnvironmentEdgeManager.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
hlog.append(hri, key, edit, htd, true);
}
@@ -193,7 +194,7 @@
for (int i = 0; i < 3; i++) {
HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
- System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
+ EnvironmentEdgeManager.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
hlog.append(hri, key, edit, htd, true);
}
@@ -205,7 +206,7 @@
"1", 0, false);
HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
- System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
+ EnvironmentEdgeManager.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
hlog.append(hri, key, edit, htd, true);
assertEquals(1, manager.getHLogs().size());
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java (working copy)
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -228,7 +229,7 @@
} catch (InterruptedException e) {
LOG.info("Was interrupted while sleep, meh", e);
}
- final long now = System.currentTimeMillis();
+ final long now = EnvironmentEdgeManager.currentTimeMillis();
KeyValue kv = null;
if(type.getCode() == KeyValue.Type.Put.getCode()) {
kv = new KeyValue(rowBytes, fam, fam, now,
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After;
@@ -93,7 +94,7 @@
ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(),
this.getClass().getSimpleName(), ABORTABLE, true);
// This is a servername we use in a few places below.
- ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
+ ServerName sn = new ServerName("example.com", 1234, EnvironmentEdgeManager.currentTimeMillis());
HConnection connection = null;
CatalogTracker ct = null;
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
@@ -124,7 +125,7 @@
// it as a fail. We can't put that in the @Test tag as we want to close
// the threads nicely
final long timeOut = 180000;
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
// Make sure reader and writer are working.
@@ -140,7 +141,7 @@
do {
index = UTIL.getMiniHBaseCluster().getServerWithMeta();
} while (index == -1 &&
- startTime + timeOut < System.currentTimeMillis());
+ startTime + timeOut < EnvironmentEdgeManager.currentTimeMillis());
if (index != -1){
UTIL.getMiniHBaseCluster().abortRegionServer(index);
@@ -159,7 +160,7 @@
writer.join();
t.close();
}
- long exeTime = System.currentTimeMillis() - startTime;
+ long exeTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut);
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTrackerOnCluster.java (working copy)
@@ -22,6 +22,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
@@ -61,7 +62,7 @@
}
});
ServerName nonsense =
- new ServerName("example.org", 1234, System.currentTimeMillis());
+ new ServerName("example.org", 1234, EnvironmentEdgeManager.currentTimeMillis());
RootRegionTracker.setRootLocation(zookeeper, nonsense);
// Bring back up the hbase cluster. See if it can deal with nonsense root
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java (revision 1367675)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java (working copy)
@@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
@@ -83,7 +84,7 @@
private static final Log LOG = LogFactory.getLog(TestCatalogTracker.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final ServerName SN =
- new ServerName("example.org", 1234, System.currentTimeMillis());
+ new ServerName("example.org", 1234, EnvironmentEdgeManager.currentTimeMillis());
private ZooKeeperWatcher watcher;
private Abortable abortable;
@@ -137,7 +138,7 @@
constructAndStartCatalogTracker(connection);
try {
RootRegionTracker.setRootLocation(this.watcher,
- new ServerName("example.com", 1234, System.currentTimeMillis()));
+ new ServerName("example.com", 1234, EnvironmentEdgeManager.currentTimeMillis()));
} finally {
// Clean out root location or later tests will be confused... they presume
// start fresh in zk.
@@ -349,7 +350,7 @@
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
try {
RootRegionTracker.setRootLocation(this.watcher,
- new ServerName("example.com", 1234, System.currentTimeMillis()));
+ new ServerName("example.com", 1234, EnvironmentEdgeManager.currentTimeMillis()));
Assert.assertFalse(ct.verifyRootRegionLocation(100));
} finally {
// Clean out root location or later tests will be confused... they presume
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
@@ -321,9 +322,9 @@
case SyncConnected:
// Now, this callback can be invoked before the this.zookeeper is set.
// Wait a little while.
- long finished = System.currentTimeMillis() +
+ long finished = EnvironmentEdgeManager.currentTimeMillis() +
this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000);
- while (System.currentTimeMillis() < finished) {
+ while (EnvironmentEdgeManager.currentTimeMillis() < finished) {
Threads.sleep(1);
if (this.recoverableZooKeeper != null) break;
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java (working copy)
@@ -39,6 +39,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnLog;
@@ -311,7 +312,7 @@
// XXX: From o.a.zk.t.ClientBase
private static boolean waitForServerDown(int port, long timeout) {
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
while (true) {
try {
Socket sock = new Socket("localhost", port);
@@ -326,7 +327,7 @@
return true;
}
- if (System.currentTimeMillis() > start + timeout) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > start + timeout) {
break;
}
try {
@@ -340,7 +341,7 @@
// XXX: From o.a.zk.t.ClientBase
private static boolean waitForServerUp(int port, long timeout) {
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
while (true) {
try {
Socket sock = new Socket("localhost", port);
@@ -367,7 +368,7 @@
LOG.info("server localhost:" + port + " not up " + e);
}
- if (System.currentTimeMillis() > start + timeout) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > start + timeout) {
break;
}
try {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (working copy)
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
@@ -1212,7 +1213,7 @@
byte[] data = null;
boolean finished = false;
- final long endTime = System.currentTimeMillis() + timeout;
+ final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout;
while (!finished) {
try {
data = ZKUtil.getData(zkw, znode);
@@ -1220,7 +1221,7 @@
LOG.warn("Unexpected exception handling blockUntilAvailable", e);
}
- if (data == null && (System.currentTimeMillis() +
+ if (data == null && (EnvironmentEdgeManager.currentTimeMillis() +
HConstants.SOCKET_RETRY_WAIT_MS < endTime)) {
Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
} else {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java (working copy)
@@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.KeeperException;
/**
@@ -127,7 +128,7 @@
throws InterruptedException {
if (timeout < 0) throw new IllegalArgumentException();
boolean notimeout = timeout == 0;
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long remaining = timeout;
if (refresh) {
try {
@@ -166,7 +167,7 @@
// We expect a notification; but we wait with a
// a timeout to lower the impact of a race condition if any
wait(100);
- remaining = timeout - (System.currentTimeMillis() - startTime);
+ remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
return this.data;
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java (working copy)
@@ -25,6 +25,7 @@
import java.net.InetSocketAddress;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.VersionedWritable;
import org.apache.hadoop.io.WritableComparable;
@@ -57,7 +58,7 @@
* @param webuiport Port the webui runs on.
*/
public HServerInfo(final HServerAddress serverAddress, final int webuiport) {
- this(serverAddress, System.currentTimeMillis(), webuiport);
+ this(serverAddress, EnvironmentEdgeManager.currentTimeMillis(), webuiport);
}
public HServerInfo(HServerAddress serverAddress, long startCode,
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private
@@ -89,7 +90,7 @@
scan.setCacheBlocks(false);
scanner = table.getScanner(scan);
cached = null;
- id = Long.toString(System.currentTimeMillis()) +
+ id = Long.toString(EnvironmentEdgeManager.currentTimeMillis()) +
Integer.toHexString(scanner.hashCode());
} finally {
table.close();
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java (working copy)
@@ -40,6 +40,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* A wrapper around HttpClient which provides some useful function and
@@ -141,9 +142,9 @@
method.addRequestHeader(header);
}
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
int code = httpClient.executeMethod(method);
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
if (LOG.isDebugEnabled()) {
LOG.debug(method.getName() + " " + uri + " " + code + " " +
method.getStatusText() + " in " + (endTime - startTime) + " ms");
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java (working copy)
@@ -22,6 +22,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Sleeper;
@@ -61,7 +62,7 @@
try {
boolean initialChoreComplete = false;
while (!this.stopper.isStopped()) {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
if (!initialChoreComplete) {
initialChoreComplete = initialChore();
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java (working copy)
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.client.Operation;
import org.apache.hadoop.hbase.io.WritableWithSize;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.Writable;
import org.codehaus.jackson.map.ObjectMapper;
@@ -195,7 +196,7 @@
long queueTime) {
this.methodName = methodName;
this.params = params;
- this.rpcStartTime = System.currentTimeMillis();
+ this.rpcStartTime = EnvironmentEdgeManager.currentTimeMillis();
this.rpcQueueTime = queueTime;
this.state = State.RUNNING;
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java (working copy)
@@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.monitoring;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.codehaus.jackson.map.ObjectMapper;
import java.io.IOException;
@@ -38,7 +39,7 @@
protected volatile State state = State.RUNNING;
public MonitoredTaskImpl() {
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
statusTime = startTime;
stateTime = startTime;
}
@@ -117,12 +118,12 @@
@Override
public void setStatus(String status) {
this.status = status;
- statusTime = System.currentTimeMillis();
+ statusTime = EnvironmentEdgeManager.currentTimeMillis();
}
protected void setState(State state) {
this.state = state;
- stateTime = System.currentTimeMillis();
+ stateTime = EnvironmentEdgeManager.currentTimeMillis();
}
@Override
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java (working copy)
@@ -26,6 +26,7 @@
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
@@ -55,7 +56,7 @@
* older messages until the desired memory limit is achieved.
*/
public synchronized void add(String messageText) {
- LogMessage message = new LogMessage(messageText, System.currentTimeMillis());
+ LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTimeMillis());
usage += message.estimateHeapUsage();
messages.add(message);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
@@ -141,12 +142,12 @@
private boolean canPurge(MonitoredTask stat) {
long cts = stat.getCompletionTimestamp();
- return (cts > 0 && System.currentTimeMillis() - cts > EXPIRATION_TIME);
+ return (cts > 0 && EnvironmentEdgeManager.currentTimeMillis() - cts > EXPIRATION_TIME);
}
public void dumpAsText(PrintWriter out) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
List<MonitoredTask> tasks = getTasks();
for (MonitoredTask task : tasks) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java (working copy)
@@ -20,6 +20,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
@@ -42,7 +43,7 @@
super(name, description);
this.value = 0;
this.prevRate = 0;
- this.ts = System.currentTimeMillis();
+ this.ts = EnvironmentEdgeManager.currentTimeMillis();
registry.add(name, this);
}
@@ -59,7 +60,7 @@
}
public synchronized void intervalHeartBeat() {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
long diff = (now-ts) / 1000;
if (diff < 1){
// To make sure our averages aren't skewed by fast repeated calls,
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.util.StringUtils;
@@ -93,7 +94,7 @@
}
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
- timestamp = System.currentTimeMillis();
+ timestamp = EnvironmentEdgeManager.currentTimeMillis();
rowcount = 0;
}
}
@@ -198,7 +199,7 @@
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;
@@ -233,7 +234,7 @@
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (working copy)
@@ -155,7 +155,7 @@
Bytes.toString(tableName)
);
this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir);
- Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
+ Path logdir = new Path(tabledir, "merge_" + EnvironmentEdgeManager.currentTimeMillis() +
HConstants.HREGION_LOGDIR_NAME);
Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME);
this.hlog = new HLog(fs, logdir, oldLogDir, conf);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy)
@@ -534,7 +534,7 @@
throws IOException {
// Rewrite the file as pb. Move aside the old one first, write new
// then delete the moved-aside file.
- Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
+ Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTimeMillis());
if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
setClusterId(fs, rootdir, cid, 100);
if (!fs.delete(movedAsideName, false)) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java (working copy)
@@ -56,7 +56,7 @@
* Sleep for period.
*/
public void sleep() {
- sleep(System.currentTimeMillis());
+ sleep(EnvironmentEdgeManager.currentTimeMillis());
}
/**
@@ -79,11 +79,11 @@
if (this.stopper.isStopped()) {
return;
}
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
long waitTime = this.period - (now - startTime);
if (waitTime > this.period) {
LOG.warn("Calculated wait time > " + this.period +
- "; setting to this.period: " + System.currentTimeMillis() + ", " +
+ "; setting to this.period: " + EnvironmentEdgeManager.currentTimeMillis() + ", " +
startTime);
waitTime = this.period;
}
@@ -94,7 +94,7 @@
if (triggerWake) break;
sleepLock.wait(waitTime);
}
- woke = System.currentTimeMillis();
+ woke = EnvironmentEdgeManager.currentTimeMillis();
long slept = woke - now;
if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
LOG.warn("We slept " + slept + "ms instead of " + this.period +
@@ -110,7 +110,7 @@
}
}
// Recalculate waitTime.
- woke = (woke == -1)? System.currentTimeMillis(): woke;
+ woke = (woke == -1)? EnvironmentEdgeManager.currentTimeMillis(): woke;
waitTime = this.period - (woke - startTime);
}
triggerWake = false;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java (working copy)
@@ -176,13 +176,13 @@
// Wait for an active master
// having an active master before starting the region threads allows
// then to succeed on their connection to master
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
while (findActiveMaster(masters) == null) {
try {
Thread.sleep(100);
} catch (InterruptedException ignored) {
}
- if (System.currentTimeMillis() > startTime + 30000) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > startTime + 30000) {
throw new RuntimeException("Master not active after 30 seconds");
}
}
@@ -198,13 +198,13 @@
// Wait for an active master to be initialized (implies being master)
// with this, when we return the cluster is complete
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
while (true) {
JVMClusterUtil.MasterThread t = findActiveMaster(masters);
if (t != null && t.master.isInitialized()) {
return t.master.getServerName().toString();
}
- if (System.currentTimeMillis() > startTime + 200000) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > startTime + 200000) {
throw new RuntimeException("Master not initialized after 200 seconds");
}
try {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (working copy)
@@ -120,8 +120,8 @@
public static void waitUntilAssigned(HBaseAdmin admin,
HRegionInfo region) throws IOException, InterruptedException {
long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
- long expiration = timeout + System.currentTimeMillis();
- while (System.currentTimeMillis() < expiration) {
+ long expiration = timeout + EnvironmentEdgeManager.currentTimeMillis();
+ while (EnvironmentEdgeManager.currentTimeMillis() < expiration) {
try {
Map<String, RegionState> rits =
admin.getClusterStatus().getRegionsInTransition();
@@ -154,8 +154,8 @@
ProtobufUtil.closeRegion(rs, region.getRegionName(), false);
long timeout = admin.getConfiguration()
.getLong("hbase.hbck.close.timeout", 120000);
- long expiration = timeout + System.currentTimeMillis();
- while (System.currentTimeMillis() < expiration) {
+ long expiration = timeout + EnvironmentEdgeManager.currentTimeMillis();
+ while (EnvironmentEdgeManager.currentTimeMillis() < expiration) {
try {
HRegionInfo rsRegion =
ProtobufUtil.getRegionInfo(rs, region.getRegionName());
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java (working copy)
@@ -33,10 +33,13 @@
/**
* {@inheritDoc}
*
* This implementation returns {@link System#currentTimeMillis()}
*/
@Override
public long currentTimeMillis() {
+ // Intentionally bottoms out at System.currentTimeMillis(): this class is
+ // the default delegate behind EnvironmentEdgeManager, so calling back into
+ // the manager here would recurse indefinitely.
return System.currentTimeMillis();
}
}
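
Editorial note, not part of the patch: the indirection only pays off because tests can inject a replacement clock while the default edge above bottoms out at the system clock. A minimal sketch of such an injection, assuming nothing beyond the injectEdge()/reset() statics that EnvironmentEdgeManager already exposes; the class name and the frozen value are hypothetical:

    import org.apache.hadoop.hbase.util.EnvironmentEdge;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class FrozenClockSketch {
      public static void main(String[] args) {
        final long frozen = 1234567890L; // arbitrary instant, illustration only
        // After injection, every call site converted by this patch sees 'frozen'.
        EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
          @Override
          public long currentTimeMillis() {
            return frozen;
          }
        });
        try {
          System.out.println(EnvironmentEdgeManager.currentTimeMillis()); // 1234567890
        } finally {
          EnvironmentEdgeManager.reset(); // restores DefaultEnvironmentEdge
        }
      }
    }
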
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java (working copy)
@@ -68,7 +68,7 @@
return;
}
LOG.info("Recovering file " + p);
- long startWaiting = System.currentTimeMillis();
+ long startWaiting = EnvironmentEdgeManager.currentTimeMillis();
// Trying recovery
boolean recovered = false;
@@ -95,7 +95,7 @@
// within its soft limit, but if we get it past that, it means
// that the RS is holding onto the file even though it lost its
// znode. We could potentially abort after some time here.
- long waitedFor = System.currentTimeMillis() - startWaiting;
+ long waitedFor = EnvironmentEdgeManager.currentTimeMillis() - startWaiting;
if (waitedFor > LEASE_SOFTLIMIT_PERIOD) {
LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p +
":" + e.getMessage());
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java (working copy)
@@ -337,7 +337,7 @@
}
Delete delete = new Delete(regioninfo.getRegionName(),
- System.currentTimeMillis(), null);
+ EnvironmentEdgeManager.currentTimeMillis(), null);
meta.delete(delete, null, true);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (working copy)
@@ -161,7 +161,7 @@
private HBaseAdmin admin;
private HTable meta;
private ScheduledThreadPoolExecutor executor; // threads to retrieve data from regionservers
- private long startMillis = System.currentTimeMillis();
+ private long startMillis = EnvironmentEdgeManager.currentTimeMillis();
/***********
* Options
@@ -1119,7 +1119,7 @@
throw new IOException(e);
}
MetaEntry m =
- new MetaEntry(rootLocation.getRegionInfo(), sn, System.currentTimeMillis());
+ new MetaEntry(rootLocation.getRegionInfo(), sn, EnvironmentEdgeManager.currentTimeMillis());
HbckInfo hbInfo = new HbckInfo(m);
regionInfoMap.put(rootLocation.getRegionInfo().getEncodedName(), hbInfo);
return true;
@@ -1358,7 +1358,7 @@
(hbi.metaEntry == null)? false: hbi.metaEntry.isSplit() && hbi.metaEntry.isOffline();
boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry);
boolean recentlyModified = hbi.getHdfsRegionDir() != null &&
- hbi.getModTime() + timelag > System.currentTimeMillis();
+ hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTimeMillis();
// ========== First the healthy cases =============
if (hbi.containsOnlyHdfsEdits()) {
@@ -2171,7 +2171,7 @@
*/
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
List<String> tableNames = new ArrayList<String>();
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
for (HbckInfo hbi : regionInfoMap.values()) {
MetaEntry info = hbi.metaEntry;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/Threads.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/Threads.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/Threads.java (working copy)
@@ -143,7 +143,7 @@
* @param msToWait the amount of time to sleep in milliseconds
*/
public static void sleepWithoutInterrupt(final long msToWait) {
- long timeMillis = System.currentTimeMillis();
+ long timeMillis = EnvironmentEdgeManager.currentTimeMillis();
long endTime = timeMillis + msToWait;
boolean interrupted = false;
while (timeMillis < endTime) {
@@ -152,7 +152,7 @@
} catch (InterruptedException ex) {
interrupted = true;
}
- timeMillis = System.currentTimeMillis();
+ timeMillis = EnvironmentEdgeManager.currentTimeMillis();
}
if (interrupted) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (working copy)
@@ -98,7 +98,7 @@
public synchronized HLog getLog() throws IOException {
if (this.log == null) {
Path logdir = new Path(this.fs.getHomeDirectory(),
- HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
+ HConstants.HREGION_LOGDIR_NAME + "_" + EnvironmentEdgeManager.currentTimeMillis());
Path oldLogDir = new Path(this.fs.getHomeDirectory(),
HConstants.HREGION_OLDLOGDIR_NAME);
this.log = new HLog(this.fs, logdir, oldLogDir, this.conf);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (working copy)
@@ -427,7 +427,7 @@
daughterRegions.get(rsLocation).add(dr);
}
LOG.debug("Done with bucketing. Split time!");
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
// open the split file and modify it as splits finish
FSDataInputStream tmpIn = fs.open(splitFile);
@@ -545,7 +545,7 @@
+ " " + splitAlgo.rowToStr(region.getSecond()) + "\n");
splitCount++;
if (splitCount % 10 == 0) {
- long tDiff = (System.currentTimeMillis() - startTime)
+ long tDiff = (EnvironmentEdgeManager.currentTimeMillis() - startTime)
/ splitCount;
LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount
+ ". Avg Time / Split = "
@@ -571,7 +571,7 @@
}
LOG.debug("All regions have been successfully split!");
} finally {
- long tDiff = System.currentTimeMillis() - startTime;
+ long tDiff = EnvironmentEdgeManager.currentTimeMillis() - startTime;
LOG.debug("TOTAL TIME = "
+ org.apache.hadoop.util.StringUtils.formatTime(tDiff));
LOG.debug("Splits = " + splitCount);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Writables;
/**
@@ -411,7 +412,7 @@
HConstants.SPLITB_QUALIFIER));
HTable metaTable = getMetaTable();
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTimeMillis();
Result resultA = getRegionResultBlocking(metaTable, blockingTimeout,
splitA.getRegionName());
if (resultA != null) {
@@ -421,7 +422,7 @@
throw new RegionOfflineException("Split daughter region " +
splitA.getRegionNameAsString() + " cannot be found in META.");
}
- long rem = blockingTimeout - (System.currentTimeMillis() - start);
+ long rem = blockingTimeout - (EnvironmentEdgeManager.currentTimeMillis() - start);
Result resultB = getRegionResultBlocking(metaTable, rem,
splitB.getRegionName());
@@ -442,8 +443,8 @@
if (LOG.isDebugEnabled()) {
LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName));
}
- long start = System.currentTimeMillis();
- while (System.currentTimeMillis() - start < timeout) {
+ long start = EnvironmentEdgeManager.currentTimeMillis();
+ while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) {
Get get = new Get(regionName);
Result result = metaTable.get(get);
HRegionInfo info = Writables.getHRegionInfoOrNull(
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java (working copy)
@@ -55,7 +55,7 @@
* Specifying timestamps, deleteFamily and deleteColumns will delete all
* versions with a timestamp less than or equal to that passed. If no
* timestamp is specified, an entry is added with a timestamp of 'now'
- * where 'now' is the servers's System.currentTimeMillis().
+ * where 'now' is the server's EnvironmentEdgeManager.currentTimeMillis().
* Specifying a timestamp to the deleteColumn method will
* delete versions only with a timestamp equal to that specified.
* If no timestamp is passed to deleteColumn, internally, it figures the
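
Editorial note, not part of the patch: for illustration, the timestamp semantics described in the Javadoc above as a client-side sketch, with hypothetical row/family/qualifier names:

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteTimestampSketch {
      public static Delete example() {
        byte[] row = Bytes.toBytes("row");  // placeholder row key
        byte[] fam = Bytes.toBytes("fam");  // placeholder column family
        byte[] qf  = Bytes.toBytes("qf");   // placeholder qualifier

        Delete d = new Delete(row);
        // No timestamp supplied: the family delete covers every version with a
        // timestamp <= 'now', where 'now' is the server's
        // EnvironmentEdgeManager.currentTimeMillis().
        d.deleteFamily(fam);
        // Explicit timestamp: deleteColumn removes only the version whose
        // timestamp equals 42.
        d.deleteColumn(fam, qf, 42L);
        return d;
      }
    }
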
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (working copy)
@@ -87,6 +87,7 @@
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SoftValueSortedMap;
import org.apache.hadoop.hbase.util.Triple;
@@ -1467,7 +1468,7 @@
--keepAliveZookeeperUserCount;
if (keepAliveZookeeperUserCount <=0 ){
keepZooKeeperWatcherAliveUntil =
- System.currentTimeMillis() + keepAlive;
+ EnvironmentEdgeManager.currentTimeMillis() + keepAlive;
}
}
}
@@ -1505,7 +1506,7 @@
}
protected void closeMasterProtocol(MasterProtocolState protocolState) {
- if (System.currentTimeMillis() > protocolState.keepAliveUntil) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > protocolState.keepAliveUntil) {
hci.closeMasterProtocol(protocolState);
protocolState.keepAliveUntil = Long.MAX_VALUE;
}
@@ -1515,7 +1516,7 @@
protected void chore() {
synchronized (hci.masterAndZKLock) {
if (hci.canCloseZKW) {
- if (System.currentTimeMillis() >
+ if (EnvironmentEdgeManager.currentTimeMillis() >
hci.keepZooKeeperWatcherAliveUntil) {
hci.closeZooKeeperWatcher();
@@ -1690,7 +1691,7 @@
--protocolState.userCount;
if (protocolState.userCount <= 0) {
protocolState.keepAliveUntil =
- System.currentTimeMillis() + keepAlive;
+ EnvironmentEdgeManager.currentTimeMillis() + keepAlive;
}
}
}
@@ -2127,12 +2128,12 @@
final Callable<MultiResponse> delegate = hci.createCallable(loc, multi, tableName);
return new Callable<MultiResponse>() {
- private final long creationTime = System.currentTimeMillis();
+ private final long creationTime = EnvironmentEdgeManager.currentTimeMillis();
@Override
public MultiResponse call() throws Exception {
try {
- final long waitingTime = delay + creationTime - System.currentTimeMillis();
+ final long waitingTime = delay + creationTime - EnvironmentEdgeManager.currentTimeMillis();
if (waitingTime > 0) {
Thread.sleep(waitingTime);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java (working copy)
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.DNS;
@@ -141,10 +142,10 @@
RequestConverter.buildScanRequest(scannerId, caching, false);
try {
ScanResponse response = server.scan(null, request);
- long timestamp = System.currentTimeMillis();
+ long timestamp = EnvironmentEdgeManager.currentTimeMillis();
rrs = ResponseConverter.getResults(response);
if (logScannerActivity) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now - timestamp > logCutOffLatency) {
int rows = rrs == null ? 0 : rrs.length;
LOG.info("Took " + (now-timestamp) + "ms to fetch "
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java (working copy)
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.client.ClientProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;
@@ -115,12 +116,12 @@
public void beforeCall() {
HBaseRPC.setRpcTimeout(this.callTimeout);
- this.startTime = System.currentTimeMillis();
+ this.startTime = EnvironmentEdgeManager.currentTimeMillis();
}
public void afterCall() {
HBaseRPC.resetRpcTimeout();
- this.endTime = System.currentTimeMillis();
+ this.endTime = EnvironmentEdgeManager.currentTimeMillis();
}
public void shouldRetry(Throwable throwable) throws IOException {
@@ -183,7 +184,7 @@
}
RetriesExhaustedException.ThrowableWithExtraContext qt =
new RetriesExhaustedException.ThrowableWithExtraContext(t,
- System.currentTimeMillis(), toString());
+ EnvironmentEdgeManager.currentTimeMillis(), toString());
exceptions.add(qt);
if (tries == numRetries - 1) {
throw new RetriesExhaustedException(tries, exceptions);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataOutputBuffer;
/**
@@ -97,7 +98,7 @@
}
this.scan = scan;
this.tableName = tableName;
- this.lastNext = System.currentTimeMillis();
+ this.lastNext = EnvironmentEdgeManager.currentTimeMillis();
this.connection = connection;
if (scan.getMaxResultSize() > 0) {
this.maxScannerResultSize = scan.getMaxResultSize();
@@ -285,8 +286,8 @@
// If we are over the timeout, throw this exception to the client
// Else, it's because the region moved and we used the old id
// against the new region server; reset the scanner.
- if (timeout < System.currentTimeMillis()) {
- long elapsed = System.currentTimeMillis() - lastNext;
+ if (timeout < EnvironmentEdgeManager.currentTimeMillis()) {
+ long elapsed = EnvironmentEdgeManager.currentTimeMillis() - lastNext;
ScannerTimeoutException ex = new ScannerTimeoutException(
elapsed + "ms passed since the last invocation, " +
"timeout is currently set to " + scannerTimeout);
@@ -313,7 +314,7 @@
callable = null;
continue;
}
- long currentTime = System.currentTimeMillis();
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
if (this.scanMetrics != null ) {
this.scanMetrics.sumOfMillisSecBetweenNexts.inc(currentTime-lastNext);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.io.VersionedWritable;
@@ -219,7 +220,7 @@
public HRegionInfo090x(HTableDescriptor tableDesc, final byte[] startKey,
final byte[] endKey, final boolean split)
throws IllegalArgumentException {
- this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
+ this(tableDesc, startKey, endKey, split, EnvironmentEdgeManager.currentTimeMillis());
}
/**
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java (working copy)
@@ -30,6 +30,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.thrift.CallQueue.Call;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.thrift.TException;
import org.apache.thrift.TProcessor;
@@ -219,7 +220,7 @@
long msLeftToWait =
serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal);
- long timeMillis = System.currentTimeMillis();
+ long timeMillis = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Waiting for up to " + msLeftToWait + " ms to finish processing" +
" pending requests");
@@ -229,7 +230,7 @@
executorService.awaitTermination(msLeftToWait, TimeUnit.MILLISECONDS);
break;
} catch (InterruptedException ix) {
- long timePassed = System.currentTimeMillis() - timeMillis;
+ long timePassed = EnvironmentEdgeManager.currentTimeMillis() - timeMillis;
msLeftToWait -= timePassed;
timeMillis += timePassed;
interrupted = true;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy)
@@ -520,7 +520,7 @@
@Override
public boolean progress() {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now - lastLog > this.interval) {
LOG.info("Opening " + this.hri.getRegionNameAsString());
this.lastLog = now;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java (working copy)
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* State and utility for processing {@link HRegion#getClosestRowBefore(byte[], byte[])}.
@@ -73,7 +74,7 @@
HConstants.DELIMITER) - this.rowoffset;
}
this.tablenamePlusDelimiterLength = metaregion? l + 1: -1;
- this.oldestts = System.currentTimeMillis() - ttl;
+ this.oldestts = EnvironmentEdgeManager.currentTimeMillis() - ttl;
this.kvcomparator = c;
KeyValue.RowComparator rc = new KeyValue.RowComparator(this.kvcomparator);
this.deletes = new TreeMap<KeyValue, NavigableSet<KeyValue>>(rc);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.zookeeper.KeeperException;
@@ -169,7 +170,7 @@
// Total timeout for meta edit. If we fail adding the edit then close out
// the region and let it be assigned elsewhere.
long timeout = assignmentTimeout * 10;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
long endTime = now + timeout;
// Let our period at which we update OPENING state to be 1/3rd of the
// regions-in-transition timeout period.
@@ -191,7 +192,7 @@
// Go to the loop check.
}
}
- now = System.currentTimeMillis();
+ now = EnvironmentEdgeManager.currentTimeMillis();
}
// Is thread still alive? We may have left above loop because server is
// stopping or we timed out the edit. If so, interrupt it.
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (working copy)
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import java.io.IOException;
@@ -49,7 +50,7 @@
private final AtomicBoolean rollLog = new AtomicBoolean(false);
private final Server server;
private final RegionServerServices services;
- private volatile long lastrolltime = System.currentTimeMillis();
+ private volatile long lastrolltime = EnvironmentEdgeManager.currentTimeMillis();
// Period to roll log.
private final long rollperiod;
private final int threadWakeFrequency;
@@ -68,7 +69,7 @@
@Override
public void run() {
while (!server.isStopped()) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
boolean periodic = false;
if (!rollLog.get()) {
periodic = (now - this.lastrolltime) > this.rollperiod;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working copy)
@@ -67,6 +67,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Threads;
@@ -619,7 +620,7 @@
if (currentFilenum > 0) {
oldPath = computeFilename(currentFilenum);
}
- this.filenum = System.currentTimeMillis();
+ this.filenum = EnvironmentEdgeManager.currentTimeMillis();
Path newPath = computeFilename();
// Tell our listeners that a new log is about to be created
@@ -1290,7 +1291,7 @@
}
try {
long doneUpto;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// First flush all the pending writes to HDFS. Then
// issue the sync to HDFS. If sync is successful, then update
// syncedTillHere to indicate that transactions till this
@@ -1326,7 +1327,7 @@
}
this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto);
- syncTime.inc(System.currentTimeMillis() - now);
+ syncTime.inc(EnvironmentEdgeManager.currentTimeMillis() - now);
if (!this.logRollRunning) {
checkLowReplication();
try {
@@ -1453,13 +1454,13 @@
}
}
try {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// coprocessor hook:
if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) {
// write to our buffer for the Hlog file.
logSyncerThread.append(new HLog.Entry(logKey, logEdit));
}
- long took = System.currentTimeMillis() - now;
+ long took = EnvironmentEdgeManager.currentTimeMillis() - now;
coprocessorHost.postWALWrite(info, logKey, logEdit);
writeTime.inc(took);
long len = 0;
@@ -1582,13 +1583,13 @@
}
long txid = 0;
synchronized (updateLock) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
WALEdit edit = completeCacheFlushLogEdit();
HLogKey key = makeKey(encodedRegionName, tableName, logSeqId,
- System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
+ EnvironmentEdgeManager.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
logSyncerThread.append(new Entry(key, edit));
txid = this.unflushedEntries.incrementAndGet();
- writeTime.inc(System.currentTimeMillis() - now);
+ writeTime.inc(EnvironmentEdgeManager.currentTimeMillis() - now);
long len = 0;
for (KeyValue kv : edit.getKeyValues()) {
len += kv.getLength();
@@ -1610,7 +1611,7 @@
private WALEdit completeCacheFlushLogEdit() {
KeyValue kv = new KeyValue(METAROW, METAFAMILY, null,
- System.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
+ EnvironmentEdgeManager.currentTimeMillis(), COMPLETE_CACHE_FLUSH);
WALEdit e = new WALEdit();
e.add(kv);
return e;
@@ -1837,7 +1838,7 @@
final Path edits)
throws IOException {
Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." +
- System.currentTimeMillis());
+ EnvironmentEdgeManager.currentTimeMillis());
if (!fs.rename(edits, moveAsideName)) {
LOG.warn("Rename failed from " + edits + " to " + moveAsideName);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (working copy)
@@ -43,6 +43,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.util.StringUtils;
@@ -347,7 +348,7 @@
if (!fqe.region.getRegionInfo().isMetaRegion() &&
isTooManyStoreFiles(region)) {
if (fqe.isMaximumWait(this.blockingWaitTime)) {
- LOG.info("Waited " + (System.currentTimeMillis() - fqe.createTime) +
+ LOG.info("Waited " + (EnvironmentEdgeManager.currentTimeMillis() - fqe.createTime) +
"ms on a compaction to clean up 'too many store files'; waited " +
"long enough... proceeding with flush of " +
region.getRegionNameAsString());
@@ -526,7 +527,7 @@
FlushRegionEntry(final HRegion r) {
this.region = r;
- this.createTime = System.currentTimeMillis();
+ this.createTime = EnvironmentEdgeManager.currentTimeMillis();
this.whenToExpire = this.createTime;
}
@@ -535,7 +536,7 @@
* @return True if we have been delayed > maximumWait milliseconds.
*/
public boolean isMaximumWait(final long maximumWait) {
- return (System.currentTimeMillis() - this.createTime) > maximumWait;
+ return (EnvironmentEdgeManager.currentTimeMillis() - this.createTime) > maximumWait;
}
/**
@@ -548,19 +549,19 @@
/**
* @param when When to expire, when to come up out of the queue.
- * Specify in milliseconds. This method adds System.currentTimeMillis()
+ * Specify in milliseconds. This method adds EnvironmentEdgeManager.currentTimeMillis()
* to whatever you pass.
* @return This.
*/
public FlushRegionEntry requeue(final long when) {
- this.whenToExpire = System.currentTimeMillis() + when;
+ this.whenToExpire = EnvironmentEdgeManager.currentTimeMillis() + when;
this.requeueCount++;
return this;
}
@Override
public long getDelay(TimeUnit unit) {
- return unit.convert(this.whenToExpire - System.currentTimeMillis(),
+ return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTimeMillis(),
TimeUnit.MILLISECONDS);
}
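FlushRegionEntry sits on a DelayQueue, so getDelay must be computed against the same clock that set whenToExpire. A hypothetical stand-alone entry showing the pattern; note that under a frozen injected clock the delay never shrinks, so a test has to advance the clock before the queue will release the element:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DelayedEntrySketch {
  /** Illustrative queue entry that expires `when` ms after creation. */
  static class Entry implements Delayed {
    private final long whenToExpire;
    Entry(long when) {
      this.whenToExpire = EnvironmentEdgeManager.currentTimeMillis() + when;
    }
    @Override
    public long getDelay(TimeUnit unit) {
      // Remaining delay on the injectable clock, converted to the caller's unit.
      return unit.convert(whenToExpire - EnvironmentEdgeManager.currentTimeMillis(),
          TimeUnit.MILLISECONDS);
    }
    @Override
    public int compareTo(Delayed other) {
      long diff = getDelay(TimeUnit.MILLISECONDS) - other.getDelay(TimeUnit.MILLISECONDS);
      return diff < 0 ? -1 : diff > 0 ? 1 : 0;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<Entry> queue = new DelayQueue<Entry>();
    queue.put(new Entry(100));          // becomes available after roughly 100 ms
    System.out.println("released=" + (queue.take() != null));
  }
}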
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
import com.yammer.metrics.stats.Snapshot;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.metrics.ContextFactory;
@@ -61,8 +62,8 @@
@SuppressWarnings({"FieldCanBeLocal"})
private final Log LOG = LogFactory.getLog(this.getClass());
private final MetricsRecord metricsRecord;
- private long lastUpdate = System.currentTimeMillis();
- private long lastExtUpdate = System.currentTimeMillis();
+ private long lastUpdate = EnvironmentEdgeManager.currentTimeMillis();
+ private long lastExtUpdate = EnvironmentEdgeManager.currentTimeMillis();
private long extendedPeriod = 0;
private static final int MB = 1024*1024;
private MetricsRegistry registry = new MetricsRegistry();
@@ -338,7 +339,7 @@
*/
public void doUpdates(MetricsContext caller) {
synchronized (this) {
- this.lastUpdate = System.currentTimeMillis();
+ this.lastUpdate = EnvironmentEdgeManager.currentTimeMillis();
// has the extended period for long-living stats elapsed?
if (this.extendedPeriod > 0 &&
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -268,7 +269,7 @@
SplitLogCounters.tot_wkr_task_acquired.incrementAndGet();
getDataSetWatchAsync();
- t = System.currentTimeMillis();
+ t = EnvironmentEdgeManager.currentTimeMillis();
TaskExecutor.Status status;
status = splitTaskExecutor.exec(ZKSplitLog.getFileName(currentTask),
@@ -313,7 +314,7 @@
} finally {
if (t > 0) {
LOG.info("worker " + serverName + " done with task " + path +
- " in " + (System.currentTimeMillis() - t) + "ms");
+ " in " + (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms");
}
synchronized (grabTaskLock) {
workerInGrabTask = false;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Preconditions;
@@ -84,7 +85,7 @@
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r);
- t.setName(n + "-largeCompactions-" + System.currentTimeMillis());
+ t.setName(n + "-largeCompactions-" + EnvironmentEdgeManager.currentTimeMillis());
return t;
}
});
@@ -96,7 +97,7 @@
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r);
- t.setName(n + "-smallCompactions-" + System.currentTimeMillis());
+ t.setName(n + "-smallCompactions-" + EnvironmentEdgeManager.currentTimeMillis());
return t;
}
});
@@ -108,7 +109,7 @@
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r);
- t.setName(n + "-splits-" + System.currentTimeMillis());
+ t.setName(n + "-splits-" + EnvironmentEdgeManager.currentTimeMillis());
return t;
}
});
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (working copy)
@@ -1193,7 +1193,7 @@
}
// TODO: Use better method for determining stamp of last major (HBASE-2990)
long lowTimestamp = getLowestTimestamp(filesToCompact);
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (lowTimestamp > 0l && lowTimestamp < (now - mcTime)) {
// Major compaction time has elapsed.
if (filesToCompact.size() == 1) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java (working copy)
@@ -22,6 +22,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
import java.util.ConcurrentModificationException;
@@ -279,7 +280,7 @@
}
public long getDelay(TimeUnit unit) {
- return unit.convert(this.expirationTime - System.currentTimeMillis(),
+ return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTimeMillis(),
TimeUnit.MILLISECONDS);
}
@@ -294,7 +295,7 @@
* Resets the expiration time of the lease.
*/
public void resetExpirationTime() {
- this.expirationTime = System.currentTimeMillis() + this.leaseTimeoutPeriod;
+ this.expirationTime = EnvironmentEdgeManager.currentTimeMillis() + this.leaseTimeoutPeriod;
}
}
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -514,7 +514,7 @@
this.rpcServer.setErrorHandler(this);
this.rpcServer.setQosFunction(new QosFunction());
- this.startcode = System.currentTimeMillis();
+ this.startcode = EnvironmentEdgeManager.currentTimeMillis();
// login the server principal (if using secure Hadoop)
User.login(this.conf, "hbase.regionserver.keytab.file",
@@ -796,11 +796,11 @@
LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
}
}
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if ((now - lastMsg) >= msgInterval) {
doMetrics();
tryRegionServerReport(lastMsg, now);
- lastMsg = System.currentTimeMillis();
+ lastMsg = EnvironmentEdgeManager.currentTimeMillis();
}
if (!this.stopped) this.sleeper.sleep();
} // for
@@ -977,8 +977,8 @@
// Only print a message if the count of regions has changed.
if (count != lastCount) {
// Log every second at most
- if (System.currentTimeMillis() > (previousLogTime + 1000)) {
- previousLogTime = System.currentTimeMillis();
+ if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) {
+ previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
lastCount = count;
LOG.info("Waiting on " + count + " regions to close");
// Only print out regions still closing if a small number else will
@@ -1815,7 +1815,7 @@
return null;
}
LOG.debug("No master found; retry");
- previousLogTime = System.currentTimeMillis();
+ previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
refresh = true; // let's try pull it from ZK directly
sleeper.sleep();
@@ -1839,14 +1839,14 @@
e = e instanceof RemoteException ?
((RemoteException)e).unwrapRemoteException() : e;
if (e instanceof ServerNotRunningYetException) {
- if (System.currentTimeMillis() > (previousLogTime+1000)){
+ if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime+1000)){
LOG.info("Master isn't available yet, retrying");
- previousLogTime = System.currentTimeMillis();
+ previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
}
} else {
- if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) {
LOG.warn("Unable to connect to master. Retrying. Error was:", e);
- previousLogTime = System.currentTimeMillis();
+ previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
}
}
try {
@@ -3858,7 +3858,7 @@
}
protected void addToMovedRegions(String encodedName, ServerName destination){
- final Long time = System.currentTimeMillis();
+ final Long time = EnvironmentEdgeManager.currentTimeMillis();
movedRegions.put(
encodedName,
@@ -3869,7 +3869,7 @@
Pair<Long, ServerName> dest = movedRegions.get(encodedRegionName);
if (dest != null) {
- if (dest.getFirst() > (System.currentTimeMillis() - TIMEOUT_REGION_MOVED)) {
+ if (dest.getFirst() > (EnvironmentEdgeManager.currentTimeMillis() - TIMEOUT_REGION_MOVED)) {
return dest.getSecond();
} else {
movedRegions.remove(encodedRegionName);
@@ -3883,7 +3883,7 @@
* Remove the expired entries from the moved regions list.
*/
protected void cleanMovedRegions(){
- final long cutOff = System.currentTimeMillis() - TIMEOUT_REGION_MOVED;
+ final long cutOff = EnvironmentEdgeManager.currentTimeMillis() - TIMEOUT_REGION_MOVED;
Iterator<Map.Entry<String, Pair<Long, ServerName>>> it = movedRegions.entrySet().iterator();
while (it.hasNext()){
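Several hunks in this file repeat the "log every second at most" idiom: remember previousLogTime and only emit once a full interval has elapsed. A small hypothetical helper distilling it (the class name and interval are illustrative, not part of the patch):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class LogThrottle {
  private final long minIntervalMs;
  private long previousLogTime = 0;

  public LogThrottle(long minIntervalMs) { this.minIntervalMs = minIntervalMs; }

  /** True only when at least minIntervalMs has passed since the last accepted call. */
  public synchronized boolean shouldLog() {
    long now = EnvironmentEdgeManager.currentTimeMillis();
    if (now > previousLogTime + minIntervalMs) {
      previousLogTime = now;
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    LogThrottle throttle = new LogThrottle(1000);
    for (int i = 0; i < 3; i++) {
      if (throttle.shouldLog()) {     // only the first iteration passes
        System.out.println("Waiting on regions to close (check " + i + ")");
      }
    }
  }
}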
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java (working copy)
@@ -26,6 +26,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
@@ -60,7 +61,7 @@
return;
}
try {
- final long startTime = System.currentTimeMillis();
+ final long startTime = EnvironmentEdgeManager.currentTimeMillis();
SplitTransaction st = new SplitTransaction(parent, midKey);
// If prepare does not return true, for some reason -- logged inside in
// the prepare call -- we are not ready to split just now. Just return.
@@ -99,7 +100,7 @@
+ parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
+ st.getFirstDaughter().getRegionNameAsString() + ", "
+ st.getSecondDaughter().getRegionNameAsString() + ". Split took "
- + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
+ + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime));
} catch (IOException ex) {
LOG.error("Split failed " + this, RemoteExceptionHandler
.checkIOException(ex));
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* HBase Canary Tool, that can be used to do
@@ -234,9 +235,9 @@
get.addFamily(column.getName());
try {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
table.get(get);
- long time = System.currentTimeMillis() - startTime;
+ long time = EnvironmentEdgeManager.currentTimeMillis() - startTime;
sink.publishReadTiming(region, column, time);
} catch (Exception e) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java (working copy)
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -108,7 +109,7 @@
ZooKeeperProtos.RegionTransition.Builder builder = ZooKeeperProtos.RegionTransition.newBuilder().
setEventTypeCode(type.getCode()).setRegionName(ByteString.copyFrom(regionName)).
setOriginServerName(pbsn);
- builder.setCreateTime(System.currentTimeMillis());
+ builder.setCreateTime(EnvironmentEdgeManager.currentTimeMillis());
if (payload != null) builder.setPayload(ByteString.copyFrom(payload));
return new RegionTransition(builder.build());
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.util.MD5Hash;
@@ -257,7 +258,7 @@
public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey,
final boolean split)
throws IllegalArgumentException {
- this(tableName, startKey, endKey, split, System.currentTimeMillis());
+ this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTimeMillis());
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy)
@@ -57,6 +57,7 @@
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.protobuf.ServiceException;
@@ -217,7 +218,7 @@
*/
private void checkClockSkew(final ServerName serverName, final long serverCurrentTime)
throws ClockOutOfSyncException {
- long skew = System.currentTimeMillis() - serverCurrentTime;
+ long skew = EnvironmentEdgeManager.currentTimeMillis() - serverCurrentTime;
if (skew > maxSkew) {
String message = "Server " + serverName + " has been " +
"rejected; Reported time is too far out of sync with master. " +
@@ -342,7 +343,7 @@
long previousLogTime = 0;
while (!onlineServers.isEmpty()) {
- if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+ if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) {
StringBuilder sb = new StringBuilder();
for (ServerName key : this.onlineServers.keySet()) {
if (sb.length() > 0) {
@@ -351,7 +352,7 @@
sb.append(key);
}
LOG.info("Waiting on regionserver(s) to go down " + sb.toString());
- previousLogTime = System.currentTimeMillis();
+ previousLogTime = EnvironmentEdgeManager.currentTimeMillis();
}
synchronized (onlineServers) {
@@ -602,7 +603,7 @@
final int maxToStart = this.master.getConfiguration().
getInt("hbase.master.wait.on.regionservers.maxtostart", Integer.MAX_VALUE);
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
final long startTime = now;
long slept = 0;
long lastLogTime = 0;
@@ -631,7 +632,7 @@
// We sleep for some time
final long sleepTime = 50;
Thread.sleep(sleepTime);
- now = System.currentTimeMillis();
+ now = EnvironmentEdgeManager.currentTimeMillis();
slept = now - startTime;
oldCount = count;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -70,6 +70,7 @@
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
@@ -688,7 +689,7 @@
Lock lock = locker.acquireLock(encodedName);
try {
// Printing if the event was created a long time ago helps debugging
- boolean lateEvent = createTime < (System.currentTimeMillis() - 15000);
+ boolean lateEvent = createTime < (EnvironmentEdgeManager.currentTimeMillis() - 15000);
RegionState regionState = regionStates.getRegionTransitionState(encodedName);
LOG.debug("Handling transition=" + rt.getEventType() +
", server=" + sn + ", region=" +
@@ -1312,7 +1313,7 @@
try {
// Send OPEN RPC. If it fails on an IOE or RemoteException, the
// TimeoutMonitor will pick up the pieces.
- long maxWaitTime = System.currentTimeMillis() +
+ long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() +
this.master.getConfiguration().
getLong("hbase.regionserver.rpc.startup.waittime", 60000);
while (!this.master.isStopped()) {
@@ -1341,7 +1342,7 @@
} else if (decodedException instanceof ServerNotRunningYetException) {
// This is the one exception to retry. For all else we should just fail
// the startup.
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now > maxWaitTime) throw e;
LOG.debug("Server is not yet up; waiting up to " +
(maxWaitTime - now) + "ms", e);
@@ -1479,7 +1480,7 @@
// call to open risks our writing PENDING_OPEN after state has been moved
// to OPENING by the regionserver.
regionStates.updateRegionState(state.getRegion(),
- RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
+ RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(),
destination);
this.counter.addAndGet(1);
}
@@ -1580,7 +1581,7 @@
" to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
regionStates.updateRegionState(state.getRegion(),
- RegionState.State.PENDING_OPEN, System.currentTimeMillis(),
+ RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(),
plan.getDestination());
// Send OPEN RPC. This can fail if the server on the other end is not up.
// Pass the version that was obtained while setting the node to OFFLINE.
@@ -2348,10 +2349,10 @@
// that if it returns without an exception that there was a period of time
// with no regions in transition from the point-of-view of the in-memory
// state of the Master.
- final long endTime = System.currentTimeMillis() + timeout;
+ final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout;
while (!this.master.isStopped() && regionStates.isRegionsInTransition()
- && endTime > System.currentTimeMillis()) {
+ && endTime > EnvironmentEdgeManager.currentTimeMillis()) {
regionStates.waitForUpdate(100);
}
@@ -2367,10 +2368,10 @@
*/
boolean waitUntilNoRegionsInTransition(final long timeout, Set<HRegionInfo> regions)
throws InterruptedException {
- final long endTime = System.currentTimeMillis() + timeout;
+ final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout;
// We're not synchronizing on regionsInTransition now because we don't use any iterator.
- while (!regions.isEmpty() && !this.master.isStopped() && endTime > System.currentTimeMillis()) {
+ while (!regions.isEmpty() && !this.master.isStopped() && endTime > EnvironmentEdgeManager.currentTimeMillis()) {
Iterator<HRegionInfo> regionInfoIterator = regions.iterator();
while (regionInfoIterator.hasNext()) {
HRegionInfo hri = regionInfoIterator.next();
@@ -2676,7 +2677,7 @@
* on a frequent interval.
*/
public void updateRegionsInTransitionMetrics() {
- long currentTime = System.currentTimeMillis();
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
int totalRITs = 0;
int totalRITsOverThreshold = 0;
long oldestRITTime = 0;
@@ -2807,7 +2808,7 @@
boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
// Iterate all regions in transition checking for time outs
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
// a copy while another thread is adding/removing items
for (RegionState regionState : regionStates.getRegionsInTransition().values()) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -160,6 +160,7 @@
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
@@ -343,7 +344,7 @@
// Set our address.
this.isa = this.rpcServer.getListenerAddress();
this.serverName = new ServerName(this.isa.getHostName(),
- this.isa.getPort(), System.currentTimeMillis());
+ this.isa.getPort(), EnvironmentEdgeManager.currentTimeMillis());
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
@@ -413,7 +414,7 @@
MonitoredTask startupStatus =
TaskMonitor.get().createStatus("Master startup");
startupStatus.setDescription("Master startup");
- masterStartTime = System.currentTimeMillis();
+ masterStartTime = EnvironmentEdgeManager.currentTimeMillis();
try {
/*
* Block on becoming the active master.
@@ -551,10 +552,10 @@
long lastMsgTs = 0l;
long now = 0l;
while (!this.stopped) {
- now = System.currentTimeMillis();
+ now = EnvironmentEdgeManager.currentTimeMillis();
if ((now - lastMsgTs) >= this.msgInterval) {
doMetrics();
- lastMsgTs = System.currentTimeMillis();
+ lastMsgTs = EnvironmentEdgeManager.currentTimeMillis();
}
stopSleeper.sleep();
}
@@ -605,7 +606,7 @@
*/
status.setStatus("Initializing Master file system");
- this.masterActiveTime = System.currentTimeMillis();
+ this.masterActiveTime = EnvironmentEdgeManager.currentTimeMillis();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);
@@ -1226,7 +1227,7 @@
if (!this.balanceSwitch) return false;
// Do this call outside of synchronized block.
int maximumBalanceTime = getBalancerCutoffTime();
- long cutoffTime = System.currentTimeMillis() + maximumBalanceTime;
+ long cutoffTime = EnvironmentEdgeManager.currentTimeMillis() + maximumBalanceTime;
boolean balancerRan;
synchronized (this.balancer) {
// Only allow one balance run at a time.
@@ -1272,13 +1273,13 @@
if (plans != null && !plans.isEmpty()) {
for (RegionPlan plan: plans) {
LOG.info("balance " + plan);
- long balStartTime = System.currentTimeMillis();
+ long balStartTime = EnvironmentEdgeManager.currentTimeMillis();
this.assignmentManager.balance(plan);
- totalRegPlanExecTime += System.currentTimeMillis()-balStartTime;
+ totalRegPlanExecTime += EnvironmentEdgeManager.currentTimeMillis()-balStartTime;
rpCount++;
if (rpCount < plans.size() &&
// if performing next balance exceeds cutoff time, exit the loop
- (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
+ (EnvironmentEdgeManager.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) {
LOG.debug("No more balancing till next balance run; maximumBalanceTime=" +
maximumBalanceTime);
break;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java (working copy)
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
/**
@@ -204,7 +205,7 @@
*/
public synchronized RegionState updateRegionState(
final HRegionInfo hri, final State state, final ServerName serverName) {
- return updateRegionState(hri, state, System.currentTimeMillis(), serverName);
+ return updateRegionState(hri, state, EnvironmentEdgeManager.currentTimeMillis(), serverName);
}
/**
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* State of a Region while undergoing transitions.
@@ -55,11 +56,11 @@
private volatile State state;
public RegionState() {
- this.stamp = new AtomicLong(System.currentTimeMillis());
+ this.stamp = new AtomicLong(EnvironmentEdgeManager.currentTimeMillis());
}
public RegionState(HRegionInfo region, State state) {
- this(region, state, System.currentTimeMillis(), null);
+ this(region, state, EnvironmentEdgeManager.currentTimeMillis(), null);
}
public RegionState(HRegionInfo region,
@@ -71,7 +72,7 @@
}
public void updateTimestampToNow() {
- this.stamp.set(System.currentTimeMillis());
+ this.stamp.set(EnvironmentEdgeManager.currentTimeMillis());
}
public State getState() {
@@ -139,7 +140,7 @@
*/
public String toDescriptiveString() {
long lstamp = stamp.get();
- long relTime = System.currentTimeMillis() - lstamp;
+ long relTime = EnvironmentEdgeManager.currentTimeMillis() - lstamp;
return region.getRegionNameAsString()
+ " state=" + state
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/metrics/MasterMetrics.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.hadoop.hbase.metrics.HBaseInfo;
import org.apache.hadoop.hbase.metrics.MetricsRate;
import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.metrics.ContextFactory;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
@@ -50,8 +51,8 @@
private final MetricsRegistry registry = new MetricsRegistry();
private final MasterStatistics masterStatistics;
- private long lastUpdate = System.currentTimeMillis();
- private long lastExtUpdate = System.currentTimeMillis();
+ private long lastUpdate = EnvironmentEdgeManager.currentTimeMillis();
+ private long lastExtUpdate = EnvironmentEdgeManager.currentTimeMillis();
private long extendedPeriod = 0;
/*
* Count of requests to the cluster since last call to metrics update
@@ -114,7 +115,7 @@
*/
public void doUpdates(MetricsContext unused) {
synchronized (this) {
- this.lastUpdate = System.currentTimeMillis();
+ this.lastUpdate = EnvironmentEdgeManager.currentTimeMillis();
// has the extended period for long-living stats elapsed?
if (this.extendedPeriod > 0 &&
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.base.Joiner;
import com.google.common.collect.ArrayListMultimap;
@@ -49,7 +50,7 @@
// slop for regions
private float slop;
private Configuration config;
- private static final Random RANDOM = new Random(System.currentTimeMillis());
+ private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class);
protected MasterServices services;
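Seeding the balancer's Random from EnvironmentEdgeManager has a subtle payoff: RANDOM is a static final field, so the seed is fixed at class-load time, and a test that injects a manual edge before the class loads gets a deterministic, replayable seed. A sketch of the same seeding in isolation:

import java.util.Random;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class SeededRandomSketch {
  public static void main(String[] args) {
    // Under an injected manual edge this seed is constant, so the draw below
    // repeats across runs; under the default edge it is the wall clock as before.
    Random random = new Random(EnvironmentEdgeManager.currentTimeMillis());
    System.out.println(random.nextInt(100));
  }
}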
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.common.collect.MinMaxPriorityQueue;
@@ -56,7 +57,7 @@
@InterfaceAudience.Private
public class DefaultLoadBalancer extends BaseLoadBalancer {
private static final Log LOG = LogFactory.getLog(DefaultLoadBalancer.class);
- private static final Random RANDOM = new Random(System.currentTimeMillis());
+ private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
private RegionInfoComparator riComparator = new RegionInfoComparator();
private RegionPlan.RegionPlanComparator rpComparator = new RegionPlan.RegionPlanComparator();
@@ -181,7 +182,7 @@
public List<RegionPlan> balanceCluster(
Map<ServerName, List<HRegionInfo>> clusterMap) {
boolean emptyRegionServerPresent = false;
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
ClusterLoadState cs = new ClusterLoadState(clusterMap);
@@ -318,7 +319,7 @@
// If none needed to fill all to min and none left to drain all to max,
// we are done
if (neededRegions == 0 && regionsToMove.isEmpty()) {
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " +
"Moving " + totalNumMoved + " regions off of " +
serversOverloaded + " overloaded servers onto " +
@@ -396,7 +397,7 @@
}
}
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
if (!regionsToMove.isEmpty() || neededRegions != 0) {
// Emit data so can diagnose how balancer went astray.
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.util.ArrayList;
import java.util.HashMap;
@@ -105,7 +106,7 @@
private static final String MAX_STEPS_KEY = "hbase.master.balancer.stochastic.maxSteps";
private static final String MAX_MOVES_KEY = "hbase.master.balancer.stochastic.maxMoveRegions";
- private static final Random RANDOM = new Random(System.currentTimeMillis());
+ private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis());
private static final Log LOG = LogFactory.getLog(StochasticLoadBalancer.class);
private final RegionLocationFinder regionFinder = new RegionLocationFinder();
private ClusterStatus clusterStatus = null;
@@ -181,7 +182,7 @@
return null;
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
// Keep track of servers to iterate through them.
List<ServerName> servers = new ArrayList<ServerName>(clusterState.keySet());
@@ -246,7 +247,7 @@
}
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTimeMillis();
if (initCost > currentCost) {
List<RegionPlan> plans = createRegionPlans(initialRegionMapping, clusterState);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
@@ -40,7 +41,7 @@
@Override
public boolean isLogDeletable(Path filePath) {
long time = 0;
- long currentTime = System.currentTimeMillis();
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
try {
FileStatus fStat = filePath.getFileSystem(this.getConf()).getFileStatus(filePath);
time = fStat.getModificationTime();
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.KeeperException;
@@ -60,8 +61,8 @@
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
- long done = System.currentTimeMillis() + waitTime;
- while (System.currentTimeMillis() < done) {
+ long done = EnvironmentEdgeManager.currentTimeMillis() + waitTime;
+ while (EnvironmentEdgeManager.currentTimeMillis() < done) {
if (!am.getRegionStates().isRegionInTransition(region)) break;
Threads.sleep(waitingTimeForEvents);
LOG.debug("Waiting on region to clear regions in transition; "
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (working copy)
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.RegionStates;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.KeeperException;
/**
@@ -178,14 +179,14 @@
@Override
protected boolean waitUntilDone(long timeout)
throws InterruptedException {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long remaining = timeout;
List<HRegionInfo> regions = null;
while (!server.isStopped() && remaining > 0) {
Thread.sleep(waitingTimeForEvents);
regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
if (regions.isEmpty()) break;
- remaining = timeout - (System.currentTimeMillis() - startTime);
+ remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
return regions != null && regions.isEmpty();
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.KeeperException;
/**
@@ -219,7 +220,7 @@
@Override
protected boolean waitUntilDone(long timeout)
throws InterruptedException {
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
long remaining = timeout;
List<HRegionInfo> regions = null;
int lastNumberOfRegions = 0;
@@ -234,7 +235,7 @@
lastNumberOfRegions = regions.size();
timeout += waitingTimeForEvents;
}
- remaining = timeout - (System.currentTimeMillis() - startTime);
+ remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
return isDone(regions);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.Server;
@@ -181,7 +182,7 @@
FileSystem fs = path.getFileSystem(HBaseConfiguration.create());
Path dst = new Path(System.getProperty("java.io.tmpdir") +
java.io.File.separator +"." + pathPrefix +
- "." + className + "." + System.currentTimeMillis() + ".jar");
+ "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + ".jar");
fs.copyToLocalFile(path, dst);
fs.deleteOnExit(dst);
@@ -207,7 +208,7 @@
if (entry.getName().matches("/lib/[^/]+\\.jar")) {
File file = new File(System.getProperty("java.io.tmpdir") +
java.io.File.separator +"." + pathPrefix +
- "." + className + "." + System.currentTimeMillis() + "." + entry.getName().substring(5));
+ "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + "." + entry.getName().substring(5));
IOUtils.copyBytes(jarFile.getInputStream(entry), new FileOutputStream(file), conf, true);
file.deleteOnExit();
paths.add(file.toURL());
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -84,7 +85,7 @@
this.scanner = this.htable.getScanner(currentScan);
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
- timestamp = System.currentTimeMillis();
+ timestamp = EnvironmentEdgeManager.currentTimeMillis();
rowcount = 0;
}
}
@@ -195,7 +196,7 @@
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;
@@ -231,7 +232,7 @@
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (working copy)
@@ -20,6 +20,7 @@
package org.apache.hadoop.hbase.mapreduce;
import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import java.io.IOException;
import java.util.ArrayList;
@@ -418,7 +419,7 @@
// If timestamp option is not specified, use current system time.
long timstamp = conf
- .getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());
+ .getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTimeMillis());
// Set it back to replace invalid timestamp (non-numeric) with current
// system time
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (working copy)
@@ -59,6 +59,7 @@
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.WritableUtils;
@@ -128,7 +129,7 @@
private final Map<byte [], WriterLength> writers =
new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
- private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
+ private final byte [] now = Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis());
private boolean rollRequested = false;
public void write(ImmutableBytesWritable row, KeyValue kv)
@@ -215,7 +216,7 @@
private void close(final HFile.Writer w) throws IOException {
if (w != null) {
w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
- Bytes.toBytes(System.currentTimeMillis()));
+ Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis()));
w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* This class is for maintaining the various replication statistics for a source and publishing them
@@ -74,7 +75,7 @@
* @param timestamp write time of the edit
*/
public void setAgeOfLastShippedOp(long timestamp) {
- long age = System.currentTimeMillis() - timestamp;
+ long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp;
rms.setGauge(ageOfLastShippedOpKey, age);
rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age);
this.lastTimestamp = timestamp;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSource;
import org.apache.hadoop.hbase.replication.regionserver.metrics.ReplicationMetricsSourceFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* This class is for maintaining the various replication statistics for a sink and publishing them
@@ -45,7 +46,7 @@
* @param timestamp The timestamp of the last operation applied.
*/
public void setAgeOfLastAppliedOp(long timestamp) {
- long age = System.currentTimeMillis() - timestamp;
+ long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp;
rms.setGauge(SINK_AGE_OF_LAST_APPLIED_OP, age);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (working copy)
@@ -42,6 +42,7 @@
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker;
import org.apache.hadoop.hbase.zookeeper.RootRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -470,10 +471,10 @@
*/
public ServerName waitForMeta(long timeout)
throws InterruptedException, IOException, NotAllMetaRegionsOnlineException {
- long stop = System.currentTimeMillis() + timeout;
+ long stop = EnvironmentEdgeManager.currentTimeMillis() + timeout;
long waitTime = Math.min(50, timeout);
synchronized (metaAvailable) {
- while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) {
+ while(!stopped && (timeout == 0 || EnvironmentEdgeManager.currentTimeMillis() < stop)) {
if (getMetaServerConnection() != null) {
return metaLocation;
}
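
The waitForMeta hunk is a deadline loop: compute a stop time up front, then poll under the monitor until the condition holds or the clock passes the deadline, with timeout == 0 meaning wait indefinitely. A self-contained sketch of that shape under assumed names (lock, conditionMet stand in for metaAvailable and the stopped/meta-location checks):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DeadlineWaitSketch {
  private final Object lock = new Object();
  private volatile boolean conditionMet = false; // stand-in for "meta located"

  /** Returns true if the condition was met before the deadline expired. */
  boolean waitFor(long timeout) throws InterruptedException {
    long stop = EnvironmentEdgeManager.currentTimeMillis() + timeout;
    long waitTime = Math.min(50, timeout); // same 50 ms poll step as the hunk
    synchronized (lock) {
      // timeout == 0 is the "wait indefinitely" case, as in waitForMeta
      while (timeout == 0 || EnvironmentEdgeManager.currentTimeMillis() < stop) {
        if (conditionMet) {
          return true;
        }
        lock.wait(waitTime);
      }
    }
    return false;
  }
}
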
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java (working copy)
@@ -42,6 +42,7 @@
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Objects;
import org.apache.hadoop.io.*;
import org.apache.hadoop.ipc.RPC;
@@ -90,7 +91,7 @@
final boolean logDebug = LOG.isDebugEnabled();
long startTime = 0;
if (logDebug) {
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
}
try {
@@ -99,7 +100,7 @@
rpcTimeout);
if (logDebug) {
// FIGURE HOW TO TURN THIS OFF!
- long callTime = System.currentTimeMillis() - startTime;
+ long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
LOG.debug("Call: " + method.getName() + " " + callTime);
}
return value.get();
@@ -332,10 +333,10 @@
throw new HBaseRPC.UnknownProtocolException(protocol);
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
Object[] params = call.getParameters();
Object value = method.invoke(impl, params);
- int processingTime = (int) (System.currentTimeMillis() - startTime);
+ int processingTime = (int) (EnvironmentEdgeManager.currentTimeMillis() - startTime);
int qTime = (int) (startTime-receivedTime);
if (TRACELOG.isDebugEnabled()) {
TRACELOG.debug("Call #" + CurCall.get().id +
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (working copy)
@@ -88,6 +88,7 @@
import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus;
import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;
@@ -329,7 +330,7 @@
this.id = id;
this.param = param;
this.connection = connection;
- this.timestamp = System.currentTimeMillis();
+ this.timestamp = EnvironmentEdgeManager.currentTimeMillis();
this.response = null;
this.delayResponse = false;
this.responder = responder;
@@ -478,7 +479,7 @@
@Override
public void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException {
if (!connection.channel.isOpen()) {
- long afterTime = System.currentTimeMillis() - timestamp;
+ long afterTime = EnvironmentEdgeManager.currentTimeMillis() - timestamp;
throw new CallerDisconnectedException(
"Aborting call " + this + " after " + afterTime + " ms, since " +
"caller disconnected");
@@ -630,7 +631,7 @@
*/
private void cleanupConnections(boolean force) {
if (force || numConnections > thresholdIdleConnections) {
- long currentTime = System.currentTimeMillis();
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
return;
}
@@ -667,7 +668,7 @@
}
else i++;
}
- lastCleanupRunTime = System.currentTimeMillis();
+ lastCleanupRunTime = EnvironmentEdgeManager.currentTimeMillis();
}
}
@@ -765,7 +766,7 @@
try {
reader.startAdd();
SelectionKey readKey = reader.registerChannel(channel);
- c = getConnection(channel, System.currentTimeMillis());
+ c = getConnection(channel, EnvironmentEdgeManager.currentTimeMillis());
readKey.attach(c);
synchronized (connectionList) {
connectionList.add(numConnections, c);
@@ -788,7 +789,7 @@
if (c == null) {
return;
}
- c.setLastContact(System.currentTimeMillis());
+ c.setLastContact(EnvironmentEdgeManager.currentTimeMillis());
try {
count = c.readAndProcess();
@@ -807,7 +808,7 @@
// c = null;
}
else {
- c.setLastContact(System.currentTimeMillis());
+ c.setLastContact(EnvironmentEdgeManager.currentTimeMillis());
}
}
@@ -881,7 +882,7 @@
LOG.info(getName() + ": doAsyncWrite threw exception " + e);
}
}
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
if (now < lastPurgeTime + purgeTimeout) {
continue;
}
@@ -1036,7 +1037,7 @@
if (inHandler) {
// set the serve time when the response has to be sent later
- call.timestamp = System.currentTimeMillis();
+ call.timestamp = EnvironmentEdgeManager.currentTimeMillis();
if (enqueueInSelector(call))
done = true;
}
@@ -1084,7 +1085,7 @@
//
void doRespond(Call call) throws IOException {
// set the serve time when the response has to be sent later
- call.timestamp = System.currentTimeMillis();
+ call.timestamp = EnvironmentEdgeManager.currentTimeMillis();
responseQueueLen++;
boolean doRegister = false;
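
Among the HBaseServer hunks, cleanupConnections is the one where a controllable clock matters most: sweeps are skipped unless forced or cleanupInterval has elapsed since the last sweep, which is awkward to exercise against the wall clock. A sketch of that throttle under assumed field values (the names mirror the hunk but the class is illustrative):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class CleanupThrottleSketch {
  private long lastCleanupRunTime = 0;         // illustrative stand-ins for
  private final long cleanupInterval = 10000L; // the server's actual fields

  /** Decide whether cleanupConnections should sweep on this pass. */
  boolean shouldSweep(boolean force) {
    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
    if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
      return false; // swept recently and not forced: skip
    }
    lastCleanupRunTime = EnvironmentEdgeManager.currentTimeMillis();
    return true;
  }
}
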
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java (working copy)
@@ -67,6 +67,7 @@
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -195,7 +196,7 @@
protected Call(Writable param) {
this.param = param;
- this.startTime = System.currentTimeMillis();
+ this.startTime = EnvironmentEdgeManager.currentTimeMillis();
synchronized (HBaseClient.this) {
this.id = counter++;
}
@@ -352,7 +353,7 @@
/** Update lastActivity with the current time. */
protected void touch() {
- lastActivity.set(System.currentTimeMillis());
+ lastActivity.set(EnvironmentEdgeManager.currentTimeMillis());
}
/**
@@ -511,7 +512,7 @@
protected synchronized boolean waitForWork() {
if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) {
long timeout = maxIdleTime-
- (System.currentTimeMillis()-lastActivity.get());
+ (EnvironmentEdgeManager.currentTimeMillis()-lastActivity.get());
if (timeout>0) {
try {
wait(timeout);
@@ -541,7 +542,7 @@
* since last I/O activity is equal to or greater than the ping interval
*/
protected synchronized void sendPing() throws IOException {
- long curTime = System.currentTimeMillis();
+ long curTime = EnvironmentEdgeManager.currentTimeMillis();
if ( curTime - lastActivity.get() >= pingInterval) {
lastActivity.set(curTime);
//noinspection SynchronizeOnNonFinalField
@@ -928,7 +929,7 @@
Iterator> itor = calls.entrySet().iterator();
while (itor.hasNext()) {
Call c = itor.next().getValue();
- long waitTime = System.currentTimeMillis() - c.getStartTime();
+ long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime();
if (waitTime >= rpcTimeout) {
if (this.closeException == null) {
// There may be no exception in the case that there are many calls
@@ -952,7 +953,7 @@
try {
if (!calls.isEmpty()) {
Call firstCall = calls.get(calls.firstKey());
- long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime();
+ long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() - firstCall.getStartTime();
if (maxWaitTime < rpcTimeout) {
rpcTimeout -= maxWaitTime;
}
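
The last HBaseClient hunk shrinks the socket read timeout by however long the oldest outstanding call has already waited, so the overall RPC deadline is preserved across successive reads. A hedged restatement of that budget arithmetic (hypothetical helper, not the client's code):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RemainingTimeoutSketch {
  /** RPC budget left once the oldest call has already waited a while. */
  static long remaining(long rpcTimeout, long firstCallStartTime) {
    long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() - firstCallStartTime;
    if (maxWaitTime < rpcTimeout) {
      rpcTimeout -= maxWaitTime; // wait only for what is left of the budget
    }
    return rpcTimeout;
  }
}
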
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ReflectionUtils;
@@ -237,7 +238,7 @@
long timeout
) throws IOException {
// HBase does limited number of reconnects which is different from hadoop.
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
IOException ioe;
int reconnectAttempts = 0;
while (true) {
@@ -270,7 +271,7 @@
}
}
// check if timed out
- if (System.currentTimeMillis() - timeout >= startTime) {
+ if (EnvironmentEdgeManager.currentTimeMillis() - timeout >= startTime) {
throw ioe;
}
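
The deadline test in the hunk above reads more naturally once rearranged: now - timeout >= startTime is the same as now - startTime >= timeout, i.e. the retry loop rethrows once the elapsed time has consumed the budget. A pure algebraic restatement (hypothetical helper, not the project's code):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class RetryDeadlineSketch {
  /** now - timeout >= startTime, rewritten as elapsed >= budget. */
  static boolean timedOut(long startTime, long timeout) {
    return EnvironmentEdgeManager.currentTimeMillis() - startTime >= timeout;
  }
}
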
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java (revision 1367675)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.*;
import org.apache.hadoop.hbase.util.Objects;
import org.apache.hadoop.hbase.util.ProtoUtil;
@@ -162,7 +163,7 @@
throws ServiceException {
long startTime = 0;
if (LOG.isDebugEnabled()) {
- startTime = System.currentTimeMillis();
+ startTime = EnvironmentEdgeManager.currentTimeMillis();
}
RpcRequestBody rpcRequest = constructRpcRequest(method, args);
@@ -173,7 +174,7 @@
rpcTimeout);
if (LOG.isDebugEnabled()) {
- long callTime = System.currentTimeMillis() - startTime;
+ long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime;
LOG.debug("Call: " + method.getName() + " " + callTime);
}
@@ -372,7 +373,7 @@
throw new HBaseRPC.UnknownProtocolException(protocol);
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
if (method.getParameterTypes().length == 2) {
// RpcController + Message in the method args
// (generated code from RPC bits in .proto files have RpcController)
@@ -385,7 +386,7 @@
+ method.getName() + "]" + ", allowed (at most): 2, Actual: "
+ method.getParameterTypes().length);
}
- int processingTime = (int) (System.currentTimeMillis() - startTime);
+ int processingTime = (int) (EnvironmentEdgeManager.currentTimeMillis() - startTime);
int qTime = (int) (startTime-receiveTime);
if (TRACELOG.isDebugEnabled()) {
TRACELOG.debug("Call #" + CurCall.get().id +