hbase-commits mailing list archives

From: apurt...@apache.org
Subject: [hbase] branch master updated: HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)
Date: Tue, 01 Jun 2021 16:58:26 GMT
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new 335305e  HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)
335305e is described below

commit 335305e0cf1ae00b18d6e7fb26a0e3617a4481a6
Author: Andrew Purtell <apurtell@apache.org>
AuthorDate: Tue Jun 1 09:57:48 2021 -0700

    HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)
    
    We introduced EnvironmentEdgeManager as a way to inject alternate clocks
    for unit tests. In order for this to be effective, all callers that would
    otherwise use System.currentTimeMillis() must call
    EnvironmentEdgeManager.currentTime() instead, except the implementers of
    EnvironmentEdge.
    
    Signed-off-by: Bharath Vissapragada <bharathv@apache.org>
    Signed-off-by: Duo Zhang <zhangduo@apache.org>
    Signed-off-by: Viraj Jasani <vjasani@apache.org>
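
A minimal sketch of the pattern applied throughout the diff below: production
code reads the clock through EnvironmentEdgeManager.currentTime(), and a unit
test swaps in a deterministic clock. The RetryTracker class is hypothetical,
and the injectEdge()/reset() hooks are assumed from HBase's existing
EnvironmentEdgeManager test utilities (see TestDefaultEnvironmentEdge and
TestIncrementingEnvironmentEdge in the file list). The default edge is the one
implementation that still calls System.currentTimeMillis(), which is why
implementers of EnvironmentEdge are excepted.

    import org.apache.hadoop.hbase.util.EnvironmentEdge;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class ClockInjectionSketch {
      // Hypothetical caller: reads time through the managed edge instead of
      // System.currentTimeMillis(), so tests can substitute a clock.
      static class RetryTracker {
        long lastAttemptTs;
        void recordAttempt() {
          lastAttemptTs = EnvironmentEdgeManager.currentTime();
        }
      }

      public static void main(String[] args) {
        // Inject a fixed clock, exercise the caller, then restore the default.
        EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
          @Override
          public long currentTime() {
            return 1_000_000L; // deterministic timestamp for assertions
          }
        });
        try {
          RetryTracker tracker = new RetryTracker();
          tracker.recordAttempt();
          assert tracker.lastAttemptTs == 1_000_000L;
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }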
---
 .../hadoop/hbase/backup/impl/BackupManager.java    |  9 ++---
 .../apache/hadoop/hbase/backup/TestBackupBase.java |  2 +-
 .../hadoop/hbase/backup/TestBackupManager.java     |  9 ++---
 .../hadoop/hbase/backup/TestBackupSystemTable.java |  5 +--
 .../hbase/master/balancer/SimpleLoadBalancer.java  |  5 +--
 .../favored/TestFavoredNodeAssignmentHelper.java   |  4 ++-
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  |  3 +-
 .../hadoop/hbase/client/ClientIdGenerator.java     |  3 +-
 .../org/apache/hadoop/hbase/client/Delete.java     |  2 +-
 .../hadoop/hbase/client/RegionInfoBuilder.java     |  3 +-
 .../apache/hadoop/hbase/master/RegionState.java    |  7 ++--
 .../hadoop/hbase/slowlog/SlowLogTableAccessor.java |  8 ++---
 .../hbase/client/TestAsyncConnectionTracing.java   |  9 +++--
 .../client/TestAsyncRegionLocatorTracing.java      | 10 +++---
 .../hadoop/hbase/client/TestRegionInfoBuilder.java |  5 +--
 .../hadoop/hbase/client/TestRegionInfoDisplay.java |  3 +-
 .../org/apache/hadoop/hbase/ScheduledChore.java    |  6 ++--
 .../io/hadoopbackport/ThrottledInputStream.java    |  4 +--
 .../hadoop/hbase/util/CoprocessorClassLoader.java  |  4 +--
 .../java/org/apache/hadoop/hbase/util/IdLock.java  |  4 +--
 .../org/apache/hadoop/hbase/util/Random64.java     |  4 +--
 .../apache/hadoop/hbase/util/ReflectionUtils.java  |  2 +-
 .../java/org/apache/hadoop/hbase/util/Sleeper.java |  6 ++--
 .../java/org/apache/hadoop/hbase/util/Threads.java |  4 +--
 .../apache/hadoop/hbase/TestCellComparator.java    |  5 +--
 .../java/org/apache/hadoop/hbase/TestKeyValue.java | 15 ++++----
 .../test/java/org/apache/hadoop/hbase/Waiter.java  |  9 ++---
 .../hadoop/hbase/util/TestByteBufferArray.java     |  2 +-
 .../org/apache/hadoop/hbase/util/TestBytes.java    |  2 +-
 .../org/apache/hadoop/hbase/util/TestThreads.java  |  4 +--
 .../hadoop/hbase/coprocessor/TestSecureExport.java |  3 +-
 .../example/ExampleMasterObserverWithMetrics.java  |  5 +--
 .../example/WriteHeavyIncrementObserver.java       |  3 +-
 .../example/TestZooKeeperScanPolicyObserver.java   |  3 +-
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  5 +--
 .../hbase/hbtop/screen/top/TopScreenModel.java     |  3 +-
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  5 +--
 .../apache/hadoop/hbase/http/NoCacheFilter.java    |  4 +--
 .../org/apache/hadoop/hbase/ChaosZKClient.java     |  5 +--
 .../hadoop/hbase/DistributedHBaseCluster.java      | 13 +++----
 .../hadoop/hbase/IntegrationTestBackupRestore.java |  5 +--
 .../apache/hadoop/hbase/IntegrationTestIngest.java |  7 ++--
 .../hadoop/hbase/IntegrationTestManyRegions.java   |  5 +--
 .../IntegrationTestRegionReplicaReplication.java   |  7 ++--
 .../StripeCompactionsPerformanceEvaluation.java    | 10 +++---
 .../chaos/actions/MoveRegionsOfTableAction.java    |  5 +--
 .../hbase/chaos/actions/SnapshotTableAction.java   |  3 +-
 .../hbase/chaos/policies/PeriodicPolicy.java       |  5 +--
 .../IntegrationTestTableSnapshotInputFormat.java   |  3 +-
 .../hbase/test/IntegrationTestBigLinkedList.java   | 13 +++----
 .../hbase/test/IntegrationTestLoadCommonCrawl.java |  3 +-
 ...nTestTimeBoundedRequestsWithRegionReplicas.java |  5 +--
 .../hadoop/hbase/mapred/TableRecordReaderImpl.java |  7 ++--
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  6 ++--
 .../apache/hadoop/hbase/mapreduce/ImportTsv.java   |  3 +-
 .../apache/hadoop/hbase/mapreduce/SyncTable.java   |  3 +-
 .../hbase/mapreduce/TableRecordReaderImpl.java     |  7 ++--
 .../apache/hadoop/hbase/mapreduce/WALPlayer.java   |  5 +--
 .../hadoop/hbase/snapshot/ExportSnapshot.java      |  5 +--
 .../apache/hadoop/hbase/PerformanceEvaluation.java |  5 +--
 .../hadoop/hbase/mapreduce/TestCellCounter.java    |  3 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java      |  3 +-
 .../hadoop/hbase/mapreduce/TestImportExport.java   |  5 +--
 .../hadoop/hbase/mapreduce/TestRowCounter.java     | 17 ++++-----
 .../hadoop/hbase/mapreduce/TestSyncTable.java      |  5 +--
 .../hadoop/hbase/mapreduce/TestWALInputFormat.java |  3 +-
 .../hbase/mapreduce/TestWALRecordReader.java       | 32 ++++++++++-------
 .../hbase/replication/TestVerifyReplication.java   | 21 ++++++------
 .../replication/TestVerifyReplicationAdjunct.java  | 15 ++++----
 .../TestVerifyReplicationCrossDiffHdfs.java        |  5 +--
 .../hadoop/hbase/snapshot/TestExportSnapshot.java  |  6 ++--
 .../snapshot/TestExportSnapshotV1NoCluster.java    |  3 +-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java |  2 +-
 .../procedure2/store/wal/WALProcedureStore.java    |  9 ++---
 .../TestProcedureSchedulerConcurrency.java         |  5 +--
 .../store/ProcedureStorePerformanceEvaluation.java |  5 +--
 .../ProcedureWALLoaderPerformanceEvaluation.java   | 14 ++++----
 .../org/apache/hadoop/hbase/rest/RESTServer.java   |  3 +-
 .../hadoop/hbase/rest/ScannerResultGenerator.java  |  3 +-
 .../apache/hadoop/hbase/rest/client/Client.java    |  5 +--
 .../hadoop/hbase/rest/PerformanceEvaluation.java   |  3 +-
 .../hbase/rest/client/TestRemoteAdminRetries.java  |  5 +--
 .../hbase/rest/client/TestRemoteHTableRetries.java |  5 +--
 .../hadoop/hbase/rest/client/TestRemoteTable.java  |  3 +-
 .../org/apache/hadoop/hbase/HealthCheckChore.java  | 12 ++++---
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  9 ++---
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |  9 ++---
 .../hbase/io/hfile/LruAdaptiveBlockCache.java      |  5 +--
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java    |  3 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java    |  3 +-
 .../hadoop/hbase/ipc/NettyServerRpcConnection.java |  3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java     |  3 +-
 .../org/apache/hadoop/hbase/ipc/ServerCall.java    |  3 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   | 10 +++---
 .../hadoop/hbase/ipc/SimpleRpcServerResponder.java |  7 ++--
 .../hbase/ipc/SimpleServerRpcConnection.java       |  5 +--
 .../org/apache/hadoop/hbase/master/HMaster.java    | 21 ++++++------
 .../hadoop/hbase/master/MasterRpcServices.java     |  3 +-
 .../apache/hadoop/hbase/master/ServerManager.java  | 13 ++++---
 .../hbase/master/assignment/AssignmentManager.java |  4 +--
 .../hbase/master/assignment/RegionStateStore.java  |  4 +--
 .../hadoop/hbase/master/cleaner/DirScanPool.java   |  5 +--
 .../hadoop/hbase/master/cleaner/HFileCleaner.java  |  5 +--
 .../hadoop/hbase/master/locking/LockManager.java   |  9 +++--
 .../hadoop/hbase/master/locking/LockProcedure.java |  7 ++--
 .../monitoring/MemoryBoundedLogMessageBuffer.java  |  4 +--
 .../hbase/monitoring/MonitoredRPCHandlerImpl.java  |  4 +--
 .../hadoop/hbase/monitoring/MonitoredTaskImpl.java |  8 +++--
 .../hbase/namequeues/impl/SlowLogQueueService.java |  3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  8 ++---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 20 +++++------
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  6 ++--
 .../hadoop/hbase/regionserver/ScannerContext.java  |  9 ++---
 .../hbase/regionserver/compactions/Compactor.java  |  2 +-
 .../regionserver/handler/WALSplitterHandler.java   |  5 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java      |  2 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java     |  3 +-
 .../regionserver/DumpReplicationQueues.java        |  6 ++--
 .../HBaseInterClusterReplicationEndpoint.java      |  6 ++--
 .../replication/regionserver/MetricsSink.java      |  7 ++--
 .../regionserver/ReplicationSourceShipper.java     |  5 +--
 .../regionserver/ReplicationSyncUp.java            |  8 +++--
 .../access/SnapshotScannerHDFSAclHelper.java       | 33 +++++++++---------
 .../org/apache/hadoop/hbase/tool/CanaryTool.java   | 10 +++---
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  2 +-
 .../apache/hadoop/hbase/util/JVMClusterUtil.java   |  4 +--
 .../hadoop/hbase/util/ModifyRegionUtils.java       |  2 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java   |  6 ++--
 .../util/compaction/MajorCompactionTTLRequest.java |  3 +-
 .../hbase/util/compaction/MajorCompactor.java      |  4 ++-
 .../apache/hadoop/hbase/wal/AbstractWALRoller.java |  8 +++--
 .../org/apache/hadoop/hbase/wal/WALSplitUtil.java  |  3 +-
 .../java/org/apache/hadoop/hbase/HBaseCluster.java |  5 +--
 .../apache/hadoop/hbase/HBaseTestingUtility.java   | 13 +++----
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |  5 +--
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java  |  9 ++---
 .../apache/hadoop/hbase/MultithreadedTestUtil.java |  5 +--
 .../hadoop/hbase/PerformanceEvaluationCommons.java |  5 +--
 .../hadoop/hbase/TestGlobalMemStoreSize.java       |  5 +--
 .../org/apache/hadoop/hbase/TestIOFencing.java     | 11 +++---
 .../apache/hadoop/hbase/TestMetaTableAccessor.java | 12 +++----
 .../apache/hadoop/hbase/TestMetaTableLocator.java  |  3 +-
 .../hbase/TestMetaUpdatesGoToPriorityQueue.java    |  3 +-
 .../org/apache/hadoop/hbase/TestSerialization.java |  9 +++--
 .../org/apache/hadoop/hbase/TestZooKeeper.java     |  3 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java    |  9 ++---
 ...shotFromClientAfterSplittingRegionTestBase.java |  5 +--
 ...hotFromClientCloneLinksAfterDeleteTestBase.java |  7 ++--
 .../CloneSnapshotFromClientErrorTestBase.java      |  5 +--
 .../CloneSnapshotFromClientNormalTestBase.java     |  7 ++--
 .../client/CloneSnapshotFromClientTestBase.java    |  4 +--
 .../RestoreSnapshotFromClientCloneTestBase.java    |  3 +-
 .../RestoreSnapshotFromClientSimpleTestBase.java   |  3 +-
 .../client/RestoreSnapshotFromClientTestBase.java  |  4 +--
 .../org/apache/hadoop/hbase/client/TestAdmin2.java |  9 ++---
 .../org/apache/hadoop/hbase/client/TestAdmin3.java |  3 +-
 .../hbase/client/TestAsyncRegionAdminApi.java      | 12 +++----
 .../apache/hadoop/hbase/client/TestAsyncTable.java |  5 +--
 .../hbase/client/TestAsyncTableAdminApi2.java      |  5 +--
 .../hbase/client/TestBlockEvictionFromClient.java  | 10 +++---
 .../hbase/client/TestClientScannerRPCTimeout.java  |  5 +--
 .../apache/hadoop/hbase/client/TestConnection.java |  2 +-
 .../hadoop/hbase/client/TestFromClientSide.java    |  5 +--
 .../hadoop/hbase/client/TestFromClientSide3.java   |  3 +-
 .../hadoop/hbase/client/TestFromClientSide5.java   | 11 +++---
 ...oneSnapshotFromClientCloneLinksAfterDelete.java |  5 +--
 .../hbase/client/TestMutationGetCellBuilder.java   |  3 +-
 .../client/TestPutDeleteEtcCellIteration.java      |  3 +-
 .../hbase/client/TestScannersFromClientSide.java   |  4 +--
 .../client/TestSnapshotCloneIndependence.java      |  5 +--
 .../hadoop/hbase/client/TestSnapshotMetadata.java  |  9 ++---
 .../client/TestSnapshotTemporaryDirectory.java     |  5 +--
 .../hbase/client/TestTableSnapshotScanner.java     |  3 +-
 .../hbase/client/locking/TestEntityLocks.java      |  5 +--
 .../hadoop/hbase/codec/CodecPerformance.java       |  9 ++---
 .../hbase/coprocessor/TestCoprocessorMetrics.java  |  5 +--
 .../hbase/coprocessor/TestCoprocessorStop.java     |  8 ++---
 .../coprocessor/TestRegionObserverInterface.java   |  4 +--
 .../coprocessor/TestRegionObserverStacking.java    |  7 ++--
 .../hadoop/hbase/coprocessor/TestWALObserver.java  |  8 ++---
 .../hbase/filter/TestDependentColumnFilter.java    |  3 +-
 .../hbase/filter/TestFilterSerialization.java      |  5 +--
 .../filter/TestFuzzyRowAndColumnRangeFilter.java   |  5 +--
 .../hbase/filter/TestFuzzyRowFilterEndToEnd.java   |  9 ++---
 .../apache/hadoop/hbase/fs/TestBlockReorder.java   | 10 +++---
 .../hbase/fs/TestBlockReorderBlockLocation.java    |  5 +--
 .../hbase/fs/TestBlockReorderMultiBlocks.java      |  5 +--
 .../org/apache/hadoop/hbase/io/TestHeapSize.java   |  5 +--
 .../hbase/io/encoding/TestChangingEncoding.java    |  5 +--
 .../hadoop/hbase/io/hfile/TestHFileBlock.java      |  5 +--
 .../hfile/TestHFileScannerImplReferenceCount.java  |  3 +-
 .../hbase/io/hfile/TestScannerFromBucketCache.java |  6 ++--
 .../hbase/ipc/TestProtobufRpcServiceImpl.java      |  8 +++--
 .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java   | 16 +++++----
 .../hadoop/hbase/master/AbstractTestDLS.java       | 10 +++---
 .../hbase/master/TestActiveMasterManager.java      |  7 ++--
 .../hbase/master/TestClockSkewDetection.java       | 11 +++---
 .../hadoop/hbase/master/TestMasterMetrics.java     |  4 ++-
 .../hbase/master/TestMasterMetricsWrapper.java     |  9 ++---
 .../master/TestMetaAssignmentWithStopMaster.java   |  9 ++---
 .../hadoop/hbase/master/TestRegionPlacement.java   |  3 +-
 .../hadoop/hbase/master/TestSplitLogManager.java   |  3 +-
 .../hadoop/hbase/master/TestWarmupRegion.java      |  3 +-
 .../master/assignment/MockMasterServices.java      |  5 +--
 .../master/assignment/TestAssignmentManager.java   |  5 +--
 .../assignment/TestAssignmentManagerBase.java      |  3 +-
 .../master/assignment/TestRegionReplicaSplit.java  |  3 +-
 .../master/assignment/TestRegionStateStore.java    | 10 +++---
 .../hbase/master/assignment/TestRegionStates.java  | 13 +++----
 .../master/assignment/TestRogueRSAssignment.java   |  4 ++-
 .../hbase/master/cleaner/TestHFileCleaner.java     |  4 +--
 .../hbase/master/cleaner/TestLogsCleaner.java      |  9 ++---
 .../cleaner/TestReplicationHFileCleaner.java       |  8 +++--
 .../hbase/master/janitor/TestCatalogJanitor.java   |  5 +--
 .../master/janitor/TestCatalogJanitorCluster.java  | 11 +++---
 .../janitor/TestCatalogJanitorInMemoryStates.java  |  5 +--
 .../hadoop/hbase/master/janitor/TestMetaFixer.java | 11 +++---
 .../hbase/master/locking/TestLockProcedure.java    |  9 ++---
 ...terProcedureSchedulerPerformanceEvaluation.java |  8 ++---
 .../procedure/MasterProcedureTestingUtility.java   |  6 ++--
 .../procedure/TestCloneSnapshotProcedure.java      |  3 +-
 .../procedure/TestRestoreSnapshotProcedure.java    |  3 +-
 .../procedure/TestSafemodeBringsDownMaster.java    |  5 +--
 .../hbase/master/region/MasterRegionTestBase.java  |  3 +-
 .../master/region/TestMasterRegionCompaction.java  |  3 +-
 .../region/TestMasterRegionOnTwoFileSystems.java   |  6 ++--
 .../org/apache/hadoop/hbase/mob/MobTestUtil.java   |  3 +-
 .../hbase/mob/TestExpiredMobFileCleaner.java       |  5 +--
 .../hadoop/hbase/mob/TestMobDataBlockEncoding.java |  3 +-
 .../hadoop/hbase/mob/TestMobStoreCompaction.java   |  5 +--
 .../hadoop/hbase/mob/TestMobStoreScanner.java      | 11 +++---
 .../RegionProcedureStorePerformanceEvaluation.java |  3 +-
 .../region/RegionProcedureStoreTestHelper.java     |  3 +-
 .../store/region/TestRegionProcedureStore.java     |  3 +-
 .../hadoop/hbase/quotas/TestRateLimiter.java       |  2 +-
 .../hbase/regionserver/TestCompactingMemStore.java | 10 +++---
 .../TestCompactingToCellFlatMapMemStore.java       | 27 ++++++++-------
 .../hbase/regionserver/TestCompactionState.java    |  5 +--
 .../regionserver/TestDataBlockEncodingTool.java    |  3 +-
 .../regionserver/TestDefaultCompactSelection.java  |  2 +-
 .../hbase/regionserver/TestDefaultMemStore.java    | 10 +++---
 .../regionserver/TestEndToEndSplitTransaction.java | 21 ++++++------
 .../hadoop/hbase/regionserver/TestHMobStore.java   |  3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java     | 40 +++++++++++-----------
 .../regionserver/TestHRegionReplayEvents.java      |  2 +-
 .../regionserver/TestHRegionServerBulkLoad.java    |  3 +-
 .../hadoop/hbase/regionserver/TestHStore.java      | 10 +++---
 .../hadoop/hbase/regionserver/TestHStoreFile.java  |  9 ++---
 .../hbase/regionserver/TestMajorCompaction.java    |  3 +-
 .../regionserver/TestMasterAddressTracker.java     | 18 ++++++----
 .../regionserver/TestMemStoreSegmentsIterator.java |  5 +--
 .../hbase/regionserver/TestRegionIncrement.java    | 11 +++---
 .../hadoop/hbase/regionserver/TestRegionInfo.java  |  7 ++--
 .../TestRegionMergeTransactionOnCluster.java       | 11 +++---
 .../hadoop/hbase/regionserver/TestRegionOpen.java  |  3 +-
 .../hbase/regionserver/TestRegionReplicas.java     |  5 +--
 .../regionserver/TestRegionServerAbortTimeout.java |  5 +--
 .../TestRegionServerReportForDuty.java             |  9 +++--
 .../hbase/regionserver/TestReversibleScanners.java |  3 +-
 .../regionserver/TestRowPrefixBloomFilter.java     |  3 +-
 .../hadoop/hbase/regionserver/TestScanner.java     |  7 ++--
 .../regionserver/TestScannerWithBulkload.java      | 10 +++---
 .../regionserver/TestSimpleTimeRangeTracker.java   |  3 +-
 .../TestStoreFileScannerWithTagCompression.java    |  3 +-
 .../hbase/regionserver/TestStoreScanner.java       | 13 +++----
 .../hadoop/hbase/regionserver/TestWALLockup.java   |  3 +-
 .../hadoop/hbase/regionserver/TestWideScanner.java |  3 +-
 .../regionserver/compactions/TestCloseChecker.java |  3 +-
 .../compactions/TestFIFOCompactionPolicy.java      |  2 +-
 .../TestCompactionWithThroughputController.java    | 11 +++---
 .../hbase/regionserver/wal/AbstractTestFSWAL.java  |  8 ++---
 .../wal/AbstractTestLogRollPeriod.java             |  5 +--
 .../regionserver/wal/AbstractTestProtobufLog.java  |  3 +-
 .../regionserver/wal/AbstractTestWALReplay.java    |  3 +-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java     |  3 +-
 .../regionserver/wal/TestCombinedAsyncWriter.java  |  3 +-
 .../hbase/regionserver/wal/TestDurability.java     | 13 ++++---
 .../hbase/regionserver/wal/TestLogRollAbort.java   |  5 +--
 .../hbase/regionserver/wal/TestLogRolling.java     |  9 ++---
 .../regionserver/wal/TestLogRollingNoCluster.java  |  3 +-
 .../TestReplicationEmptyWALRecovery.java           | 11 +++---
 .../hbase/replication/TestReplicationEndpoint.java |  3 +-
 .../hbase/replication/TestReplicationKillRS.java   |  5 +--
 .../replication/TestReplicationSmallTests.java     |  5 +--
 .../TestReplicationWALEntryFilters.java            |  7 ++--
 .../master/TestRecoverStandbyProcedure.java        |  3 +-
 .../regionserver/TestDumpReplicationQueues.java    |  3 +-
 ...InterClusterReplicationEndpointFilterEdits.java | 30 +++++++++-------
 .../regionserver/TestReplicationSink.java          |  5 +--
 .../regionserver/TestReplicationSourceManager.java |  9 +++--
 .../TestSerialReplicationEndpoint.java             |  5 +--
 .../regionserver/TestWALEntryStream.java           | 18 +++++-----
 .../hbase/rsgroup/EnableRSGroupsTestBase.java      |  5 +--
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java   | 11 +++---
 .../security/access/TestAccessController.java      |  3 +-
 .../access/TestCellACLWithMultipleVersions.java    |  2 +-
 .../security/token/TestTokenAuthentication.java    |  2 +-
 .../token/TestZKSecretWatcherRefreshKeys.java      |  7 ++--
 .../hbase/snapshot/SnapshotTestingUtils.java       |  9 ++---
 .../snapshot/TestFlushSnapshotFromClient.java      |  6 ++--
 .../TestRestoreFlushSnapshotFromClient.java        | 14 +++++---
 .../hadoop/hbase/util/ConstantDelayQueue.java      |  2 +-
 .../apache/hadoop/hbase/util/HFileTestUtil.java    |  5 +--
 .../hadoop/hbase/util/MultiThreadedAction.java     |  6 ++--
 .../hadoop/hbase/util/MultiThreadedReader.java     |  6 ++--
 .../hbase/util/MultiThreadedReaderWithACL.java     |  2 +-
 .../hadoop/hbase/util/MultiThreadedUpdater.java    | 18 +++++-----
 .../hbase/util/MultiThreadedUpdaterWithACL.java    | 10 +++---
 .../hadoop/hbase/util/MultiThreadedWriter.java     | 10 +++---
 .../hadoop/hbase/util/MultiThreadedWriterBase.java |  3 +-
 .../hbase/util/MultiThreadedWriterWithACL.java     |  8 ++---
 .../hadoop/hbase/util/TestBloomFilterChunk.java    | 12 +++----
 .../hbase/util/TestDefaultEnvironmentEdge.java     | 11 +++---
 .../hadoop/hbase/util/TestFSTableDescriptors.java  |  4 +--
 .../org/apache/hadoop/hbase/util/TestFSUtils.java  | 16 +++++----
 .../org/apache/hadoop/hbase/util/TestIdLock.java   |  4 +--
 .../util/TestIdReadWriteLockWithObjectPool.java    |  4 +--
 .../util/TestIncrementingEnvironmentEdge.java      |  2 +-
 .../hbase/util/compaction/TestMajorCompactor.java  |  5 +--
 .../apache/hadoop/hbase/wal/TestCompressedWAL.java |  3 +-
 .../hadoop/hbase/wal/TestFSHLogProvider.java       |  3 +-
 .../org/apache/hadoop/hbase/wal/TestSecureWAL.java |  3 +-
 .../hbase/wal/TestSyncReplicationWALProvider.java  |  3 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    | 31 +++++++++--------
 .../apache/hadoop/hbase/wal/TestWALMethods.java    |  3 +-
 .../hadoop/hbase/wal/TestWALReaderOnSecureWAL.java |  6 ++--
 .../apache/hadoop/hbase/wal/TestWALRootDir.java    | 12 ++++---
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |  4 +--
 .../hadoop/hbase/wal/TestWALSplitToHFile.java      | 10 +++---
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java |  5 +--
 .../hbase/thrift/TBoundedThreadPoolServer.java     |  5 +--
 .../apache/hadoop/hbase/thrift/ThriftServer.java   |  3 +-
 .../hadoop/hbase/thrift/TestThriftServer.java      | 11 +++---
 .../hadoop/hbase/thrift2/TestThriftConnection.java |  3 +-
 .../thrift2/TestThriftHBaseServiceHandler.java     | 23 +++++++------
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   |  6 ++--
 .../hbase/zookeeper/MiniZooKeeperCluster.java      | 10 +++---
 .../hadoop/hbase/zookeeper/ZKNodeTracker.java      |  5 +--
 338 files changed, 1271 insertions(+), 950 deletions(-)

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index 861b79a..79c702c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.procedure.ProcedureManagerHost;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -382,11 +383,11 @@ public class BackupManager implements Closeable {
    * @throws IOException if active session already exists
    */
   public void startBackupSession() throws IOException {
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     long timeout = conf.getInt(BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY,
       DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT) * 1000L;
     long lastWarningOutputTime = 0;
-    while (System.currentTimeMillis() - startTime < timeout) {
+    while (EnvironmentEdgeManager.currentTime() - startTime < timeout) {
       try {
         systemTable.startBackupExclusiveOperation();
         return;
@@ -400,8 +401,8 @@ public class BackupManager implements Closeable {
             Thread.currentThread().interrupt();
           }
           if (lastWarningOutputTime == 0
-              || (System.currentTimeMillis() - lastWarningOutputTime) > 60000) {
-            lastWarningOutputTime = System.currentTimeMillis();
+              || (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) {
+            lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
             LOG.warn("Waiting to acquire backup exclusive lock for {}s",
                 +(lastWarningOutputTime - startTime) / 1000);
           }
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 692c21b..bacb2d4 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -425,7 +425,7 @@ public class TestBackupBase {
   }
 
   protected static void createTables() throws Exception {
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     table1 = TableName.valueOf("test-" + tid);
     Admin ha = TEST_UTIL.getAdmin();
 
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java
index 3e42294..625fbab 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -96,17 +97,17 @@ public class TestBackupManager {
       public void run() {
         try {
           backupManager.startBackupSession();
-          boolean result = startTimes.compareAndSet(0, 0, System.currentTimeMillis());
+          boolean result = startTimes.compareAndSet(0, 0, EnvironmentEdgeManager.currentTime());
           if (!result) {
-            result = startTimes.compareAndSet(1, 0, System.currentTimeMillis());
+            result = startTimes.compareAndSet(1, 0, EnvironmentEdgeManager.currentTime());
             if (!result) {
               throw new IOException("PANIC! Unreachable code");
             }
           }
           Thread.sleep(sleepTime);
-          result = stopTimes.compareAndSet(0, 0, System.currentTimeMillis());
+          result = stopTimes.compareAndSet(0, 0, EnvironmentEdgeManager.currentTime());
           if (!result) {
-            result = stopTimes.compareAndSet(1, 0, System.currentTimeMillis());
+            result = stopTimes.compareAndSet(1, 0, EnvironmentEdgeManager.currentTime());
             if (!result) {
               throw new IOException("PANIC! Unreachable code");
             }
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
index 5d48fc5..450cf2f 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -520,8 +521,8 @@ public class TestBackupSystemTable {
         new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] {
             TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
             "/hbase/backup");
-    ctxt.setStartTs(System.currentTimeMillis());
-    ctxt.setCompleteTs(System.currentTimeMillis() + 1);
+    ctxt.setStartTs(EnvironmentEdgeManager.currentTime());
+    ctxt.setCompleteTs(EnvironmentEdgeManager.currentTime() + 1);
     return ctxt;
   }
 
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index c716f2f..84418b3 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -298,7 +299,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
   @Override
   protected List<RegionPlan> balanceTable(TableName tableName,
       Map<ServerName, List<RegionInfo>> loadOfOneTable) {
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
 
     // construct a Cluster object with clusterMap and rest of the
     // argument as defaults
@@ -482,7 +483,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
       balanceOverall(regionsToReturn, serverBalanceInfo, fetchFromTail, regionsToMove, max, min);
     }
 
-    long endTime = System.currentTimeMillis();
+    long endTime = EnvironmentEdgeManager.currentTime();
 
     if (!regionsToMove.isEmpty() || neededRegions != 0) {
       // Emit data so can diagnose how balancer went astray.
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
index 986923f..b2baaa0 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Triple;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -98,7 +99,8 @@ public class TestFavoredNodeAssignmentHelper {
       }
     });
     for (int i = 0; i < 40; i++) {
-      ServerName server = ServerName.valueOf("foo" + i, 1234, System.currentTimeMillis());
+      ServerName server = ServerName.valueOf("foo" + i, 1234,
+        EnvironmentEdgeManager.currentTime());
       String rack = getRack(i);
       if (!rack.equals(RackManager.UNKNOWN_RACK)) {
         rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index 3c84097..c97e094 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -32,6 +32,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -136,7 +137,7 @@ public final class ServerMetricsBuilder {
   private final Map<byte[], RegionMetrics> regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   private final Map<byte[], UserMetrics> userMetrics = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   private final Set<String> coprocessorNames = new TreeSet<>();
-  private long reportTimestamp = System.currentTimeMillis();
+  private long reportTimestamp = EnvironmentEdgeManager.currentTime();
   private long lastReportTimestamp = 0;
   private ServerMetricsBuilder(ServerName serverName) {
     this.serverName = serverName;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
index d4b4b4a..9125132 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * The class that is able to determine some unique strings for the client,
@@ -45,7 +46,7 @@ final class ClientIdGenerator {
     byte[] selfBytes = getIpAddressBytes();
     Long pid = getPid();
     long tid = Thread.currentThread().getId();
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
 
     byte[] id = new byte[selfBytes.length + ((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG];
     int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index ccda14f..2a219f6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -55,7 +55,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Specifying timestamps, deleteFamily and deleteColumns will delete all
  * versions with a timestamp less than or equal to that passed.  If no
  * timestamp is specified, an entry is added with a timestamp of 'now'
- * where 'now' is the servers's System.currentTimeMillis().
+ * where 'now' is the servers's EnvironmentEdgeManager.currentTime().
  * Specifying a timestamp to the deleteColumn method will
  * delete versions only with a timestamp equal to that specified.
  * If no timestamp is passed to deleteColumn, internally, it figures the
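
A short usage sketch of the timestamp semantics this javadoc describes,
assuming a hypothetical table handle, row, and column values; addColumns() and
addColumn() are the current client methods corresponding to the
deleteColumns/deleteColumn behavior named above:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteSketch {
      static void deleteExamples(Table table) throws IOException {
        Delete d = new Delete(Bytes.toBytes("row1"));
        // No timestamp: removes all versions of cf:q1 with a timestamp <= 'now'.
        d.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
        // Explicit timestamp on addColumn: removes only the version with ts == 5.
        d.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 5L);
        table.delete(d);
      }
    }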
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index cbf9e4a..cc42b96 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
@@ -48,7 +49,7 @@ public class RegionInfoBuilder {
   private final TableName tableName;
   private byte[] startKey = HConstants.EMPTY_START_ROW;
   private byte[] endKey = HConstants.EMPTY_END_ROW;
-  private long regionId = System.currentTimeMillis();
+  private long regionId = EnvironmentEdgeManager.currentTime();
   private int replicaId = RegionInfo.DEFAULT_REPLICA_ID;
   private boolean offLine = false;
   private boolean split = false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index 2d03473..8ae0888 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import java.util.Date;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
@@ -188,11 +189,11 @@ public class RegionState {
   private long ritDuration;
 
   public static RegionState createForTesting(RegionInfo region, State state) {
-    return new RegionState(region, state, System.currentTimeMillis(), null);
+    return new RegionState(region, state, EnvironmentEdgeManager.currentTime(), null);
   }
 
   public RegionState(RegionInfo region, State state, ServerName serverName) {
-    this(region, state, System.currentTimeMillis(), serverName);
+    this(region, state, EnvironmentEdgeManager.currentTime(), serverName);
   }
 
   public RegionState(RegionInfo region,
@@ -390,7 +391,7 @@ public class RegionState {
    * A slower (but more easy-to-read) stringification
    */
   public String toDescriptiveString() {
-    long relTime = System.currentTimeMillis() - stamp;
+    long relTime = EnvironmentEdgeManager.currentTime() - stamp;
     return hri.getRegionNameAsString()
       + " state=" + state
       + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)"
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
index f4f29c6..bf4cd04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
@@ -127,9 +127,9 @@ public class SlowLogTableAccessor {
   }
 
   /**
-   * Create rowKey: currentTimeMillis APPEND slowLogPayload.hashcode
+   * Create rowKey: currentTime APPEND slowLogPayload.hashcode
    * Scan on slowlog table should keep records with sorted order of time, however records
-   * added at the very same time (currentTimeMillis) could be in random order.
+   * added at the very same time could be in random order.
    *
    * @param slowLogPayload SlowLogPayload to process
    * @return rowKey byte[]
@@ -141,8 +141,8 @@ public class SlowLogTableAccessor {
     if (lastFiveDig.startsWith("-")) {
       lastFiveDig = String.valueOf(RANDOM.nextInt(99999));
     }
-    final long currentTimeMillis = EnvironmentEdgeManager.currentTime();
-    final String timeAndHashcode = currentTimeMillis + lastFiveDig;
+    final long currentTime = EnvironmentEdgeManager.currentTime();
+    final String timeAndHashcode = currentTime + lastFiveDig;
     final long rowKeyLong = Long.parseLong(timeAndHashcode);
     return Bytes.toBytes(rowKeyLong);
   }
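
A worked example of the rowkey scheme in the hunk above; the timestamp and
hashcode digits are hypothetical:

    public class SlowLogRowKeySketch {
      public static void main(String[] args) {
        long currentTime = 1622566706000L;  // 13-digit epoch millis
        String lastFiveDig = "12345";       // last five digits of the payload hashcode
        long rowKeyLong = Long.parseLong(currentTime + lastFiveDig);
        // "1622566706000" + "12345" -> 162256670600012345: 18 digits still fit
        // in a signed long, and the leading time digits keep scans time-ordered.
        // The hashcode suffix only disambiguates records within the same
        // millisecond; their relative order is arbitrary, as the javadoc notes.
        System.out.println(rowKeyLong);
      }
    }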
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
index c14365a..eef0b13 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -53,7 +54,7 @@ public class TestAsyncConnectionTracing {
   private static Configuration CONF = HBaseConfiguration.create();
 
   private ServerName masterServer =
-    ServerName.valueOf("localhost", 12345, System.currentTimeMillis());
+    ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime());
 
   private AsyncConnection conn;
 
@@ -87,7 +88,8 @@ public class TestAsyncConnectionTracing {
       .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get();
     assertEquals(StatusCode.OK, data.getStatus().getStatusCode());
     if (serverName != null) {
-      assertEquals(serverName.getServerName(), data.getAttributes().get(TraceUtil.SERVER_NAME_KEY));
+      assertEquals(serverName.getServerName(),
+        data.getAttributes().get(TraceUtil.SERVER_NAME_KEY));
     }
   }
 
@@ -99,7 +101,8 @@ public class TestAsyncConnectionTracing {
 
   @Test
   public void testHbckWithServerName() throws IOException {
-    ServerName serverName = ServerName.valueOf("localhost", 23456, System.currentTimeMillis());
+    ServerName serverName = ServerName.valueOf("localhost", 23456,
+      EnvironmentEdgeManager.currentTime());
     conn.getHbck(serverName);
     assertTrace("getHbck", serverName);
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index b629bbf..15b00f6 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -69,11 +70,11 @@ public class TestAsyncRegionLocatorTracing {
     RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
     locs = new RegionLocations(
       new HRegionLocation(metaRegionInfo,
-        ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis())),
+        ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
       new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1),
-        ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis())),
+        ServerName.valueOf("127.0.0.2", 12345, EnvironmentEdgeManager.currentTime())),
       new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2),
-        ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis())));
+        ServerName.valueOf("127.0.0.3", 12345, EnvironmentEdgeManager.currentTime())));
     conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) {
 
       @Override
@@ -103,7 +104,8 @@ public class TestAsyncRegionLocatorTracing {
 
   @Test
   public void testClearCacheServerName() {
-    ServerName sn = ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis());
+    ServerName sn = ServerName.valueOf("127.0.0.1", 12345,
+      EnvironmentEdgeManager.currentTime());
     conn.getLocator().clearCache(sn);
     SpanData span = waitSpan("AsyncRegionLocator.clearCache");
     assertEquals(StatusCode.OK, span.getStatus().getStatusCode());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index 0d2b7cc..d7b375b 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -198,7 +199,7 @@ public class TestRegionInfoBuilder {
   public void testParseName() throws IOException {
     final TableName tableName = name.getTableName();
     byte[] startKey = Bytes.toBytes("startKey");
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;
 
     // test without replicaId
@@ -228,7 +229,7 @@ public class TestRegionInfoBuilder {
     byte[] startKey = Bytes.toBytes("startKey");
     byte[] endKey = Bytes.toBytes("endKey");
     boolean split = false;
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;
 
     RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index c0bbd91..5a21171 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -56,7 +57,7 @@ public class TestRegionInfoDisplay {
     .setStartKey(startKey)
     .setEndKey(endKey)
     .setSplit(false)
-    .setRegionId(System.currentTimeMillis())
+    .setRegionId(EnvironmentEdgeManager.currentTime())
     .setReplicaId(1).build();
     checkEquality(ri, conf);
     Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY,
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
index 6155bbd..aaedc78 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase;
 import com.google.errorprone.annotations.RestrictedApi;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -172,7 +174,7 @@ public abstract class ScheduledChore implements Runnable {
    */
   private synchronized void updateTimeTrackingBeforeRun() {
     timeOfLastRun = timeOfThisRun;
-    timeOfThisRun = System.currentTimeMillis();
+    timeOfThisRun = EnvironmentEdgeManager.currentTime();
   }
 
   /**
@@ -215,7 +217,7 @@ public abstract class ScheduledChore implements Runnable {
    * @return true if time is earlier or equal to current milli time
    */
   private synchronized boolean isValidTime(final long time) {
-    return time > 0 && time <= System.currentTimeMillis();
+    return time > 0 && time <= EnvironmentEdgeManager.currentTime();
   }
 
   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 68627c3..6f45d2d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -42,7 +42,7 @@ public class ThrottledInputStream extends InputStream {
 
   private final InputStream rawStream;
   private final long maxBytesPerSec;
-  private final long startTime = System.currentTimeMillis();
+  private final long startTime = EnvironmentEdgeManager.currentTime();
 
   private long bytesRead = 0;
   private long totalSleepTime = 0;
@@ -164,7 +164,7 @@ public class ThrottledInputStream extends InputStream {
    * @return Read rate, in bytes/sec.
    */
   public long getBytesPerSec() {
-    long elapsed = (System.currentTimeMillis() - startTime) / 1000;
+    long elapsed = (EnvironmentEdgeManager.currentTime() - startTime) / 1000;
     if (elapsed == 0) {
       return bytesRead;
     } else {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index f1589ba..f36ff13 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -174,7 +174,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
       for (Path path : FileUtil.stat2Paths(fileStatuses)) {  // for each file that match the pattern
         if (fs.isFile(path)) {  // only process files, skip for directories
           File dst = new File(parentDirStr, "." + pathPrefix + "."
-            + path.getName() + "." + System.currentTimeMillis() + ".jar");
+            + path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
           fs.copyToLocalFile(path, new Path(dst.toString()));
           dst.deleteOnExit();
 
@@ -188,7 +188,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
               Matcher m = libJarPattern.matcher(entry.getName());
               if (m.matches()) {
                 File file = new File(parentDirStr, "." + pathPrefix + "."
-                  + path.getName() + "." + System.currentTimeMillis() + "." + m.group(1));
+                  + path.getName() + "." + EnvironmentEdgeManager.currentTime() + "." + m.group(1));
                 try (FileOutputStream outStream = new FileOutputStream(file)) {
                   IOUtils.copyBytes(jarFile.getInputStream(entry),
                     outStream, conf, true);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index 112af1e..fbf12ab 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -129,7 +129,7 @@ public class IdLock {
     Thread currentThread = Thread.currentThread();
     Entry entry = new Entry(id, currentThread);
     Entry existing;
-    long waitUtilTS = System.currentTimeMillis() + time;
+    long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
     long remaining = time;
     while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
       synchronized (existing) {
@@ -139,7 +139,7 @@ public class IdLock {
             while (existing.locked) {
               existing.wait(remaining);
               if (existing.locked) {
-                long currentTS = System.currentTimeMillis();
+                long currentTS = EnvironmentEdgeManager.currentTime();
                 if (currentTS >= waitUtilTS) {
                   // time is up
                   return null;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
index f337b5f..fa26758 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
@@ -108,7 +108,7 @@ public class Random64 {
     final int precision = 100000;
     final long totalTestCnt = defaultTotalTestCnt + precision;
     final int reportPeriod = 100 * precision;
-    final long startTime = System.currentTimeMillis();
+    final long startTime = EnvironmentEdgeManager.currentTime();
 
     System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);
 
@@ -130,7 +130,7 @@ public class Random64 {
         }
 
         if (cnt % reportPeriod == 0) {
-          long cost = System.currentTimeMillis() - startTime;
+          long cost = EnvironmentEdgeManager.currentTime() - startTime;
           long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
           System.out.println(
             String.format(
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
index 204d5c9..7e19578 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
@@ -122,7 +122,7 @@ public class ReflectionUtils {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now - previousLogTime >= minInterval * 1000) {
           previousLogTime = now;
           dumpStack = true;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
index 54accef..d2f0371 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
@@ -71,7 +71,7 @@ public class Sleeper {
     if (this.stopper.isStopped()) {
       return;
     }
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     long currentSleepTime = sleepTime;
     while (currentSleepTime > 0) {
       long woke = -1;
@@ -83,7 +83,7 @@ public class Sleeper {
 
           sleepLock.wait(currentSleepTime);
         }
-        woke = System.currentTimeMillis();
+        woke = EnvironmentEdgeManager.currentTime();
         long slept = woke - now;
         if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
           LOG.warn("We slept {}ms instead of {}ms, this is likely due to a long " +
@@ -98,7 +98,7 @@ public class Sleeper {
         }
       }
       // Recalculate waitTime.
-      woke = (woke == -1)? System.currentTimeMillis(): woke;
+      woke = (woke == -1)? EnvironmentEdgeManager.currentTime() : woke;
       currentSleepTime = this.period - (woke - now);
     }
     synchronized(sleepLock) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
index 726adcb..b4bbdac 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
@@ -150,7 +150,7 @@ public class Threads {
    * @param msToWait the amount of time to sleep in milliseconds
    */
   public static void sleepWithoutInterrupt(final long msToWait) {
-    long timeMillis = System.currentTimeMillis();
+    long timeMillis = EnvironmentEdgeManager.currentTime();
     long endTime = timeMillis + msToWait;
     boolean interrupted = false;
     while (timeMillis < endTime) {
@@ -159,7 +159,7 @@ public class Threads {
       } catch (InterruptedException ex) {
         interrupted = true;
       }
-      timeMillis = System.currentTimeMillis();
+      timeMillis = EnvironmentEdgeManager.currentTime();
     }
 
     if (interrupted) {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 7762330..8729a44 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -139,7 +140,7 @@ public class TestCellComparator {
    */
   @Test
   public void testMetaComparisons() throws Exception {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
 
     // Meta compares
     Cell aaa = createByteBufferKeyValueFromKeyValue(new KeyValue(
@@ -176,7 +177,7 @@ public class TestCellComparator {
    */
   @Test
   public void testMetaComparisons2() {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     CellComparator c = MetaCellComparator.META_COMPARATOR;
     assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
             Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)),
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index 8b943be..79d2bba 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -39,6 +39,7 @@ import java.util.TreeSet;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -144,7 +145,7 @@ public class TestKeyValue {
 
   @Test
   public void testMoreComparisons() {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
 
     // Meta compares
     KeyValue aaa = new KeyValue(
@@ -174,7 +175,7 @@ public class TestKeyValue {
   @Test
   public void testMetaComparatorTableKeysWithCommaOk() {
     CellComparator c = MetaCellComparator.META_COMPARATOR;
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     // meta key values are not quite right. A user can enter illegal values
     // from the shell when scanning meta.
     KeyValue a = new KeyValue(Bytes.toBytes("table,key,with,commas1,1234"), now);
@@ -204,7 +205,7 @@ public class TestKeyValue {
   }
 
   private void metacomparisons(final CellComparatorImpl c) {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     assertTrue(c.compare(new KeyValue(
         Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
       new KeyValue(
@@ -221,7 +222,7 @@ public class TestKeyValue {
   }
 
   private void comparisons(final CellComparatorImpl c) {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     assertTrue(c.compare(new KeyValue(
         Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
       new KeyValue(
@@ -520,7 +521,7 @@ public class TestKeyValue {
   @Test
   public void testMetaKeyComparator() {
     CellComparator c = MetaCellComparator.META_COMPARATOR;
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
 
     KeyValue a = new KeyValue(Bytes.toBytes("table1"), now);
     KeyValue b = new KeyValue(Bytes.toBytes("table2"), now);
@@ -589,12 +590,12 @@ public class TestKeyValue {
       new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
             Bytes.toBytes("2")),
       new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
-            System.currentTimeMillis(), Bytes.toBytes("2"),
+            EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
             new Tag[] {
               new ArrayBackedTag((byte) 120, "tagA"),
               new ArrayBackedTag((byte) 121, Bytes.toBytes("tagB")) }),
       new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
-            System.currentTimeMillis(), Bytes.toBytes("2"),
+            EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
             new Tag[] { new ArrayBackedTag((byte) 0, "tagA") }),
       new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes(""),
             Bytes.toBytes("1")) };
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java
index 5302093..fd5b0b1 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 import java.text.MessageFormat;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -171,7 +172,7 @@ public final class Waiter {
    */
   public static <E extends Exception> long waitFor(Configuration conf, long timeout, long interval,
       boolean failIfTimeout, Predicate<E> predicate) {
-    long started = System.currentTimeMillis();
+    long started = EnvironmentEdgeManager.currentTime();
     long adjustedTimeout = (long) (getWaitForRatio(conf) * timeout);
     long mustEnd = started + adjustedTimeout;
     long remainderWait;
@@ -183,7 +184,7 @@ public final class Waiter {
       LOG.info(MessageFormat.format("Waiting up to [{0}] milli-secs(wait.for.ratio=[{1}])",
         adjustedTimeout, getWaitForRatio(conf)));
       while (!(eval = predicate.evaluate())
-              && (remainderWait = mustEnd - System.currentTimeMillis()) > 0) {
+              && (remainderWait = mustEnd - EnvironmentEdgeManager.currentTime()) > 0) {
         try {
           // handle tail case when remainder wait is less than one interval
           sleepInterval = Math.min(remainderWait, interval);
@@ -197,7 +198,7 @@ public final class Waiter {
       if (!eval) {
         if (interrupted) {
           LOG.warn(MessageFormat.format("Waiting interrupted after [{0}] msec",
-            System.currentTimeMillis() - started));
+            EnvironmentEdgeManager.currentTime() - started));
         } else if (failIfTimeout) {
           String msg = getExplanation(predicate);
           fail(MessageFormat
@@ -208,7 +209,7 @@ public final class Waiter {
               MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg);
         }
       }
-      return (eval || interrupted) ? (System.currentTimeMillis() - started) : -1;
+      return (eval || interrupted) ? (EnvironmentEdgeManager.currentTime() - started) : -1;
     } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
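
The waitFor overload above polls a Predicate until it holds or the ratio-adjusted timeout expires, with both the deadline and the reported elapsed time now measured on the injectable clock. A usage sketch against the signature shown (the done flag is illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Waiter;

    Configuration conf = new Configuration();
    AtomicBoolean done = new AtomicBoolean(false);
    // Poll every 100 ms for up to 10 s; fail the test if time runs out.
    long elapsedMs = Waiter.waitFor(conf, 10_000, 100, true,
      new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return done.get();
        }
      });
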
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
index 6929403..8384b05 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java
@@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category;
 @Category({ MiscTests.class, SmallTests.class })
 public class TestByteBufferArray {
 
-  private static final Random RANDOM = new Random(System.currentTimeMillis());
+  private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime());
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
index dde0d27..113eae1 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
@@ -359,7 +359,7 @@ public class TestBytes extends TestCase {
 
   public void testToStringBytesBinaryReversible() {
     //  let's run test with 1000 randomly generated byte arrays
-    Random rand = new Random(System.currentTimeMillis());
+    Random rand = new Random(EnvironmentEdgeManager.currentTime());
     byte[] randomBytes = new byte[1000];
     for (int i = 0; i < 1000; i++) {
       rand.nextBytes(randomBytes);
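
Seeding Random from the edge rather than the raw clock has a handy side effect: under an injected, fixed edge the seed, and therefore the generated bytes, are reproducible across runs. One caveat: a static field such as TestByteBufferArray's RANDOM is seeded at class-load time, typically before any test has injected an edge. A sketch of the reproducibility point, using ManualEnvironmentEdge as above:

    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(42L);                      // freeze "now" at 42 ms
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      java.util.Random r1 = new java.util.Random(EnvironmentEdgeManager.currentTime());
      java.util.Random r2 = new java.util.Random(EnvironmentEdgeManager.currentTime());
      assert r1.nextLong() == r2.nextLong(); // same seed, same sequence
    } finally {
      EnvironmentEdgeManager.reset();
    }
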
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
index 4f6da4e..98b14ac 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java
@@ -56,7 +56,7 @@ public class TestThreads {
     });
     LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)");
     sleeper.start();
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     LOG.debug("Main thread: sleeping for 200 ms");
     Threads.sleep(200);
 
@@ -75,7 +75,7 @@ public class TestThreads {
     assertTrue("sleepWithoutInterrupt did not preserve the thread's " +
         "interrupted status", wasInterrupted.get());
 
-    long timeElapsed = System.currentTimeMillis() - startTime;
+    long timeElapsed = EnvironmentEdgeManager.currentTime() - startTime;
     // We expect to wait at least SLEEP_TIME_MS, but we can wait more if there is a GC.
     assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " +
         " sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS);
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
index d3be45b..ee52433 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
 import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -105,7 +106,7 @@ public class TestSecureExport {
   private static final byte[] ROW3 = Bytes.toBytes("row3");
   private static final byte[] QUAL = Bytes.toBytes("qual");
   private static final String LOCALHOST = "localhost";
-  private static final long NOW = System.currentTimeMillis();
+  private static final long NOW = EnvironmentEdgeManager.currentTime();
   // user granted all global permissions
   private static final String USER_ADMIN = "admin";
   // user is the table owner and will have all permissions on the table
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java
index ecc2559..9686e4b 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.metrics.Counter;
 import org.apache.hadoop.hbase.metrics.Gauge;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -77,14 +78,14 @@ public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, Mast
                              TableDescriptor desc, RegionInfo[] regions) throws IOException {
     // we rely on the fact that there is only 1 instance of our MasterObserver. We record the
     // time when the operation starts, before the operation executes.
-    this.createTableStartTime = System.currentTimeMillis();
+    this.createTableStartTime = EnvironmentEdgeManager.currentTime();
   }
 
   @Override
   public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
                               TableDescriptor desc, RegionInfo[] regions) throws IOException {
     if (this.createTableStartTime > 0) {
-      long time = System.currentTimeMillis() - this.createTableStartTime;
+      long time = EnvironmentEdgeManager.currentTime() - this.createTableStartTime;
       LOG.info("Create table took: " + time);
 
       // Update the timer metric for the create table operation duration.
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
index c7ebf0d..b277507 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.math.IntMath;
@@ -225,7 +226,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs
   private long getUniqueTimestamp(byte[] row) {
     int slot = Bytes.hashCode(row) & mask;
     MutableLong lastTimestamp = lastTimestamps[slot];
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     synchronized (lastTimestamp) {
       long pt = lastTimestamp.longValue() >> 10;
       if (now > pt) {
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
index 98514cb..5705c99 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs;
@@ -112,7 +113,7 @@ public class TestZooKeeperScanPolicyObserver {
 
   @Test
   public void test() throws IOException, KeeperException, InterruptedException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     put(0, 100, now - 10000);
     assertValueEquals(0, 100);
 
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java
index 0301664..2846c25 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
 import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl;
 import org.apache.hadoop.hbase.hbtop.terminal.impl.batch.BatchTerminal;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -98,7 +99,7 @@ public class Screen implements Closeable {
           nextScreenView = currentScreenView.handleKeyPress(keyPress);
         } else {
           if (timerTimestamp != null) {
-            long now = System.currentTimeMillis();
+            long now = EnvironmentEdgeManager.currentTime();
             if (timerTimestamp <= now) {
               // Dispatch the timer to the current screen
               timerTimestamp = null;
@@ -131,7 +132,7 @@ public class Screen implements Closeable {
   }
 
   public void setTimer(long delay) {
-    timerTimestamp = System.currentTimeMillis() + delay;
+    timerTimestamp = EnvironmentEdgeManager.currentTime() + delay;
   }
 
   public void cancelTimer() {
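
Screen's timer is a one-shot deadline: setTimer records now + delay, and the event loop fires the timer once that deadline is at or before the current edge time. The same idiom in isolation (names are illustrative, not part of this commit):

    // requires org.apache.hadoop.hbase.util.EnvironmentEdgeManager
    class DeadlineTimer {
      private Long deadline; // null = no timer armed

      void arm(long delayMs) {
        deadline = EnvironmentEdgeManager.currentTime() + delayMs;
      }

      void cancel() {
        deadline = null;
      }

      // True exactly once after the armed deadline has passed.
      boolean fired() {
        if (deadline != null && deadline <= EnvironmentEdgeManager.currentTime()) {
          deadline = null;
          return true;
        }
        return false;
      }
    }
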
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java
index bdd52a7..9cbcd18 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.field.FieldValue;
 import org.apache.hadoop.hbase.hbtop.mode.DrillDownInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -146,7 +147,7 @@ public class TopScreenModel {
 
   private void refreshSummary(ClusterMetrics clusterMetrics) {
     String currentTime = ISO_8601_EXTENDED_TIME_FORMAT
-      .format(System.currentTimeMillis());
+      .format(EnvironmentEdgeManager.currentTime());
     String version = clusterMetrics.getHBaseVersion();
     String clusterId = clusterMetrics.getClusterId();
     int liveServers = clusterMetrics.getLiveServerMetrics().size();
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java
index 6fbd23f..e4e3cae 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.hbtop.screen.help.HelpScreenView;
 import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenView;
 import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
 
@@ -92,7 +93,7 @@ public class TopScreenPresenter {
 
   public long refresh(boolean force) {
     if (!force) {
-      long delay = System.currentTimeMillis() - lastRefreshTimestamp;
+      long delay = EnvironmentEdgeManager.currentTime() - lastRefreshTimestamp;
       if (delay < refreshDelay.get()) {
         return refreshDelay.get() - delay;
       }
@@ -114,7 +115,7 @@ public class TopScreenPresenter {
 
     topScreenView.refreshTerminal();
 
-    lastRefreshTimestamp = System.currentTimeMillis();
+    lastRefreshTimestamp = EnvironmentEdgeManager.currentTime();
     iterations++;
     return refreshDelay.get();
   }
diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
index a1d4173..cd49f7e 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
@@ -28,7 +28,7 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@@ -43,7 +43,7 @@ public class NoCacheFilter implements Filter {
     throws IOException, ServletException {
     HttpServletResponse httpRes = (HttpServletResponse) res;
     httpRes.setHeader("Cache-Control", "no-cache");
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     httpRes.addDateHeader("Expires", now);
     httpRes.addDateHeader("Date", now);
     httpRes.addHeader("Pragma", "no-cache");
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
index 31fb9e3..f1fed9e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;
@@ -118,9 +119,9 @@ public class ChaosZKClient {
         CreateMode.EPHEMERAL_SEQUENTIAL,
         submitTaskCallback,
         taskObject);
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
 
-      while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) {
+      while ((EnvironmentEdgeManager.currentTime() - start) < TASK_EXECUTION_TIMEOUT) {
         if(taskStatus != null) {
           return taskStatus;
         }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 712a649..d4fa6ed 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -235,9 +236,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
   private void waitForServiceToStop(ServiceType service, ServerName serverName, long timeout)
     throws IOException {
     LOG.info("Waiting for service: {} to stop: {}", service, serverName.getServerName());
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
 
-    while ((System.currentTimeMillis() - start) < timeout) {
+    while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
       if (!clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
         return;
       }
@@ -249,9 +250,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
   private void waitForServiceToStart(ServiceType service, ServerName serverName, long timeout)
     throws IOException {
     LOG.info("Waiting for service: {} to start: ", service, serverName.getServerName());
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
 
-    while ((System.currentTimeMillis() - start) < timeout) {
+    while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
       if (clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
         return;
       }
@@ -285,8 +286,8 @@ public class DistributedHBaseCluster extends HBaseCluster {
 
   @Override
   public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start < timeout) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while (EnvironmentEdgeManager.currentTime() - start < timeout) {
       try {
         connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION));
         return true;
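
ChaosZKClient and DistributedHBaseCluster repeat the same bounded-poll shape: capture a start time, loop while now minus start is under the timeout, and re-check a condition each pass. The generic form, written as a hypothetical helper (pollUntil is not part of this commit):

    // requires org.apache.hadoop.hbase.util.EnvironmentEdgeManager
    static boolean pollUntil(long timeoutMs, long intervalMs,
        java.util.function.BooleanSupplier condition) throws InterruptedException {
      long start = EnvironmentEdgeManager.currentTime();
      while (EnvironmentEdgeManager.currentTime() - start < timeoutMs) {
        if (condition.getAsBoolean()) {
          return true;              // condition met within the deadline
        }
        Thread.sleep(intervalMs);   // back off before the next check
      }
      return false;                 // timed out
    }
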
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index ff6ae17..e399d81 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@@ -215,11 +216,11 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
     ColumnFamilyDescriptor[] columns = new ColumnFamilyDescriptor[] { cbuilder.build() };
     LOG.info("Creating table {} with {} splits.", tableName,
       regionsCountPerServer * regionServerCount);
-    startTime = System.currentTimeMillis();
+    startTime = EnvironmentEdgeManager.currentTime();
     HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns,
       regionsCountPerServer);
     util.waitTableAvailable(tableName);
-    endTime = System.currentTimeMillis();
+    endTime = EnvironmentEdgeManager.currentTime();
     LOG.info("Pre-split table created successfully in {}ms.", (endTime - startTime));
   }
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
index 1850e91..c5bdc51 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.Threads;
@@ -162,15 +163,15 @@ public class IntegrationTestIngest extends IntegrationTestBase {
     LOG.info("Cluster size:" + util.getHBaseClusterInterface()
       .getClusterMetrics().getLiveServerMetrics().size());
 
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
     long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
     long startKey = 0;
 
     long numKeys = getNumKeys(keysPerServerPerIter);
-    while (System.currentTimeMillis() - start < 0.9 * runtime) {
+    while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
       LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
-          ((runtime - (System.currentTimeMillis() - start))/60000) + " min");
+          ((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");
 
       int ret = -1;
       ret = loadTool.run(getArgsForLoadTestTool("-write",
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
index 662e2a1..70abbde 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
 import org.junit.After;
@@ -126,11 +127,11 @@ public class IntegrationTestManyRegions {
     byte[][] splits = algo.split(REGION_COUNT);
 
     LOG.info(String.format("Creating table %s with %d splits.", TABLE_NAME, REGION_COUNT));
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       admin.createTable(tableDescriptor, splits);
       LOG.info(String.format("Pre-split table created successfully in %dms.",
-          (System.currentTimeMillis() - startTime)));
+          (EnvironmentEdgeManager.currentTime() - startTime)));
     } catch (IOException e) {
       LOG.error("Failed to create table", e);
     }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
index e4d961a..a307aed 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java
@@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.ConstantDelayQueue;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.MultiThreadedUpdater;
 import org.apache.hadoop.hbase.util.MultiThreadedWriter;
@@ -163,15 +164,15 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge
       getConf().getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs",
         5000) + 1000);
 
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
     long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
     long startKey = 0;
 
     long numKeys = getNumKeys(keysPerServerPerIter);
-    while (System.currentTimeMillis() - start < 0.9 * runtime) {
+    while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
       LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
-          ((runtime - (System.currentTimeMillis() - start))/60000) + " min");
+          ((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");
 
       int verifyPercent = 100;
       int updatePercent = 20;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
index 7f12d07..5eaa096 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LoadTestKVGenerator;
 import org.apache.hadoop.hbase.util.MultiThreadedAction;
 import org.apache.hadoop.hbase.util.MultiThreadedReader;
@@ -206,14 +207,14 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
 
     if (preloadKeys > 0) {
       MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
-      long time = System.currentTimeMillis();
+      long time = EnvironmentEdgeManager.currentTime();
       preloader.start(0, startKey, writeThreads);
       preloader.waitForFinish();
       if (preloader.getNumWriteFailures() > 0) {
         throw new IOException("Preload failed");
       }
       int waitTime = (int)Math.min(preloadKeys / 100, 30000); // arbitrary
-      status(description + " preload took " + (System.currentTimeMillis()-time)/1000
+      status(description + " preload took " + (EnvironmentEdgeManager.currentTime()-time)/1000
           + "sec; sleeping for " + waitTime/1000 + "sec for store to stabilize");
       Thread.sleep(waitTime);
     }
@@ -223,7 +224,7 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
     // reader.getMetrics().enable();
     reader.linkToWriter(writer);
 
-    long testStartTime = System.currentTimeMillis();
+    long testStartTime = EnvironmentEdgeManager.currentTime();
     writer.start(startKey, endKey, writeThreads);
     reader.start(startKey, endKey, readThreads);
     writer.waitForFinish();
@@ -257,7 +258,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
       }
     }
     LOG.info("Performance data dump for " + description + " test: \n" + perfDump.toString());*/
-    status(description + " test took " + (System.currentTimeMillis()-testStartTime)/1000 + "sec");
+    status(description + " test took " +
+      (EnvironmentEdgeManager.currentTime() - testStartTime) / 1000 + "sec");
     Assert.assertTrue(success);
   }
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
index caa6968..a1cab78 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -67,7 +68,7 @@ public class MoveRegionsOfTableAction extends Action {
 
     Collections.shuffle(regions);
 
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     for (RegionInfo regionInfo : regions) {
       // Don't try the move if we're stopping
       if (context.isStopping()) {
@@ -81,7 +82,7 @@ public class MoveRegionsOfTableAction extends Action {
 
       // put a limit on the max number of regions; otherwise this won't finish:
       // with a sleep time of 10 sec, 100 regions would take 16 min
-      if (System.currentTimeMillis() - start > maxTime) {
+      if (EnvironmentEdgeManager.currentTime() - start > maxTime) {
         break;
       }
     }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
index ea5729e..2f39233 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +49,7 @@ public class SnapshotTableAction extends Action {
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    String snapshotName = tableName + "-it-" + System.currentTimeMillis();
+    String snapshotName = tableName + "-it-" + EnvironmentEdgeManager.currentTime();
     Admin admin = util.getAdmin();
 
     // Don't try the snapshot if we're stopping
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
index 9ffef16..ae1c65e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.chaos.policies;
 
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 
 /** A policy which does stuff every time interval. */
@@ -37,11 +38,11 @@ public abstract class PeriodicPolicy extends Policy {
     Threads.sleep(jitter);
 
     while (!isStopped()) {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       runOneIteration();
 
       if (isStopped()) return;
-      long sleepTime = periodMs - (System.currentTimeMillis() - start);
+      long sleepTime = periodMs - (EnvironmentEdgeManager.currentTime() - start);
       if (sleepTime > 0) {
         LOG.info("Sleeping for {} ms", sleepTime);
         Threads.sleep(sleepTime);
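
PeriodicPolicy compensates for drift: each pass subtracts the iteration's own cost from the next sleep, so the cadence stays near periodMs rather than stretching to periodMs plus the work time. Reduced to its core (stopped and runOneIteration are stand-ins for the members shown above):

    while (!stopped) {
      long start = EnvironmentEdgeManager.currentTime();
      runOneIteration();            // the periodic work
      long elapsed = EnvironmentEdgeManager.currentTime() - start;
      long sleepTime = periodMs - elapsed;
      if (sleepTime > 0) {
        Threads.sleep(sleepTime);   // sleep only for the remainder of the period
      }
    }
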
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 065cec9..39e0911 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
@@ -127,7 +128,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
     Configuration conf = getConf();
     TableName tableName = TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
     String snapshotName = conf.get(SNAPSHOT_NAME_KEY, tableName.getQualifierAsString()
-      + "_snapshot_" + System.currentTimeMillis());
+      + "_snapshot_" + EnvironmentEdgeManager.currentTime());
     int numRegions = conf.getInt(NUM_REGIONS_KEY, DEFAULT_NUM_REGIONS);
     String tableDirStr = conf.get(TABLE_DIR_KEY);
     Path tableDir;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 08538c9..1524062 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Random64;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -711,9 +712,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
           while (numQueries < maxQueries) {
             numQueries++;
             byte[] prev = node.prev;
-            long t1 = System.currentTimeMillis();
+            long t1 = EnvironmentEdgeManager.currentTime();
             node = getNode(prev, table, node);
-            long t2 = System.currentTimeMillis();
+            long t2 = EnvironmentEdgeManager.currentTime();
             if (node == null) {
               LOG.error("ConcurrentWalker found UNDEFINED NODE: " + Bytes.toStringBinary(prev));
               context.getCounter(Counts.UNDEFINED).increment(1l);
@@ -1714,10 +1715,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       scan.setBatch(1);
       scan.addColumn(FAMILY_NAME, COLUMN_PREV);
 
-      long t1 = System.currentTimeMillis();
+      long t1 = EnvironmentEdgeManager.currentTime();
       ResultScanner scanner = table.getScanner(scan);
       Result result = scanner.next();
-      long t2 = System.currentTimeMillis();
+      long t2 = EnvironmentEdgeManager.currentTime();
       scanner.close();
 
       if ( result != null) {
@@ -1797,9 +1798,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
         while (node != null && node.prev.length != NO_KEY.length &&
             numQueries < maxQueries) {
           byte[] prev = node.prev;
-          long t1 = System.currentTimeMillis();
+          long t1 = EnvironmentEdgeManager.currentTime();
           node = getNode(prev, table, node);
-          long t2 = System.currentTimeMillis();
+          long t2 = EnvironmentEdgeManager.currentTime();
           if (logEvery > 0 && numQueries % logEvery == 0) {
             System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev));
           }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
index 64a9540..9c91796 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.test.util.warc.WARCInputFormat;
 import org.apache.hadoop.hbase.test.util.warc.WARCRecord;
 import org.apache.hadoop.hbase.test.util.warc.WARCWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
@@ -582,7 +583,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
           String contentType = warcHeader.getField("WARC-Identified-Payload-Type");
           if (contentType != null) {
             LOG.debug("Processing record id=" + recordID + ", targetURI=\"" + targetURI + "\"");
-            long now = System.currentTimeMillis();
+            long now = EnvironmentEdgeManager.currentTime();
 
             // Make row key
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
index b9cb167..c5416c9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.MultiThreadedReader;
 import org.apache.hadoop.hbase.util.Threads;
@@ -143,7 +144,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
     LOG.info("Cluster size:"+
       util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size());
 
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
     long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
     long startKey = 0;
@@ -197,7 +198,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
 
     // set the intended run time for the reader. The reader will do read requests
     // to random keys for this amount of time.
-    long remainingTime = runtime - (System.currentTimeMillis() - start);
+    long remainingTime = runtime - (EnvironmentEdgeManager.currentTime() - start);
     if (remainingTime <= 0) {
       LOG.error("The amount of time left for the test to perform random reads is "
           + "non-positive. Increase the test execution time via "
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
index 3455d6a..aff83dd 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -89,7 +90,7 @@ public class TableRecordReaderImpl {
     }
     if (logScannerActivity) {
       LOG.info("Current scan=" + currentScan.toString());
-      timestamp = System.currentTimeMillis();
+      timestamp = EnvironmentEdgeManager.currentTime();
       rowcount = 0;
     }
   }
@@ -197,7 +198,7 @@ public class TableRecordReaderImpl {
         if (logScannerActivity) {
           rowcount ++;
           if (rowcount >= logPerRowCount) {
-            long now = System.currentTimeMillis();
+            long now = EnvironmentEdgeManager.currentTime();
             LOG.info("Mapper took " + (now-timestamp)
               + "ms to process " + rowcount + " rows");
             timestamp = now;
@@ -236,7 +237,7 @@ public class TableRecordReaderImpl {
       return false;
     } catch (IOException ioe) {
       if (logScannerActivity) {
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         LOG.info("Mapper took " + (now-timestamp)
           + "ms to process " + rowcount + " rows");
         LOG.info(ioe.toString(), ioe);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 665bc32..dd42661 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -419,8 +419,10 @@ public class HFileOutputFormat2
 
       private void close(final StoreFileWriter w) throws IOException {
         if (w != null) {
-          w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
-          w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
+          w.appendFileInfo(BULKLOAD_TIME_KEY,
+            Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
+          w.appendFileInfo(BULKLOAD_TASK_KEY,
+            Bytes.toBytes(context.getTaskAttemptID().toString()));
           w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
           w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
           w.appendTrackedTimestampsToMetadata();
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 9b71479..2e94a90 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -764,7 +765,7 @@ public class ImportTsv extends Configured implements Tool {
     }
 
     // If timestamp option is not specified, use current system time.
-    long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());
+    long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTime());
 
     // Set it back to replace invalid timestamp (non-numeric) with current
     // system time
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index b60f828..3aa9674 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
@@ -493,7 +494,7 @@ public class SyncTable extends Configured implements Tool {
             sourceCell.getFamilyOffset(), sourceCell.getFamilyLength())
           .setQualifier(sourceCell.getQualifierArray(),
             sourceCell.getQualifierOffset(), sourceCell.getQualifierLength())
-          .setTimestamp(System.currentTimeMillis())
+          .setTimestamp(EnvironmentEdgeManager.currentTime())
           .setValue(sourceCell.getValueArray(),
             sourceCell.getValueOffset(), sourceCell.getValueLength()).build();
       }
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 81f9a7c..097b436 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
@@ -91,7 +92,7 @@ public class TableRecordReaderImpl {
     this.scanner = this.htable.getScanner(currentScan);
     if (logScannerActivity) {
       LOG.info("Current scan=" + currentScan.toString());
-      timestamp = System.currentTimeMillis();
+      timestamp = EnvironmentEdgeManager.currentTime();
       rowcount = 0;
     }
   }
@@ -211,7 +212,7 @@ public class TableRecordReaderImpl {
         if (logScannerActivity) {
           rowcount ++;
           if (rowcount >= logPerRowCount) {
-            long now = System.currentTimeMillis();
+            long now = EnvironmentEdgeManager.currentTime();
             LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
             timestamp = now;
             rowcount = 0;
@@ -263,7 +264,7 @@ public class TableRecordReaderImpl {
     } catch (IOException ioe) {
       updateCounters();
       if (logScannerActivity) {
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
         LOG.info(ioe.toString(), ioe);
         String lastRow = lastSuccessfulRow == null ?
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 1b4ebd0..e3c4d7a 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.TableInfo;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -300,8 +301,8 @@ public class WALPlayer extends Configured implements Tool {
     conf.setStrings(TABLES_KEY, tables);
     conf.setStrings(TABLE_MAP_KEY, tableMap);
     conf.set(FileInputFormat.INPUT_DIR, inputDirs);
-    Job job =
-        Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
+    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" +
+      EnvironmentEdgeManager.currentTime()));
     job.setJarByClass(WALPlayer.class);
 
     job.setInputFormatClass(WALInputFormat.class);
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index a95a9f4..c5650c3 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.Pair;
@@ -416,7 +417,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
         int reportBytes = 0;
         int bytesRead;
 
-        long stime = System.currentTimeMillis();
+        long stime = EnvironmentEdgeManager.currentTime();
         while ((bytesRead = in.read(buffer)) > 0) {
           out.write(buffer, 0, bytesRead);
           totalBytesWritten += bytesRead;
@@ -431,7 +432,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
             reportBytes = 0;
           }
         }
-        long etime = System.currentTimeMillis();
+        long etime = EnvironmentEdgeManager.currentTime();
 
         context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
         context.setStatus(String.format(statusMessage,
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 8f3e45b..77dd13d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.ByteArrayHashKey;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.GsonUtil;
 import org.apache.hadoop.hbase.util.Hash;
 import org.apache.hadoop.hbase.util.MurmurHash;
@@ -1145,7 +1146,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
   static abstract class TestBase {
     // Below is make it so when Tests are all running in the one
     // jvm, that they each have a differently seeded Random.
-    private static final Random randomSeed = new Random(System.currentTimeMillis());
+    private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime());
 
     private static long nextRandomSeed() {
       return randomSeed.nextLong();
@@ -2391,7 +2392,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       // write the serverName columns
       MetaTableAccessor.updateRegionLocation(connection,
         regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i,
-        System.currentTimeMillis());
+        EnvironmentEdgeManager.currentTime());
       return true;
     }
   }
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
index 0fa558c..6908678 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
@@ -67,7 +68,7 @@ public class TestCellCounter {
   private static Path FQ_OUTPUT_DIR;
   private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator
       + "output";
-  private static long now = System.currentTimeMillis();
+  private static long now = EnvironmentEdgeManager.currentTime();
 
   @Rule
   public TestName name = new TestName();
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
index 1b94ca8..7f983d3 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.mob.MobTestUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -290,7 +291,7 @@ public class TestCopyTable {
     p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
     t.put(p);
 
-    long currentTime = System.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells",
       "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000),
       "--versions=1", sourceTable.getNameAsString() };
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index 7b38c59..fe35012 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -130,7 +131,7 @@ public class TestImportExport {
   private static String FQ_OUTPUT_DIR;
   private static final String EXPORT_BATCH_SIZE = "100";
 
-  private static final long now = System.currentTimeMillis();
+  private static final long now = EnvironmentEdgeManager.currentTime();
   private final TableName EXPORT_TABLE = TableName.valueOf("export_table");
   private final TableName IMPORT_TABLE = TableName.valueOf("import_table");
   public static final byte TEST_TAG_TYPE =  (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1);
@@ -605,7 +606,7 @@ public class TestImportExport {
   @Test
   public void testExportScan() throws Exception {
     int version = 100;
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     long endTime = startTime + 1;
     String prefix = "row";
     String label_0 = "label_0";
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index add1b58..5793dfa 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LauncherSecurityManager;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.Job;
@@ -249,12 +250,12 @@ public class TestRowCounter {
     // clean up content of TABLE_NAME
     Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM));
 
-    ts = System.currentTimeMillis();
+    ts = EnvironmentEdgeManager.currentTime();
     put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
     table.put(put1);
     Thread.sleep(100);
 
-    ts = System.currentTimeMillis();
+    ts = EnvironmentEdgeManager.currentTime();
     put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
     put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
     table.put(put2);
@@ -302,9 +303,9 @@ public class TestRowCounter {
     rowCounter.setConf(TEST_UTIL.getConfiguration());
     args = Arrays.copyOf(args, args.length+1);
     args[args.length-1]="--expectedCount=" + expectedCount;
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     int result = rowCounter.run(args);
-    long duration = System.currentTimeMillis() - start;
+    long duration = EnvironmentEdgeManager.currentTime() - start;
     LOG.debug("row count duration (ms): " + duration);
     assertTrue(result==0);
   }
@@ -318,9 +319,9 @@ public class TestRowCounter {
    */
   private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) throws Exception {
     Job job = RowCounter.createSubmittableJob(TEST_UTIL.getConfiguration(), args);
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     job.waitForCompletion(true);
-    long duration = System.currentTimeMillis() - start;
+    long duration = EnvironmentEdgeManager.currentTime() - start;
     LOG.debug("row count duration (ms): " + duration);
     assertTrue(job.isSuccessful());
     Counter counter = job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS);
@@ -486,12 +487,12 @@ public class TestRowCounter {
     // clean up content of TABLE_NAME
     Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM));
 
-    ts = System.currentTimeMillis();
+    ts = EnvironmentEdgeManager.currentTime();
     put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
     table.put(put1);
     Thread.sleep(100);
 
-    ts = System.currentTimeMillis();
+    ts = EnvironmentEdgeManager.currentTime();
     put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
     put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
     table.put(put2);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
index 9d5bae0..ef0e0d9 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.mapreduce.Counters;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -159,7 +160,7 @@ public class TestSyncTable {
     final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
     final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
     Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableIgnoreTimestampsTrue");
-    long current = System.currentTimeMillis();
+    long current = EnvironmentEdgeManager.currentTime();
     writeTestData(sourceTableName, targetTableName, current - 1000, current);
     hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true");
     Counters syncCounters = syncTables(sourceTableName, targetTableName,
@@ -490,7 +491,7 @@ public class TestSyncTable {
     int sourceRegions = 10;
     int targetRegions = 6;
     if (ArrayUtils.isEmpty(timestamps)) {
-      long current = System.currentTimeMillis();
+      long current = EnvironmentEdgeManager.currentTime();
       timestamps = new long[]{current,current};
     }
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java
index 8d21c39..48e8518 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -44,7 +45,7 @@ public class TestWALInputFormat {
   public void testAddFile() {
     List<FileStatus> lfss = new ArrayList<>();
     LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class);
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now));
     WALInputFormat.addFile(lfss, lfs, now, now);
     assertEquals(1, lfss.size());
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index 40e7f37..c30235a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -131,7 +132,7 @@ public class TestWALRecordReader {
     WAL log = walfactory.getWAL(info);
     // This test depends on timestamp being millisecond based and the filename of the WAL also
     // being millisecond based.
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     WALEdit edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value));
     log.appendData(info, getWalKeyImpl(ts, scopes), edit);
@@ -145,7 +146,7 @@ public class TestWALRecordReader {
     LOG.info("Past 1st WAL roll " + log.toString());
 
     Thread.sleep(1);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
 
     edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value));
@@ -195,21 +196,24 @@ public class TestWALRecordReader {
     byte [] value = Bytes.toBytes("value");
     WALEdit edit = new WALEdit();
     edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
-        System.currentTimeMillis(), value));
-    long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
+      EnvironmentEdgeManager.currentTime(), value));
+    long txid = log.appendData(info,
+      getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
     log.sync(txid);
 
     Thread.sleep(1); // make sure 2nd log gets a later timestamp
-    long secondTs = System.currentTimeMillis();
+    long secondTs = EnvironmentEdgeManager.currentTime();
     log.rollWriter();
 
     edit = new WALEdit();
-    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
-    txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
+    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
+      EnvironmentEdgeManager.currentTime(), value));
+    txid = log.appendData(info,
+      getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
     log.sync(txid);
     log.shutdown();
     walfactory.shutdown();
-    long thirdTs = System.currentTimeMillis();
+    long thirdTs = EnvironmentEdgeManager.currentTime();
 
     // should have 2 log files now
     WALInputFormat input = new WALInputFormat();
@@ -251,15 +255,19 @@ public class TestWALRecordReader {
     WAL log = walfactory.getWAL(info);
     byte [] value = Bytes.toBytes("value");
     WALEdit edit = new WALEdit();
-    edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), System.currentTimeMillis(), value));
-    long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
+    edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
+      EnvironmentEdgeManager.currentTime(), value));
+    long txid = log.appendData(info,
+      getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
     log.sync(txid);
 
     Thread.sleep(10); // make sure 2nd edit gets a later timestamp
 
     edit = new WALEdit();
-    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
-    txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
+    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
+      EnvironmentEdgeManager.currentTime(), value));
+    txid = log.appendData(info,
+      getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
     log.sync(txid);
     log.shutdown();
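
The Thread.sleep calls in these WAL tests exist only to force strictly
increasing timestamps. Once every timestamp is read through the edge, a test
could in principle advance time explicitly instead of sleeping; a hedged
sketch of that idea (valid only if everything else on the code path, such as
WAL file naming, also reads the injected edge):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public final class AdvanceClockSketch {
      public static void main(String[] args) {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(EnvironmentEdgeManager.currentTime());
        EnvironmentEdgeManager.injectEdge(edge);
        try {
          long ts = EnvironmentEdgeManager.currentTime();
          edge.incValue(10); // stands in for Thread.sleep(10)
          System.out.println(EnvironmentEdgeManager.currentTime() == ts + 10);
        } finally {
          EnvironmentEdgeManager.reset(); // back to the real clock
        }
      }
    }
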
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
index 6869de9..04b766d 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.mapreduce.Job;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -283,20 +284,20 @@ public class TestVerifyReplication extends TestReplicationBase {
     // Take source and target tables snapshot
     Path rootDir = CommonFSUtils.getRootDir(CONF1);
     FileSystem fs = rootDir.getFileSystem(CONF1);
-    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
       Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
 
     // Take target snapshot
     Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
     FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
-    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
       Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
 
     String peerFSAddress = peerFs.getUri().toString();
     String tmpPath1 = UTIL1.getRandomDir().toString();
-    String tmpPath2 = "/tmp" + System.currentTimeMillis();
+    String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
 
     String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
       "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName,
@@ -320,11 +321,11 @@ public class TestVerifyReplication extends TestReplicationBase {
     Delete delete = new Delete(put.getRow());
     htable2.delete(delete);
 
-    sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
       Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
 
-    peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
       Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
 
@@ -388,20 +389,20 @@ public class TestVerifyReplication extends TestReplicationBase {
     // Take source and target tables snapshot
     Path rootDir = CommonFSUtils.getRootDir(CONF1);
     FileSystem fs = rootDir.getFileSystem(CONF1);
-    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
             Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
 
     // Take target snapshot
     Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
     FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
-    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
             Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);
 
     String peerFSAddress = peerFs.getUri().toString();
     String tmpPath1 = UTIL1.getRandomDir().toString();
-    String tmpPath2 = "/tmp" + System.currentTimeMillis();
+    String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
 
     String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
       "--sourceSnapshotName=" + sourceSnapshotName,
@@ -426,11 +427,11 @@ public class TestVerifyReplication extends TestReplicationBase {
     Delete delete = new Delete(put.getRow());
     htable3.delete(delete);
 
-    sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
             Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
 
-    peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
             Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java
index 191b76b..51a0748 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -105,7 +106,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
     // normal Batch tests
     byte[] qualifierName = Bytes.toBytes("f1");
     Put put = new Put(Bytes.toBytes("r1"));
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002"));
     htable1.put(put);
     put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001"));
@@ -169,7 +170,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
     // normal Batch tests
     byte[] qualifierName = Bytes.toBytes("f1");
     Put put = new Put(Bytes.toBytes("r1"));
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
     htable1.put(put);
     put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));
@@ -286,20 +287,20 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
     // Take source and target tables snapshot
     Path rootDir = CommonFSUtils.getRootDir(CONF1);
     FileSystem fs = rootDir.getFileSystem(CONF1);
-    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
       Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
 
     // Take target snapshot
     Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
     FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
-    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
       Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
 
     String peerFSAddress = peerFs.getUri().toString();
     String temPath1 = UTIL1.getRandomDir().toString();
-    String temPath2 = "/tmp" + System.currentTimeMillis();
+    String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
 
     String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
       "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
@@ -323,11 +324,11 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
     Delete delete = new Delete(put.getRow());
     htable2.delete(delete);
 
-    sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
       Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
 
-    peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
       Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
index cc052f6..1ab1f1a 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.mapreduce.Job;
 import org.junit.AfterClass;
@@ -165,14 +166,14 @@ public class TestVerifyReplicationCrossDiffHdfs {
   public void testVerifyRepBySnapshot() throws Exception {
     Path rootDir = CommonFSUtils.getRootDir(conf1);
     FileSystem fs = rootDir.getFileSystem(conf1);
-    String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
+    String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME,
         Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true);
 
     // Take target snapshot
     Path peerRootDir = CommonFSUtils.getRootDir(conf2);
     FileSystem peerFs = peerRootDir.getFileSystem(conf2);
-    String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
+    String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
     SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME,
         Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true);
 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index fc85040..d7da714 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -327,13 +328,14 @@ public class TestExportSnapshot {
 
   private Path getHdfsDestinationDir() {
     Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
-    Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis());
+    Path path = new Path(new Path(rootDir, "export-test"), "export-" +
+      EnvironmentEdgeManager.currentTime());
     LOG.info("HDFS export destination path: " + path);
     return path;
   }
 
   static Path getLocalDestinationDir(HBaseTestingUtility htu) {
-    Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis());
+    Path path = htu.getDataTestDir("local-export-" + EnvironmentEdgeManager.currentTime());
     try {
       FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
       LOG.info("Local export destination path: " + path);
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
index 91e1108..8b5687b 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
 import org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -114,7 +115,7 @@ public class TestExportSnapshotV1NoCluster {
   static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir)
       throws IOException {
     Path path = new Path(new Path(testDir, "export-test"),
-      "export-" + System.currentTimeMillis()).makeQualified(fs.getUri(),
+      "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(),
       fs.getWorkingDirectory());
     LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(),
       fs.getWorkingDirectory(), testDir);
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 9111345..b4e3d1e 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -284,7 +284,7 @@ public class ProcedureExecutor<TEnvironment> {
         }
         long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL);
         long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL);
-        if (retainer.isExpired(System.currentTimeMillis(), evictTtl, evictAckTtl)) {
+        if (retainer.isExpired(EnvironmentEdgeManager.currentTime(), evictTtl, evictAckTtl)) {
           LOG.debug("Procedure {} has already been finished and expired, skip force updating",
             procId);
           return;
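
The retainer.isExpired call above compares the injected "now" against the
eviction TTLs. A reduced sketch of that style of check, with hypothetical
names (the real method also takes a separate TTL for acknowledged
procedures):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public final class TtlSketch {
      // Expired once more than evictTtlMs has passed since finishTimeMs.
      static boolean isExpired(long finishTimeMs, long evictTtlMs) {
        return EnvironmentEdgeManager.currentTime() - finishTimeMs > evictTtlMs;
      }

      public static void main(String[] args) {
        System.out.println(isExpired(0L, 1000L)); // true long after epoch
      }
    }
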
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 6aed228..c5bd000 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
 import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -829,12 +830,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
           // Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing
           syncMaxSlot = runningProcCount;
           assert syncMaxSlot > 0 : "unexpected syncMaxSlot=" + syncMaxSlot;
-          final long syncWaitSt = System.currentTimeMillis();
+          final long syncWaitSt = EnvironmentEdgeManager.currentTime();
           if (slotIndex != syncMaxSlot) {
             slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS);
           }
 
-          final long currentTs = System.currentTimeMillis();
+          final long currentTs = EnvironmentEdgeManager.currentTime();
           final long syncWaitMs = currentTs - syncWaitSt;
           final float rollSec = getMillisFromLastRoll() / 1000.0f;
           final float syncedPerSec = totalSyncedToStore / rollSec;
@@ -979,7 +980,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   }
 
   public long getMillisFromLastRoll() {
-    return (System.currentTimeMillis() - lastRollTs.get());
+    return (EnvironmentEdgeManager.currentTime() - lastRollTs.get());
   }
 
   void periodicRollForTesting() throws IOException {
@@ -1103,7 +1104,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
     stream = newStream;
     flushLogId = logId;
     totalSynced.set(0);
-    long rollTs = System.currentTimeMillis();
+    long rollTs = EnvironmentEdgeManager.currentTime();
     lastRollTs.set(rollTs);
     logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs));
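
The stopwatch reads in this file all reduce to the same measure-elapsed
pattern. A minimal sketch of it, with the caveat that the default edge
delegates to System.currentTimeMillis() and is therefore not monotonic under
clock adjustments (the wrapper class and workload are illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public final class ElapsedSketch {
      static long timeIt(Runnable work) {
        long start = EnvironmentEdgeManager.currentTime();
        work.run();
        return EnvironmentEdgeManager.currentTime() - start; // elapsed ms
      }

      public static void main(String[] args) {
        System.out.println(timeIt(() -> { /* stand-in workload */ }));
      }
    }
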
 
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java
index b121c07..f56cdb3 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Before;
@@ -103,9 +104,9 @@ public class TestProcedureSchedulerConcurrency {
             }
           }
           if (wakeCount.get() != oldWakeCount) {
-            lastUpdate = System.currentTimeMillis();
+            lastUpdate = EnvironmentEdgeManager.currentTime();
           } else if (wakeCount.get() >= NRUNS &&
-              (System.currentTimeMillis() - lastUpdate) > WAIT_THRESHOLD) {
+              (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) {
             break;
           }
           Threads.sleepWithoutInterrupt(25);
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java
index cb31f02..17e4376 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
 
@@ -159,7 +159,8 @@ public abstract class ProcedureStorePerformanceEvaluation<T extends ProcedureSto
       boolean failure = false;
       try {
         for (Future<?> future : futures) {
-          long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - System.currentTimeMillis();
+          long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 -
+            EnvironmentEdgeManager.currentTime();
           failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE));
         }
       } catch (Exception e) {
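
The timeout arithmetic above derives each future's wait from a fixed overall
deadline. A hedged sketch of the same idea with illustrative names; note the
long-typed multiplication, which avoids int overflow for large second-valued
timeouts:

    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public final class DeadlineSketch {
      static void awaitAll(List<Future<?>> futures, long timeoutSec) throws Exception {
        // Fix the deadline once; each future gets whatever time remains.
        long deadline = EnvironmentEdgeManager.currentTime() + timeoutSec * 1000L;
        for (Future<?> f : futures) {
          long remaining = deadline - EnvironmentEdgeManager.currentTime();
          f.get(Math.max(0L, remaining), TimeUnit.MILLISECONDS);
        }
      }

      public static void main(String[] args) throws Exception {
        awaitAll(Collections.emptyList(), 10);
      }
    }
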
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java
index 1d681e9..90d1b0a 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.procedure2.store.wal;
 
-import static java.lang.System.currentTimeMillis;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -36,7 +34,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
 
@@ -166,7 +164,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
     List<Integer> procStates = shuffleProcWriteSequence();
     TestProcedure[] procs = new TestProcedure[numProcs + 1];  // 0 is not used.
     int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE;
-    long startTime = currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     long lastTime = startTime;
     for (int i = 0; i < procStates.size(); ++i) {
       int procId = procStates.get(i);
@@ -181,14 +179,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
         store.update(procs[procId]);
       }
       if (i > 0 && i % numProcsPerWal == 0) {
-        long currentTime = currentTimeMillis();
+        long currentTime = EnvironmentEdgeManager.currentTime();
         System.out.println("Forcing wall roll. Time taken on last WAL: " +
             (currentTime - lastTime) / 1000.0f + " sec");
         store.rollWriterForTesting();
         lastTime = currentTime;
       }
     }
-    long timeTaken = currentTimeMillis() - startTime;
+    long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
     System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : "
         + StringUtils.humanTimeDiff(timeTaken) + "\n\n");
   }
@@ -199,9 +197,9 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
     store.start(1);
     store.recoverLease();
 
-    long startTime = currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     store.load(loader);
-    long timeTaken = System.currentTimeMillis() - startTime;
+    long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
     System.out.println("******************************************");
     System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec");
     System.out.println("******************************************");
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index 4e6adfb..d9b850e 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.rest.filter.GzipFilter;
 import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.DNS;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.util.Strings;
@@ -395,7 +396,7 @@ public class RESTServer implements Constants {
     // Put up info server.
     int port = conf.getInt("hbase.rest.info.port", 8085);
     if (port >= 0) {
-      conf.setLong("startcode", System.currentTimeMillis());
+      conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
       String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
       this.infoServer = new InfoServer("rest", a, port, false, conf);
       this.infoServer.setAttribute("hbase.conf", conf);
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
index 304930c..4a4e10e 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.rest.model.ScannerModel;
 import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -112,7 +113,7 @@ public class ScannerResultGenerator extends ResultGenerator {
       }
       scanner = table.getScanner(scan);
       cached = null;
-      id = Long.toString(System.currentTimeMillis()) +
+      id = Long.toString(EnvironmentEdgeManager.currentTime()) +
              Integer.toHexString(scanner.hashCode());
     } finally {
       table.close();
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
index 05a4cac..7707733 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -44,6 +44,7 @@ import javax.net.ssl.SSLContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
@@ -332,7 +333,7 @@ public class Client {
         method.addHeader(header);
       }
     }
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
     resp = httpClient.execute(method);
     if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
@@ -342,7 +343,7 @@ public class Client {
       resp = httpClient.execute(method);
     }
 
-    long endTime = System.currentTimeMillis();
+    long endTime = EnvironmentEdgeManager.currentTime();
     if (LOG.isTraceEnabled()) {
       LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " +
           resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index d7a5cfd..0ed14f0 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.rest.client.Cluster;
 import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
 import org.apache.hadoop.hbase.util.ByteArrayHashKey;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Hash;
 import org.apache.hadoop.hbase.util.MurmurHash;
 import org.apache.hadoop.hbase.util.Pair;
@@ -836,7 +837,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     // Below is make it so when Tests are all running in the one
     // jvm, that they each have a differently seeded Random.
     private static final Random randomSeed =
-      new Random(System.currentTimeMillis());
+      new Random(EnvironmentEdgeManager.currentTime());
     private static long nextRandomSeed() {
       return randomSeed.nextLong();
     }
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
index 6f4dee5..8bad94d 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.RestTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -155,14 +156,14 @@ public class TestRemoteAdminRetries {
   }
 
   private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     try {
       callExecutor.run();
       fail("should be timeout exception!");
     } catch (IOException e) {
       assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString()));
     }
-    assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
+    assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
   }
 
   private static interface CallExecutor {
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
index 247897f..6338df8 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.testclassification.RestTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -182,14 +183,14 @@ public class TestRemoteHTableRetries {
   }
 
   private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     try {
       callExecutor.run();
       fail("should be timeout exception!");
     } catch (IOException e) {
       assertTrue(Pattern.matches(".*request timed out", e.toString()));
     }
-    assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
+    assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
   }
 
   private interface CallExecutor {
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 66baf16..99104b9 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.rest.RESTServlet;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RestTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.http.Header;
 import org.apache.http.message.BasicHeader;
 import org.junit.After;
@@ -97,7 +98,7 @@ public class TestRemoteTable {
   private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
 
   private static final long ONE_HOUR = 60 * 60 * 1000;
-  private static final long TS_2 = System.currentTimeMillis();
+  private static final long TS_2 = EnvironmentEdgeManager.currentTime();
   private static final long TS_1 = TS_2 - ONE_HOUR;
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
index c78f3b3..8db0ca2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -62,8 +63,9 @@ public class HealthCheckChore extends ScheduledChore {
             + " number of times consecutively.");
       }
       // Always log health report.
-      LOG.info("Health status at " + StringUtils.formatTime(System.currentTimeMillis()) + " : "
-          + report.getHealthReport());
+      LOG.info("Health status at " +
+        StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " +
+          report.getHealthReport());
     }
   }
 
@@ -73,9 +75,9 @@ public class HealthCheckChore extends ScheduledChore {
       // First time we are seeing a failure. No need to stop, just
       // record the time.
       numTimesUnhealthy++;
-      startWindow = System.currentTimeMillis();
+      startWindow = EnvironmentEdgeManager.currentTime();
     } else {
-      if ((System.currentTimeMillis() - startWindow) < failureWindow) {
+      if ((EnvironmentEdgeManager.currentTime() - startWindow) < failureWindow) {
         numTimesUnhealthy++;
         if (numTimesUnhealthy == threshold) {
           stop = true;
@@ -85,7 +87,7 @@ public class HealthCheckChore extends ScheduledChore {
       } else {
         // Outside of failure window, so we reset to 1.
         numTimesUnhealthy = 1;
-        startWindow = System.currentTimeMillis();
+        startWindow = EnvironmentEdgeManager.currentTime();
         stop = false;
       }
     }
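
The failure-window bookkeeping above is exactly the kind of logic the
pluggable clock makes unit-testable: a test can step time across the window
boundary without sleeping. A hedged sketch, with a hypothetical window value
and the same ManualEnvironmentEdge assumption as earlier:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public final class FailureWindowSketch {
      public static void main(String[] args) {
        long failureWindow = 60_000L; // hypothetical window, in ms
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(0L);
        EnvironmentEdgeManager.injectEdge(edge);
        try {
          long startWindow = EnvironmentEdgeManager.currentTime();
          edge.incValue(failureWindow + 1); // step past the window, no sleep
          System.out.println((EnvironmentEdgeManager.currentTime() - startWindow)
              >= failureWindow); // true: outside the window
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }
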
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index a02ad7d..0ad4829 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.ShipperListener;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1032,10 +1033,10 @@ public class HFileBlock implements Cacheable {
     protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
       throws IOException {
       ensureBlockReady();
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
       out.write(onDiskChecksum);
-      HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+      HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
     }
 
     /**
@@ -1706,7 +1707,7 @@ public class HFileBlock implements Cacheable {
       // checksums. Can change with circumstances. The below flag is whether the
       // file has support for checksums (version 2+).
       boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       if (onDiskSizeWithHeader <= 0) {
         // We were not passed the block size. Need to get it from the header. If header was
         // not cached (see getCachedHeader above), need to seek to pull it in. This is costly
@@ -1753,7 +1754,7 @@ public class HFileBlock implements Cacheable {
         if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
           return null;
         }
-        long duration = System.currentTimeMillis() - startTime;
+        long duration = EnvironmentEdgeManager.currentTime() - startTime;
         if (updateMetrics) {
           HFile.updateReadLatency(duration, pread);
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index d768d6e..39cd8eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -212,9 +213,9 @@ public class HFileWriterImpl implements HFile.Writer {
       throws IOException {
     trailer.setFileInfoOffset(outputStream.getPos());
     finishFileInfo();
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     fileInfo.write(out);
-    HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+    HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
   }
 
   public long getPos() throws IOException {
@@ -841,9 +842,9 @@ public class HFileWriterImpl implements HFile.Writer {
     trailer.setEntryCount(entryCount);
     trailer.setCompressionCodec(hFileContext.getCompression());
 
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     trailer.serialize(outputStream);
-    HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+    HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
 
     if (closeOutputStream) {
       outputStream.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java
index 99a3a2b..494a588 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -1074,7 +1075,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
       long freedSumMb = 0;
       int heavyEvictionCount = 0;
       int freedDataOverheadPercent = 0;
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       while (this.go) {
         synchronized (this) {
           try {
@@ -1107,7 +1108,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
         // This should be almost the same time (+/- 10s)
         // because we get comparable volumes of freed bytes each time.
         // 10s because this is the default period to run evict() (see above this.wait)
-        long stopTime = System.currentTimeMillis();
+        long stopTime = EnvironmentEdgeManager.currentTime();
         if ((stopTime - startTime) > 1000 * 10 - 1) {
           // Here we have to calc what situation we have got.
           // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit"
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 175fb83..62a86d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,7 +63,7 @@ public final class PrefetchExecutor {
       new ThreadFactory() {
         @Override
         public Thread newThread(Runnable r) {
-          String name = "hfile-prefetch-" + System.currentTimeMillis();
+          String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime();
           Thread t = new Thread(r, name);
           t.setDaemon(true);
           return t;
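
Several hunks in this commit (here, and later in HFileCleaner,
DumpReplicationQueues and ReplicationSyncUp) use the clock only as a
uniqueness suffix in a thread, watcher, or host name. A sketch of that
pattern under the same substitution (NamedDaemonFactory is illustrative):

    import java.util.concurrent.ThreadFactory;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class NamedDaemonFactory implements ThreadFactory {
      private final String prefix;

      public NamedDaemonFactory(String prefix) {
        this.prefix = prefix;
      }

      @Override
      public Thread newThread(Runnable r) {
        // The timestamp only needs to be unique-ish; reading it through the
        // injectable edge keeps names predictable under a pinned test clock.
        Thread t = new Thread(r, prefix + "-" + EnvironmentEdgeManager.currentTime());
        t.setDaemon(true); // do not block JVM shutdown
        return t;
      }
    }
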
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index e57579f..1232231 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -103,7 +104,7 @@ public class CallRunner {
         }
         return;
       }
-      call.setStartTime(System.currentTimeMillis());
+      call.setStartTime(EnvironmentEdgeManager.currentTime());
       if (call.getStartTime() > call.getDeadline()) {
         RpcServer.LOG.warn("Dropping timed out call: " + call);
         return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
index 2f97f53..deed987 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
@@ -30,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hbase.thirdparty.com.google.protobuf.Message;
@@ -116,7 +117,7 @@ class NettyServerRpcConnection extends ServerRpcConnection {
       long size, final InetAddress remoteAddress, int timeout,
       CallCleanup reqCleanup) {
     return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
-        remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+        remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
         this.rpcServer.cellBlockBuilder, reqCleanup);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index b0e8b7d..4ebb639 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.GsonUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -395,7 +396,7 @@ public abstract class RpcServer implements RpcServerInterface,
       Message result = call.getService().callBlockingMethod(md, controller, param);
       long receiveTime = call.getReceiveTime();
       long startTime = call.getStartTime();
-      long endTime = System.currentTimeMillis();
+      long endTime = EnvironmentEdgeManager.currentTime();
       int processingTime = (int) (endTime - startTime);
       int qTime = (int) (startTime - receiveTime);
       int totalTime = (int) (endTime - receiveTime);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
index 6cd11c4..88bb4c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -432,7 +433,7 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCa
   @Override
   public long disconnectSince() {
     if (!this.connection.isConnectionOpen()) {
-      return System.currentTimeMillis() - receiveTime;
+      return EnvironmentEdgeManager.currentTime() - receiveTime;
     } else {
       return -1L;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index bab0a6d..20ea1f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.security.HBasePolicyProvider;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
@@ -315,7 +316,7 @@ public class SimpleRpcServer extends RpcServer {
       if (c == null) {
         return;
       }
-      c.setLastContact(System.currentTimeMillis());
+      c.setLastContact(EnvironmentEdgeManager.currentTime());
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {
@@ -331,7 +332,7 @@ public class SimpleRpcServer extends RpcServer {
         closeConnection(c);
         c = null;
       } else {
-        c.setLastContact(System.currentTimeMillis());
+        c.setLastContact(EnvironmentEdgeManager.currentTime());
       }
     }
 
@@ -586,7 +587,8 @@ public class SimpleRpcServer extends RpcServer {
     }
 
     SimpleServerRpcConnection register(SocketChannel channel) {
-      SimpleServerRpcConnection connection = getConnection(channel, System.currentTimeMillis());
+      SimpleServerRpcConnection connection = getConnection(channel,
+        EnvironmentEdgeManager.currentTime());
       add(connection);
       if (LOG.isTraceEnabled()) {
         LOG.trace("Connection from " + connection +
@@ -617,7 +619,7 @@ public class SimpleRpcServer extends RpcServer {
     // synch'ed to avoid explicit invocation upon OOM from colliding with
     // timer task firing
     synchronized void closeIdle(boolean scanAll) {
-      long minLastContact = System.currentTimeMillis() - maxIdleTime;
+      long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime;
       // concurrent iterator might miss new connections added
       // during the iteration, but that's ok because they won't
       // be idle yet anyway and will be caught on next scan
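
The closeIdle() change above is the usual reaping cutoff: compute the oldest
acceptable lastContact once from the injectable clock, then sweep. A
self-contained sketch of that shape (IdleReaper and its map are hypothetical
stand-ins for the real connection bookkeeping):

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class IdleReaper {
      private final Map<String, Long> lastContact = new ConcurrentHashMap<>();
      private final long maxIdleTime;

      public IdleReaper(long maxIdleTimeMillis) {
        this.maxIdleTime = maxIdleTimeMillis;
      }

      public void touch(String connection) {
        lastContact.put(connection, EnvironmentEdgeManager.currentTime());
      }

      public int closeIdle() {
        // Anything not heard from since this cutoff is considered idle.
        long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime;
        int closed = 0;
        for (Iterator<Map.Entry<String, Long>> it = lastContact.entrySet().iterator();
            it.hasNext();) {
          if (it.next().getValue() < minLastContact) {
            it.remove(); // stand-in for closing the real connection
            closed++;
          }
        }
        return closed;
      }
    }
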
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
index b68da56..d6d5dd0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java
@@ -29,9 +29,10 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Sends responses of RPC back to clients.
@@ -162,7 +163,7 @@ class SimpleRpcServerResponder extends Thread {
    * @return the time of the purge.
    */
   private long purge(long lastPurgeTime) {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     if (now < lastPurgeTime + this.simpleRpcServer.purgeTimeout) {
       return lastPurgeTime;
     }
@@ -247,7 +248,7 @@ class SimpleRpcServerResponder extends Thread {
       return true;
     } else {
       // set the serve time when the response has to be sent later
-      conn.lastSentTime = System.currentTimeMillis();
+      conn.lastSentTime = EnvironmentEdgeManager.currentTime();
       return false; // Socket can't take more, we will have to come back.
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
index 01127cc..0c7057a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
 import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
 import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
@@ -209,7 +210,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
           // Notify the client about the offending request
           SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
-              null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
+              null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0,
               this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder);
           this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
           // Make sure the client recognizes the underlying exception
@@ -327,7 +328,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
       RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
     return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
-        remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+        remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
         this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5c532f2..a0dfae7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -195,6 +195,7 @@ import org.apache.hadoop.hbase.security.SecurityConstants;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -802,7 +803,7 @@ public class HMaster extends HRegionServer implements MasterServices {
      */
     status.setStatus("Initializing Master file system");
 
-    this.masterActiveTime = System.currentTimeMillis();
+    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
     // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
 
     // always initialize the MemStoreLAB as we use a region to store data in master now, see
@@ -896,7 +897,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     // Start the Zombie master detector after setting master as active, see HBASE-21535
     Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
-        "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
+        "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
     zombieDetector.setDaemon(true);
     zombieDetector.start();
 
@@ -1043,8 +1044,8 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     status.markComplete("Initialization successful");
     LOG.info(String.format("Master has completed initialization %.3fsec",
-       (System.currentTimeMillis() - masterActiveTime) / 1000.0f));
-    this.masterFinishedInitializationTime = System.currentTimeMillis();
+       (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
+    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
     configurationManager.registerObserver(this.balancer);
     configurationManager.registerObserver(this.cleanerPool);
     configurationManager.registerObserver(this.hfileCleaner);
@@ -1104,11 +1105,11 @@ public class HMaster extends HRegionServer implements MasterServices {
      * After master has started up, lets do balancer post startup initialization. Since this runs
      * in activeMasterManager thread, it should be fine.
      */
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     this.balancer.postMasterStartupInitialize();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Balancer post startup initialization complete, took " + (
-          (System.currentTimeMillis() - start) / 1000) + " seconds");
+          (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
     }
   }
 
@@ -1618,7 +1619,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     // Sleep to next balance plan start time
     // But if there are zero regions in transition, it can skip sleep to speed up.
-    while (!interrupted && System.currentTimeMillis() < nextBalanceStartTime
+    while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
         && this.assignmentManager.getRegionStates().hasRegionsInTransition()) {
       try {
         Thread.sleep(100);
@@ -1631,7 +1632,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     while (!interrupted
         && maxRegionsInTransition > 0
         && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
-        >= maxRegionsInTransition && System.currentTimeMillis() <= cutoffTime) {
+        >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) {
       try {
         // sleep if the number of regions in transition exceeds the limit
         Thread.sleep(100);
@@ -1758,7 +1759,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
     List<RegionPlan> successRegionPlans = new ArrayList<>();
     int maxRegionsInTransition = getMaxRegionsInTransition();
-    long balanceStartTime = System.currentTimeMillis();
+    long balanceStartTime = EnvironmentEdgeManager.currentTime();
     long cutoffTime = balanceStartTime + this.maxBalancingTime;
     int rpCount = 0;  // number of RegionPlans balanced so far
     if (plans != null && !plans.isEmpty()) {
@@ -1788,7 +1789,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
         // if performing next balance exceeds cutoff time, exit the loop
         if (this.maxBalancingTime > 0 && rpCount < plans.size()
-          && System.currentTimeMillis() > cutoffTime) {
+          && EnvironmentEdgeManager.currentTime() > cutoffTime) {
           // TODO: After balance, there should not be a cutoff time (keeping it as
           // a security net for now)
           LOG.debug("No more balancing till next balance run; maxBalanceTime="
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b33d4b8..0a29627 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -2575,7 +2575,8 @@ public class MasterRpcServices extends RSRpcServices implements
         RegionState.State newState = RegionState.State.convert(s.getState());
         LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
           prevState.getState(), newState);
-        Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis());
+        Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info,
+          EnvironmentEdgeManager.currentTime());
         metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
           Bytes.toBytes(newState.name()));
         List<Put> putList = new ArrayList<>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 47a34cc..a932ac6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -512,8 +512,7 @@ public class ServerManager {
     ZKWatcher zkw = master.getZooKeeper();
     int onlineServersCt;
     while ((onlineServersCt = onlineServers.size()) > 0){
-
-      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+      if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
         Set<ServerName> remainingServers = onlineServers.keySet();
         synchronized (onlineServers) {
           if (remainingServers.size() == 1 && remainingServers.contains(sn)) {
@@ -530,7 +529,7 @@ public class ServerManager {
           sb.append(key);
         }
         LOG.info("Waiting on regionserver(s) " + sb.toString());
-        previousLogTime = System.currentTimeMillis();
+        previousLogTime = EnvironmentEdgeManager.currentTime();
       }
 
       try {
@@ -703,8 +702,8 @@ public class ServerManager {
     if (timeout < 0) {
       return;
     }
-    long expiration = timeout + System.currentTimeMillis();
-    while (System.currentTimeMillis() < expiration) {
+    long expiration = timeout + EnvironmentEdgeManager.currentTime();
+    while (EnvironmentEdgeManager.currentTime() < expiration) {
       try {
         RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils
           .get(
@@ -775,7 +774,7 @@ public class ServerManager {
       maxToStart = Integer.MAX_VALUE;
     }
 
-    long now =  System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     final long startTime = now;
     long slept = 0;
     long lastLogTime = 0;
@@ -808,7 +807,7 @@ public class ServerManager {
       // We sleep for some time
       final long sleepTime = 50;
       Thread.sleep(sleepTime);
-      now =  System.currentTimeMillis();
+      now = EnvironmentEdgeManager.currentTime();
       slept = now - startTime;
 
       oldCount = count;
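
ServerManager above, and HRegionServer further down, share the same
rate-limited logging idiom: remember previousLogTime and emit at most one
message per second. A compact sketch of the idiom (names hypothetical,
System.out standing in for the logger):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class RateLimitedLogger {
      private long previousLogTime; // millis of the last emitted message

      public void maybeLog(String message) {
        long now = EnvironmentEdgeManager.currentTime();
        if (now > previousLogTime + 1000) { // at most one message per second
          System.out.println(message);      // stand-in for LOG.info(...)
          previousLogTime = now;
        }
      }
    }
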
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 702d04b..e4bd670 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -2230,7 +2230,7 @@ public class AssignmentManager {
   private void acceptPlan(final HashMap<RegionInfo, RegionStateNode> regions,
       final Map<ServerName, List<RegionInfo>> plan) throws HBaseIOException {
     final ProcedureEvent<?>[] events = new ProcedureEvent[regions.size()];
-    final long st = System.currentTimeMillis();
+    final long st = EnvironmentEdgeManager.currentTime();
 
     if (plan.isEmpty()) {
       throw new HBaseIOException("unable to compute plans for regions=" + regions.size());
@@ -2256,7 +2256,7 @@ public class AssignmentManager {
     }
     ProcedureEvent.wakeEvents(getProcedureScheduler(), events);
 
-    final long et = System.currentTimeMillis();
+    final long et = EnvironmentEdgeManager.currentTime();
     if (LOG.isTraceEnabled()) {
       LOG.trace("ASSIGN ACCEPT " + events.length + " -> " +
           StringUtils.humanTimeDiff(et - st));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 5036711..8818067 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -111,11 +111,11 @@ public class RegionStateStore {
           if (r != null && !r.isEmpty()) {
             long st = 0;
             if (LOG.isTraceEnabled()) {
-              st = System.currentTimeMillis();
+              st = EnvironmentEdgeManager.currentTime();
             }
             visitMetaEntry(visitor, r);
             if (LOG.isTraceEnabled()) {
-              long et = System.currentTimeMillis();
+              long et = EnvironmentEdgeManager.currentTime();
               LOG.trace("[T] LOAD META PERF " + StringUtils.humanTimeDiff(et - st));
             }
           } else if (isDebugEnabled) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
index 361b871..164752b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java
@@ -22,6 +22,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -98,11 +99,11 @@ public class DirScanPool implements ConfigurationObserver {
       return;
     }
     reconfigNotification = false;
-    long stopTime = System.currentTimeMillis() + timeout;
+    long stopTime = EnvironmentEdgeManager.currentTime() + timeout;
     while (cleanerLatch != 0 && timeout > 0) {
       try {
         wait(timeout);
-        timeout = stopTime - System.currentTimeMillis();
+        timeout = stopTime - EnvironmentEdgeManager.currentTime();
       } catch (InterruptedException ie) {
         Thread.currentThread().interrupt();
         break;
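
The DirScanPool hunk pins both sides of the wait-loop arithmetic to the same
clock: the absolute stop time is computed once, and the remaining timeout is
recomputed after every wakeup so spurious wakeups cannot stretch the total
wait. A sketch of that shape (BoundedWait and its latch are illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class BoundedWait {
      private int latch = 1; // stand-in for cleanerLatch

      public synchronized void awaitQuiescence(long timeout) {
        long stopTime = EnvironmentEdgeManager.currentTime() + timeout;
        while (latch != 0 && timeout > 0) {
          try {
            wait(timeout);
            // Recompute what is left of the budget after any wakeup.
            timeout = stopTime - EnvironmentEdgeManager.currentTime();
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // preserve interrupt status
            break;
          }
        }
      }
    }
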
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index ff28857..45b82e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.StealJobQueue;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -237,7 +238,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate>
         }
       };
       large.setDaemon(true);
-      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
+      large.setName(n + "-HFileCleaner.large." + i + "-" + EnvironmentEdgeManager.currentTime());
       large.start();
       LOG.debug("Starting for large file={}", large);
       threads.add(large);
@@ -252,7 +253,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate>
         }
       };
       small.setDaemon(true);
-      small.setName(n + "-HFileCleaner.small." + i + "-" + System.currentTimeMillis());
+      small.setName(n + "-HFileCleaner.small." + i + "-" + EnvironmentEdgeManager.currentTime());
       small.start();
       LOG.debug("Starting for small files={}", small);
       threads.add(small);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
index aaf5152..4f020ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -155,10 +156,12 @@ public final class LockManager {
       proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser());
       master.getMasterProcedureExecutor().submitProcedure(proc);
 
-      long deadline = (timeoutMs > 0) ? System.currentTimeMillis() + timeoutMs : Long.MAX_VALUE;
-      while (deadline >= System.currentTimeMillis() && !proc.isLocked()) {
+      long deadline = (timeoutMs > 0) ? EnvironmentEdgeManager.currentTime() + timeoutMs :
+        Long.MAX_VALUE;
+      while (deadline >= EnvironmentEdgeManager.currentTime() && !proc.isLocked()) {
         try {
-          lockAcquireLatch.await(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+          lockAcquireLatch.await(deadline - EnvironmentEdgeManager.currentTime(),
+            TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
           LOG.info("InterruptedException when waiting for lock: " + proc.toString());
           // kind of weird, releasing a lock which is not locked. This is to make the procedure
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 539e567..404b8e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -183,14 +184,14 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }
 
   private boolean hasHeartbeatExpired() {
-    return System.currentTimeMillis() - lastHeartBeat.get() >= getTimeout();
+    return EnvironmentEdgeManager.currentTime() - lastHeartBeat.get() >= getTimeout();
   }
 
   /**
    * Updates timeout deadline for the lock.
    */
   public void updateHeartBeat() {
-    lastHeartBeat.set(System.currentTimeMillis());
+    lastHeartBeat.set(EnvironmentEdgeManager.currentTime());
     if (LOG.isDebugEnabled()) {
       LOG.debug("Heartbeat " + toString());
     }
@@ -312,7 +313,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
       if (LOG.isDebugEnabled()) {
         LOG.debug("LOCKED " + toString());
       }
-      lastHeartBeat.set(System.currentTimeMillis());
+      lastHeartBeat.set(EnvironmentEdgeManager.currentTime());
       return LockState.LOCK_ACQUIRED;
     }
     LOG.warn("Failed acquire LOCK " + toString() + "; YIELDING");
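
Because updateHeartBeat() and hasHeartbeatExpired() now read the same
injectable edge, a test can drive heartbeat expiry without sleeping. A sketch
assuming the ManualEnvironmentEdge test clock from hbase-common
(HeartbeatExample itself is hypothetical):

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class HeartbeatExample {
      private final AtomicLong lastHeartBeat = new AtomicLong();
      private final long timeoutMillis;

      public HeartbeatExample(long timeoutMillis) {
        this.timeoutMillis = timeoutMillis;
      }

      public void updateHeartBeat() {
        lastHeartBeat.set(EnvironmentEdgeManager.currentTime());
      }

      public boolean hasHeartbeatExpired() {
        return EnvironmentEdgeManager.currentTime() - lastHeartBeat.get() >= timeoutMillis;
      }

      public static void main(String[] args) {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        EnvironmentEdgeManager.injectEdge(clock);
        try {
          HeartbeatExample lock = new HeartbeatExample(30_000L);
          lock.updateHeartBeat();
          clock.incValue(29_999L);
          System.out.println(lock.hasHeartbeatExpired()); // false, just inside
          clock.incValue(1L);                             // cross the boundary
          System.out.println(lock.hasHeartbeatExpired()); // true
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }
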
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
index 7a475e2..e1acb6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java
@@ -24,8 +24,8 @@ import java.util.Date;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
-
 import org.apache.hbase.thirdparty.com.google.common.base.Charsets;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@@ -54,7 +54,7 @@ public class MemoryBoundedLogMessageBuffer {
    * older messages until the desired memory limit is achieved.
    */
   public synchronized void add(String messageText) {
-    LogMessage message = new LogMessage(messageText, System.currentTimeMillis());
+    LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTime());
     
     usage += message.estimateHeapUsage();
     messages.add(message);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
index d194d10..96fffa4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Operation;
 import org.apache.hadoop.hbase.util.Bytes;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.com.google.protobuf.Message;
 
 /**
@@ -196,7 +196,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl
       long queueTime) {
     this.methodName = methodName;
     this.params = params;
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     this.rpcStartTime = now;
     setWarnTime(now);
     this.rpcQueueTime = queueTime;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
index af6a47a..011c198 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
@@ -24,6 +24,8 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.GsonUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -47,7 +49,7 @@ class MonitoredTaskImpl implements MonitoredTask {
   private static final Gson GSON = GsonUtil.createGson().create();
 
   public MonitoredTaskImpl() {
-    startTime = System.currentTimeMillis();
+    startTime = EnvironmentEdgeManager.currentTime();
     statusTime = startTime;
     stateTime = startTime;
     warnTime = startTime;
@@ -161,7 +163,7 @@ class MonitoredTaskImpl implements MonitoredTask {
   @Override
   public void setStatus(String status) {
     this.status = status;
-    statusTime = System.currentTimeMillis();
+    statusTime = EnvironmentEdgeManager.currentTime();
     if (journalEnabled) {
       journal.add(new StatusJournalEntryImpl(this.status, statusTime));
     }
@@ -169,7 +171,7 @@ class MonitoredTaskImpl implements MonitoredTask {
 
   protected void setState(State state) {
     this.state = state;
-    stateTime = System.currentTimeMillis();
+    stateTime = EnvironmentEdgeManager.currentTime();
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java
index c0a8e1d..d6701cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.namequeues.RpcLogDetails;
 import org.apache.hadoop.hbase.namequeues.SlowLogPersistentService;
 import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
 import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -128,7 +129,7 @@ public class SlowLogQueueService implements NamedQueueService {
     Message param = rpcLogDetails.getParam();
     long receiveTime = rpcCall.getReceiveTime();
     long startTime = rpcCall.getStartTime();
-    long endTime = System.currentTimeMillis();
+    long endTime = EnvironmentEdgeManager.currentTime();
     int processingTime = (int) (endTime - startTime);
     int qTime = (int) (startTime - receiveTime);
     final SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(param);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ece85fc..dd1d0d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -830,7 +830,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     /*
      * timestamp.slop provides a server-side constraint on the timestamp. This
-     * assumes that you base your TS around currentTimeMillis(). In this case,
+     * assumes that you base your TS around EnvironmentEdgeManager.currentTime(). In this case,
      * throw an error to the user if the user-specified TS is newer than now +
      * slop. LATEST_TIMESTAMP == don't use this functionality
      */
@@ -1945,7 +1945,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         return true;
       }
       if (!writestate.flushing) return true;
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       long duration = 0;
       boolean interrupted = false;
       LOG.debug("waiting for cache flush to complete for region " + this);
@@ -1961,7 +1961,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             interrupted = true;
             break;
           } finally {
-            duration = System.currentTimeMillis() - start;
+            duration = EnvironmentEdgeManager.currentTime() - start;
           }
         }
       } finally {
@@ -6621,7 +6621,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       if (call.isPresent()) {
         long deadline = call.get().getDeadline();
         if (deadline < Long.MAX_VALUE) {
-          int timeToDeadline = (int) (deadline - System.currentTimeMillis());
+          int timeToDeadline = (int) (deadline - EnvironmentEdgeManager.currentTime());
           if (timeToDeadline <= this.rowLockWaitDuration) {
             reachDeadlineFirst = true;
             timeout = timeToDeadline;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 52ac42a..330c714 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -580,7 +580,7 @@ public class HRegionServer extends Thread implements
   public HRegionServer(final Configuration conf) throws IOException {
     super("RegionServer");  // thread name
     try {
-      this.startcode = System.currentTimeMillis();
+      this.startcode = EnvironmentEdgeManager.currentTime();
       this.conf = conf;
       this.dataFsOk = true;
       this.masterless = conf.getBoolean(MASTERLESS_CONFIG_NAME, false);
@@ -1059,7 +1059,7 @@ public class HRegionServer extends Thread implements
       }
 
       // We registered with the Master.  Go into run mode.
-      long lastMsg = System.currentTimeMillis();
+      long lastMsg = EnvironmentEdgeManager.currentTime();
       long oldRequestCount = -1;
       // The main run loop.
       while (!isStopped() && isHealthy()) {
@@ -1090,10 +1090,10 @@ public class HRegionServer extends Thread implements
             LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
           }
         }
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if ((now - lastMsg) >= msgInterval) {
           tryRegionServerReport(lastMsg, now);
-          lastMsg = System.currentTimeMillis();
+          lastMsg = EnvironmentEdgeManager.currentTime();
         }
         if (!isStopped() && !isAborted()) {
           this.sleeper.sleep();
@@ -1493,8 +1493,8 @@ public class HRegionServer extends Thread implements
         // Only print a message if the count of regions has changed.
         if (count != lastCount) {
           // Log every second at most
-          if (System.currentTimeMillis() > (previousLogTime + 1000)) {
-            previousLogTime = System.currentTimeMillis();
+          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
+            previousLogTime = EnvironmentEdgeManager.currentTime();
             lastCount = count;
             LOG.info("Waiting on " + count + " regions to close");
             // Only print out regions still closing if a small number else will
@@ -2779,9 +2779,9 @@ public class HRegionServer extends Thread implements
             LOG.debug("No master found and cluster is stopped; bailing out");
             return null;
           }
-          if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
             LOG.debug("No master found; retry");
-            previousLogTime = System.currentTimeMillis();
+            previousLogTime = EnvironmentEdgeManager.currentTime();
           }
           refresh = true; // let's try pull it from ZK directly
           if (sleepInterrupted(200)) {
@@ -2806,7 +2806,7 @@ public class HRegionServer extends Thread implements
           intLockStub = LockService.newBlockingStub(channel);
           break;
         } catch (IOException e) {
-          if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+          if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
             e = e instanceof RemoteException ?
               ((RemoteException)e).unwrapRemoteException() : e;
             if (e instanceof ServerNotRunningYetException) {
@@ -2814,7 +2814,7 @@ public class HRegionServer extends Thread implements
             } else {
               LOG.warn("Unable to connect to master. Retrying. Error was:", e);
             }
-            previousLogTime = System.currentTimeMillis();
+            previousLogTime = EnvironmentEdgeManager.currentTime();
           }
           if (sleepInterrupted(200)) {
             interrupted = true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 79d77c7..fa401e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2048,10 +2048,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       // We are assigning meta, wait a little for regionserver to finish initialization.
       int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
         HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout
-      long endTime = System.currentTimeMillis() + timeout;
+      long endTime = EnvironmentEdgeManager.currentTime() + timeout;
       synchronized (regionServer.online) {
         try {
-          while (System.currentTimeMillis() <= endTime
+          while (EnvironmentEdgeManager.currentTime() <= endTime
               && !regionServer.isStopped() && !regionServer.isOnline()) {
             regionServer.online.wait(regionServer.msgInterval);
           }
@@ -3336,7 +3336,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta);
       // XXX: Cannot use EnvironmentEdge here because TestIncrementTimeRange uses a
       // ManualEnvironmentEdge. Consider using System.nanoTime instead.
-      return System.currentTimeMillis() + timeLimitDelta;
+      return EnvironmentEdgeManager.currentTime() + timeLimitDelta;
     }
     // Default value of timeLimit is negative to indicate no timeLimit should be
     // enforced.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 04eabb1..1354806 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -21,9 +21,10 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;
 
 /**
  * ScannerContext instances encapsulate limit tracking AND progress towards those limits during
@@ -55,7 +56,7 @@ public class ScannerContext {
   /**
    * A different set of progress fields. Only include batch, dataSize and heapSize. Compare to
    * LimitFields, ProgressFields doesn't contain time field. As we save a deadline in LimitFields,
-   * so use {@link System#currentTimeMillis()} directly when check time limit.
+   * so use {@link EnvironmentEdgeManager#currentTime()} directly when checking the time limit.
    */
   ProgressFields progress;
 
@@ -310,12 +311,12 @@ public class ScannerContext {
 
   /**
    * @param checkerScope The scope that the limit is being checked from. The time limit is always
-   *          checked against {@link System#currentTimeMillis()}
+   *          checked against {@link EnvironmentEdgeManager#currentTime()}
    * @return true when the limit is enforceable from the checker's scope and it has been reached
    */
   boolean checkTimeLimit(LimitScope checkerScope) {
     return hasTimeLimit(checkerScope) &&
-      (returnImmediately || System.currentTimeMillis() >= limits.getTime());
+      (returnImmediately || EnvironmentEdgeManager.currentTime() >= limits.getTime());
   }
 
   /**
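
ScannerContext keeps its time limit as an absolute deadline, so each check is
a single clock read compared against a stored timestamp. A minimal sketch of
that design (TimeLimit is a hypothetical reduction of LimitFields):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class TimeLimit {
      private final long deadline; // absolute millis, like the limits' time field

      public TimeLimit(long allowanceMillis) {
        this.deadline = EnvironmentEdgeManager.currentTime() + allowanceMillis;
      }

      public boolean reached() {
        return EnvironmentEdgeManager.currentTime() >= deadline;
      }
    }
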
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 6483111..2fb2a08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -148,7 +148,7 @@ public abstract class Compactor<T extends CellSink> {
   private FileDetails getFileDetails(
       Collection<HStoreFile> filesToCompact, boolean allFiles, boolean major) throws IOException {
     FileDetails fd = new FileDetails();
-    long oldestHFileTimestampToKeepMVCC = System.currentTimeMillis() -
+    long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() -
       (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
 
     for (HStoreFile file : filesToCompact) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
index ff39531..ffdade1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Handles splitting a WAL
@@ -68,7 +69,7 @@ public class WALSplitterHandler extends EventHandler {
 
   @Override
   public void process() throws IOException {
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     Status status = null;
     try {
       status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter);
@@ -101,7 +102,7 @@ public class WALSplitterHandler extends EventHandler {
       }
     } finally {
       LOG.info("Worker " + serverName + " done with task " + splitTaskDetails.toString() + " in "
-          + (System.currentTimeMillis() - startTime) + "ms. Status = " + status);
+          + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. Status = " + status);
       this.inProgressTasks.decrementAndGet();
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 52f3f71..ba302cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -629,7 +629,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
    * retrieve the next path to use for writing. Increments the internal filenum.
    */
   private Path getNewPath() throws IOException {
-    this.filenum.set(System.currentTimeMillis());
+    this.filenum.set(EnvironmentEdgeManager.currentTime());
     Path newPath = getCurrentFileName();
     while (fs.exists(newPath)) {
       this.filenum.incrementAndGet();
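
getNewPath() seeds the WAL file number from the clock and bumps it on
collision, so two log rolls within the same millisecond still produce
distinct names. A sketch of that scheme, with an in-memory set standing in
for fs.exists() (WalNaming is illustrative):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class WalNaming {
      private final AtomicLong filenum = new AtomicLong(-1);
      private final Set<String> existing = new HashSet<>(); // stand-in for fs.exists()

      public String nextPath() {
        filenum.set(EnvironmentEdgeManager.currentTime());
        String path = "wal." + filenum.get();
        while (existing.contains(path)) {
          filenum.incrementAndGet(); // collision within the same millisecond
          path = "wal." + filenum.get();
        }
        existing.add(path);
        return path;
      }
    }
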
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
index e824787..23db3dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
@@ -155,7 +156,7 @@ public class WALUtil {
     final Map<String, byte[]> extendedAttributes, final boolean sync) throws IOException {
     // TODO: Pass in current time to use?
     WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(),
-      System.currentTimeMillis(), mvcc, replicationScope, extendedAttributes);
+      EnvironmentEdgeManager.currentTime(), mvcc, replicationScope, extendedAttributes);
     long trx = MultiVersionConcurrencyControl.NONE;
     try {
       trx = wal.appendMarker(hri, walKey, edit);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 92c57a8..dde163d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
@@ -208,8 +209,9 @@ public class DumpReplicationQueues extends Configured implements Tool {
     Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin();
 
-    ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(),
-        new WarnOnlyAbortable(), true);
+    ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" +
+        EnvironmentEdgeManager.currentTime(),
+      new WarnOnlyAbortable(), true);
 
     try {
       // Our zk watcher
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index c77f74f..6dd60d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -458,12 +459,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
 
     int numSinks = getNumSinks();
     if (numSinks == 0) {
-      if((System.currentTimeMillis() - lastSinkFetchTime) >= (maxRetriesMultiplier*1000)) {
+      if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >=
+          (maxRetriesMultiplier*1000)) {
         LOG.warn(
           "No replication sinks found, returning without replicating. "
             + "The source should retry with the same set of edits. Not logging this again for "
             + "the next {} seconds.", maxRetriesMultiplier);
-        lastSinkFetchTime = System.currentTimeMillis();
+        lastSinkFetchTime = EnvironmentEdgeManager.currentTime();
       }
       sleepForRetries("No sinks available at peer", sleepMultiplier);
       return false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
index 5eb3f87..dede79d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * This class is for maintaining the various replication statistics for a sink and publishing them
@@ -28,8 +29,8 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 @InterfaceAudience.Private
 public class MetricsSink {
 
-  private long lastTimestampForAge = System.currentTimeMillis();
-  private long startTimestamp = System.currentTimeMillis();
+  private long lastTimestampForAge = EnvironmentEdgeManager.currentTime();
+  private long startTimestamp = EnvironmentEdgeManager.currentTime();
   private final MetricsReplicationSinkSource mss;
 
   public MetricsSink() {
@@ -47,7 +48,7 @@ public class MetricsSink {
     long age = 0;
     if (lastTimestampForAge != timestamp) {
       lastTimestampForAge = timestamp;
-      age = System.currentTimeMillis() - lastTimestampForAge;
+      age = EnvironmentEdgeManager.currentTime() - lastTimestampForAge;
     }
     mss.setLastAppliedOpAge(age);
     return age;
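
MetricsSink derives the age of the last applied op as the gap between the
injectable wall clock and the op's own timestamp. A reduced sketch
(AgeTracker is hypothetical; the metrics sink itself is elided):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class AgeTracker {
      private long lastTimestampForAge = EnvironmentEdgeManager.currentTime();

      public long refreshAge(long opTimestamp) {
        long age = 0;
        if (lastTimestampForAge != opTimestamp) {
          lastTimestampForAge = opTimestamp;
          age = EnvironmentEdgeManager.currentTime() - lastTimestampForAge;
        }
        return age;
      }
    }
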
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
index 098ba02..9754c49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -342,10 +343,10 @@ public class ReplicationSourceShipper extends Thread {
    * have been triggered interruption/termination prior to calling this method.
    */
   void clearWALEntryBatch() {
-    long timeout = System.currentTimeMillis() + this.shipEditsTimeout;
+    long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout;
     while(this.isAlive() || this.entryReader.isAlive()){
       try {
-        if (System.currentTimeMillis() >= timeout) {
+        if (EnvironmentEdgeManager.currentTime() >= timeout) {
           LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper "
             + "thread to stop. Not cleaning buffer usage. Shipper alive: {}; Reader alive: {}",
             this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive());
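
clearWALEntryBatch is the other recurring shape in this patch: compute a deadline once from the managed clock, then poll it. The sleeps inside such loops are still wall-clock time; only the time reads go through the edge, so a frozen ManualEnvironmentEdge will spin them until the test advances the clock. A generic sketch of the pattern (waitFor is an illustrative name, not an HBase API):

import java.util.function.BooleanSupplier;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DeadlineLoopSketch {
  // Polls until the condition holds or timeoutMs elapses on the managed clock.
  static boolean waitFor(long timeoutMs, BooleanSupplier condition) throws InterruptedException {
    long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
    while (EnvironmentEdgeManager.currentTime() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(10); // real sleep; only the time *reads* are injectable
    }
    return false; // timed out, mirroring the LOG.warn branch above
  }

  public static void main(String[] args) throws InterruptedException {
    System.out.println(waitFor(100, () -> true));  // true immediately
    System.out.println(waitFor(100, () -> false)); // false after ~100ms
  }
}
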
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index a43be29..a6362f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.AsyncClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
@@ -74,8 +75,9 @@ public class ReplicationSyncUp extends Configured implements Tool {
       }
     };
     Configuration conf = getConf();
-    try (ZKWatcher zkw =
-      new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true)) {
+    try (ZKWatcher zkw = new ZKWatcher(conf,
+        "syncupReplication" + EnvironmentEdgeManager.currentTime(),
+        abortable, true)) {
       Path walRootDir = CommonFSUtils.getWALRootDir(conf);
       FileSystem fs = CommonFSUtils.getWALFileSystem(conf);
       Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -107,7 +109,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
 
     DummyServer(ZKWatcher zkw) {
       // a unique name in case the first run fails
-      hostname = System.currentTimeMillis() + ".SyncUpTool.replication.org";
+      hostname = EnvironmentEdgeManager.currentTime() + ".SyncUpTool.replication.org";
       this.zkw = zkw;
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
index 6cf1916..2f00b1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -150,11 +151,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
   public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
       Set<TableName> skipTables) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
         skipTables);
       LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
@@ -172,11 +173,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
   public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
       Set<TableName> skipTables) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
         skipTables);
       LOG.info("Set HDFS acl when revoke {}, cost {} ms", userPermission,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Set HDFS acl error when revoke: {}", userPermission, e);
@@ -191,7 +192,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
    */
   public boolean snapshotAcl(SnapshotDescription snapshot) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       TableName tableName = snapshot.getTableName();
       // global user permission can be inherited from default acl automatically
       Set<String> userSet = getUsersWithTableReadAction(tableName, true, false);
@@ -201,7 +202,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
             true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get();
       }
       LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(),
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
@@ -218,13 +219,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
   public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers,
       String operation) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       if (removeUsers.size() > 0) {
         handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers,
           HDFSAclOperation.OperationType.REMOVE);
       }
       LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e);
@@ -240,13 +241,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
    */
   public boolean removeNamespaceDefaultAcl(String namespace, Set<String> removeUsers) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       Path archiveNsDir = pathHelper.getArchiveNsDir(namespace);
       HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers,
           HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
       operation.handleAcl();
       LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e);
@@ -262,13 +263,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
    */
   public boolean removeTableDefaultAcl(TableName tableName, Set<String> removeUsers) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       Path archiveTableDir = pathHelper.getArchiveTableDir(tableName);
       HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers,
           HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT);
       operation.handleAcl();
       LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Remove HDFS acl error when delete table {}", tableName, e);
@@ -284,7 +285,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
    */
   public boolean addTableAcl(TableName tableName, Set<String> users, String operation) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       if (users.size() > 0) {
         HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
         handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
@@ -292,7 +293,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
           operationType);
       }
       LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e);
@@ -308,13 +309,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable {
    */
   public boolean removeTableAcl(TableName tableName, Set<String> users) {
     try {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       if (users.size() > 0) {
         handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
           HDFSAclOperation.OperationType.REMOVE);
       }
       LOG.info("Set HDFS acl when create or modify table {}, cost {} ms", tableName,
-        System.currentTimeMillis() - start);
+        EnvironmentEdgeManager.currentTime() - start);
       return true;
     } catch (Exception e) {
       LOG.error("Set HDFS acl error when create or modify table {}", tableName, e);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java
index f2c2d46..df48083 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java
@@ -615,9 +615,9 @@ public class CanaryTool implements Tool, Canary {
             tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(),
             Bytes.toStringBinary(rowToCheck));
           try {
-            long startTime = System.currentTimeMillis();
+            long startTime = EnvironmentEdgeManager.currentTime();
             table.put(put);
-            long time = System.currentTimeMillis() - startTime;
+            long time = EnvironmentEdgeManager.currentTime() - startTime;
             this.readWriteLatency.add(time);
             sink.publishWriteTiming(serverName, region, column, time);
           } catch (Exception e) {
@@ -1017,8 +1017,8 @@ public class CanaryTool implements Tool, Canary {
         // Do monitor !!
         try {
           monitor = this.newMonitor(connection, monitorTargets);
-          monitorThread = new Thread(monitor, "CanaryMonitor-" + System.currentTimeMillis());
-          startTime = System.currentTimeMillis();
+          startTime = EnvironmentEdgeManager.currentTime();
+          monitorThread = new Thread(monitor, "CanaryMonitor-" + startTime);
           monitorThread.start();
           while (!monitor.isDone()) {
             // wait for 1 sec
@@ -1032,7 +1032,7 @@ public class CanaryTool implements Tool, Canary {
                 return INIT_ERROR_EXIT_CODE;
               }
             }
-            currentTimeLength = System.currentTimeMillis() - startTime;
+            currentTimeLength = EnvironmentEdgeManager.currentTime() - startTime;
             if (currentTimeLength > timeout) {
               LOG.error("The monitor is running too long (" + currentTimeLength
                   + ") after timeout limit:" + timeout
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index c73e530..6a3dd6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -607,7 +607,7 @@ public final class FSUtils {
   throws IOException {
     // Rewrite the file as pb.  Move aside the old one first, write new
     // then delete the moved-aside file.
-    Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
+    Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTime());
     if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
     setClusterId(fs, rootdir, cid, 100);
     if (!fs.delete(movedAsideName, false)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index cc0f49a..1e2ac3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -282,14 +282,14 @@ public class JVMClusterUtil {
       }
     }
     boolean wasInterrupted = false;
-    final long maxTime = System.currentTimeMillis() + 30 * 1000;
+    final long maxTime = EnvironmentEdgeManager.currentTime() + 30 * 1000;
     if (regionservers != null) {
       // first try nicely.
       for (RegionServerThread t : regionservers) {
         t.getRegionServer().stop("Shutdown requested");
       }
       for (RegionServerThread t : regionservers) {
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (t.isAlive() && !wasInterrupted && now < maxTime) {
           try {
             t.join(maxTime - now);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index a3a0c7b..b1517c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -63,7 +63,7 @@ public abstract class ModifyRegionUtils {
 
   public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor,
       byte[][] splitKeys) {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     RegionInfo[] hRegionInfos = null;
     if (splitKeys == null || splitKeys.length == 0) {
       hRegionInfos = new RegionInfo[]{
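
Region IDs are creation timestamps, so routing createRegionInfos through the managed clock means an injected edge yields byte-identical region names from run to run. A small sketch reusing the RegionInfoBuilder calls that appear later in this diff; the frozen value is arbitrary:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class StableRegionIdSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1234567890L);              // freeze "now" so the region id is reproducible
    EnvironmentEdgeManager.injectEdge(clock);
    try {
      RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("t"))
          .setRegionId(EnvironmentEdgeManager.currentTime()) // same source as createRegionInfos
          .build();
      System.out.println(ri.getRegionNameAsString()); // identical on every run
    } finally {
      EnvironmentEdgeManager.reset();
    }
  }
}
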
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 35467bc..11bbd21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -488,7 +488,7 @@ public class RegionSplitter {
           daughterRegions.get(rsLocation).add(dr);
         }
         LOG.debug("Done with bucketing.  Split time!");
-        long startTime = System.currentTimeMillis();
+        long startTime = EnvironmentEdgeManager.currentTime();
 
         // Open the split file and modify it as splits finish
         byte[] rawData = readFile(fs, splitFile);
@@ -604,7 +604,7 @@ public class RegionSplitter {
                       + " " + splitAlgo.rowToStr(region.getSecond()) + "\n");
                   splitCount++;
                   if (splitCount % 10 == 0) {
-                    long tDiff = (System.currentTimeMillis() - startTime)
+                    long tDiff = (EnvironmentEdgeManager.currentTime() - startTime)
                         / splitCount;
                     LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount
                         + ". Avg Time / Split = "
@@ -633,7 +633,7 @@ public class RegionSplitter {
             }
             LOG.debug("All regions have been successfully split!");
           } finally {
-            long tDiff = System.currentTimeMillis() - startTime;
+            long tDiff = EnvironmentEdgeManager.currentTime() - startTime;
             LOG.debug("TOTAL TIME = "
                 + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
             LOG.debug("Splits = " + splitCount);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java
index 0eda459..70e58f6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,7 +86,7 @@ public class MajorCompactionTTLRequest extends MajorCompactionRequest {
     if (colDesc.getTimeToLive() == HConstants.FOREVER) {
       return -1;
     }
-    return System.currentTimeMillis() - (colDesc.getTimeToLive() * 1000L);
+    return EnvironmentEdgeManager.currentTime() - (colDesc.getTimeToLive() * 1000L);
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
index 370a3e8..d841ab4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -487,7 +488,8 @@ public class MajorCompactor extends Configured implements Tool {
     Configuration configuration = getConf();
     int concurrency = Integer.parseInt(commandLine.getOptionValue("servers"));
     long minModTime = Long.parseLong(
-        commandLine.getOptionValue("minModTime", String.valueOf(System.currentTimeMillis())));
+        commandLine.getOptionValue("minModTime",
+          String.valueOf(EnvironmentEdgeManager.currentTime())));
     String quorum =
         commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM));
     String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
index 4d89c47..3ad1c5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALClosedException;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -153,7 +154,7 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
   @Override
   public void run() {
     while (running) {
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       checkLowReplication(now);
       synchronized (this) {
         if (wals.values().stream().noneMatch(rc -> rc.needsRoll(now))) {
@@ -230,7 +231,8 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
    */
   public boolean walRollFinished() {
     // TODO add a status field of roll in RollController
-    return wals.values().stream().noneMatch(rc -> rc.needsRoll(System.currentTimeMillis()))
+    return wals.values().stream()
+        .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime()))
       && isWaiting();
   }
 
@@ -261,7 +263,7 @@ public abstract class AbstractWALRoller<T extends Abortable> extends Thread
     RollController(WAL wal) {
       this.wal = wal;
       this.rollRequest = new AtomicBoolean(false);
-      this.lastRollTime = System.currentTimeMillis();
+      this.lastRollTime = EnvironmentEdgeManager.currentTime();
     }
 
     public void requestRoll() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java
index 6361ffc..27c86c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.ConcurrentMapUtils.IOExceptionSupplier;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
@@ -334,7 +335,7 @@ public final class WALSplitUtil {
   public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits)
       throws IOException {
     Path moveAsideName =
-        new Path(edits.getParent(), edits.getName() + "." + System.currentTimeMillis());
+        new Path(edits.getParent(), edits.getName() + "." + EnvironmentEdgeManager.currentTime());
     if (!fs.rename(edits, moveAsideName)) {
       LOG.warn("Rename failed from {} to {}", edits, moveAsideName);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index 85dff35..fcf18f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -128,8 +129,8 @@ public abstract class HBaseCluster implements Closeable, Configurable {
    */
   public void waitForRegionServerToStart(String hostname, int port, long timeout)
       throws IOException {
-    long start = System.currentTimeMillis();
-    while ((System.currentTimeMillis() - start) < timeout) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
       for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) {
         if (server.getHostname().equals(hostname) && server.getPort() == port) {
           return;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 3d57c30..840b9e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -2920,9 +2921,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
     //ensure that we have connection to the server before closing down, otherwise
     //the close session event will be eaten out before we start CONNECTING state
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     while (newZK.getState() != States.CONNECTED
-         && System.currentTimeMillis() - start < 1000) {
+         && EnvironmentEdgeManager.currentTime() - start < 1000) {
        Thread.sleep(1);
     }
     newZK.close();
@@ -3971,11 +3972,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   public void assertRegionOnServer(
       final RegionInfo hri, final ServerName server,
       final long timeout) throws IOException, InterruptedException {
-    long timeoutTime = System.currentTimeMillis() + timeout;
+    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
     while (true) {
       List<RegionInfo> regions = getAdmin().getRegions(server);
       if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       if (now > timeoutTime) break;
       Thread.sleep(10);
     }
@@ -3990,7 +3991,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   public void assertRegionOnlyOnServer(
       final RegionInfo hri, final ServerName server,
       final long timeout) throws IOException, InterruptedException {
-    long timeoutTime = System.currentTimeMillis() + timeout;
+    long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
     while (true) {
       List<RegionInfo> regions = getAdmin().getRegions(server);
       if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
@@ -4009,7 +4010,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         }
         return; // good, we are happy
       }
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       if (now > timeoutTime) break;
       Thread.sleep(10);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
index cbfadb5..69fb21c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * This class runs performance benchmarks for {@link HFile}.
@@ -331,7 +332,7 @@ public class HFilePerformanceEvaluation {
     long run() throws Exception {
       long elapsedTime;
       setUp();
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       try {
         for (int i = 0; i < totalRows; i++) {
           if (i > 0 && i % getReportingPeriod() == 0) {
@@ -339,7 +340,7 @@ public class HFilePerformanceEvaluation {
           }
           doRow(i);
         }
-        elapsedTime = System.currentTimeMillis() - startTime;
+        elapsedTime = EnvironmentEdgeManager.currentTime() - startTime;
       } finally {
         tearDown();
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 4d40ca4..990867e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -445,9 +446,9 @@ public class MiniHBaseCluster extends HBaseCluster {
     JVMClusterUtil.RegionServerThread t =  startRegionServer();
     ServerName rsServerName = t.getRegionServer().getServerName();
 
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     ClusterMetrics clusterStatus = getClusterMetrics();
-    while ((System.currentTimeMillis() - start) < timeout) {
+    while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
       if (clusterStatus != null && clusterStatus.getLiveServerMetrics().containsKey(rsServerName)) {
         return t;
       }
@@ -649,9 +650,9 @@ public class MiniHBaseCluster extends HBaseCluster {
   @Override
   public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
     List<JVMClusterUtil.MasterThread> mts;
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     while (!(mts = getMasterThreads()).isEmpty()
-        && (System.currentTimeMillis() - start) < timeout) {
+        && (EnvironmentEdgeManager.currentTime() - start) < timeout) {
       for (JVMClusterUtil.MasterThread mt : mts) {
         if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
           return true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
index 99aef64..5268d3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java
@@ -26,6 +26,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,9 +65,9 @@ public abstract class MultithreadedTestUtil {
     }
 
     public void waitFor(long millis) throws Exception {
-      long endTime = System.currentTimeMillis() + millis;
+      long endTime = EnvironmentEdgeManager.currentTime() + millis;
       while (!stopped) {
-        long left = endTime - System.currentTimeMillis();
+        long left = endTime - EnvironmentEdgeManager.currentTime();
         if (left <= 0) break;
         synchronized (this) {
           checkException();
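
waitFor above is the classic real-sleep wait loop; under a frozen manual edge, "left" would never shrink and the loop could only exit once the context is stopped. One remedy is an edge that advances itself on every read. A hand-rolled sketch, assuming currentTime() is EnvironmentEdge's only method on this branch (hbase-common's test sources also carry an IncrementingEnvironmentEdge along these lines):

import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class AutoAdvancingEdgeSketch {
  public static void main(String[] args) {
    // Every currentTime() call moves the fake clock forward 100ms.
    EnvironmentEdge edge = new EnvironmentEdge() {
      private long now = 0;
      @Override
      public synchronized long currentTime() {
        now += 100;
        return now;
      }
    };
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      long endTime = EnvironmentEdgeManager.currentTime() + 1000;
      int polls = 0;
      while (endTime - EnvironmentEdgeManager.currentTime() > 0) {
        polls++; // the waitFor(millis) loop shape from above, minus the real wait
      }
      System.out.println("exited after " + polls + " polls, no wall time spent");
    } finally {
      EnvironmentEdgeManager.reset();
    }
  }
}
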
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
index f919db7..97d326a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +67,7 @@ public class PerformanceEvaluationCommons {
 
   public static void concurrentReads(final Runnable r) {
     final int count = 1;
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     List<Thread> threads = new ArrayList<>(count);
     for (int i = 0; i < count; i++) {
       threads.add(new Thread(r, "concurrentRead-" + i));
@@ -81,6 +82,6 @@ public class PerformanceEvaluationCommons {
         e.printStackTrace();
       }
     }
-    LOG.info("Test took " + (System.currentTimeMillis() - now));
+    LOG.info("Test took " + (EnvironmentEdgeManager.currentTime() - now));
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index 44b06de..882bc9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.ClassRule;
@@ -117,10 +118,10 @@ public class TestGlobalMemStoreSize {
         flush(r, server);
       }
       LOG.info("Post flush on " + server.getServerName());
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       long timeout = now + 1000;
       while(server.getRegionServerAccounting().getGlobalMemStoreDataSize() != 0 &&
-          timeout < System.currentTimeMillis()) {
+          EnvironmentEdgeManager.currentTime() < timeout) {
         Threads.sleep(10);
       }
       long size = server.getRegionServerAccounting().getGlobalMemStoreDataSize();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index d363181..fa2a337 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.ClassRule;
@@ -299,7 +300,7 @@ public class TestIOFencing {
         oldHri, compactionDescriptor, compactingRegion.getMVCC());
 
       // Wait till flush has happened, otherwise there won't be multiple store files
-      long startWaitTime = System.currentTimeMillis();
+      long startWaitTime = EnvironmentEdgeManager.currentTime();
       while (compactingRegion.getEarliestFlushTimeForAllStores() <= lastFlushTime ||
           compactingRegion.countStoreFiles() <= 1) {
         LOG.info("Waiting for the region to flush " +
@@ -307,7 +308,7 @@ public class TestIOFencing {
         Thread.sleep(1000);
         admin.flush(table.getName());
         assertTrue("Timed out waiting for the region to flush",
-          System.currentTimeMillis() - startWaitTime < 30000);
+          EnvironmentEdgeManager.currentTime() - startWaitTime < 30000);
       }
       assertTrue(compactingRegion.countStoreFiles() > 1);
       final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName();
@@ -321,7 +322,7 @@ public class TestIOFencing {
       LOG.info("Killing region server ZK lease");
       TEST_UTIL.expireRegionServerSession(0);
       CompactionBlockerRegion newRegion = null;
-      startWaitTime = System.currentTimeMillis();
+      startWaitTime = EnvironmentEdgeManager.currentTime();
       LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));
 
       // wait for region to be assigned and to go out of log replay if applicable
@@ -355,11 +356,11 @@ public class TestIOFencing {
       TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT,
         FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
       admin.majorCompact(TABLE_NAME);
-      startWaitTime = System.currentTimeMillis();
+      startWaitTime = EnvironmentEdgeManager.currentTime();
       while (newRegion.compactCount.get() == 0) {
         Thread.sleep(1000);
         assertTrue("New region never compacted",
-          System.currentTimeMillis() - startWaitTime < 180000);
+          EnvironmentEdgeManager.currentTime() - startWaitTime < 180000);
       }
       int count;
       for (int i = 0;; i++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 28ce7d8..f317fdf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -156,7 +156,7 @@ public class TestMetaTableAccessor {
     // it as a fail. We can't put that in the @Test tag as we want to close
     // the threads nicely
     final long timeOut = 180000;
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
 
     try {
       // Make sure reader and writer are working.
@@ -171,7 +171,7 @@ public class TestMetaTableAccessor {
         int index = -1;
         do {
           index = UTIL.getMiniHBaseCluster().getServerWithMeta();
-        } while (index == -1 && startTime + timeOut < System.currentTimeMillis());
+        } while (index == -1 && startTime + timeOut < EnvironmentEdgeManager.currentTime());
 
         if (index != -1) {
           UTIL.getMiniHBaseCluster().abortRegionServer(index);
@@ -190,7 +190,7 @@ public class TestMetaTableAccessor {
       writer.join();
       t.close();
     }
-    long exeTime = System.currentTimeMillis() - startTime;
+    long exeTime = EnvironmentEdgeManager.currentTime() - startTime;
     assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut);
   }
 
@@ -304,7 +304,7 @@ public class TestMetaTableAccessor {
     ServerName serverName1 = ServerName.valueOf("bar", 60010, random.nextLong());
     ServerName serverName100 = ServerName.valueOf("baz", 60010, random.nextLong());
 
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
       .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
       .setRegionId(regionId).setReplicaId(0).build();
@@ -379,7 +379,7 @@ public class TestMetaTableAccessor {
 
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
       .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
       .setRegionId(regionId).setReplicaId(0).build();
@@ -444,7 +444,7 @@ public class TestMetaTableAccessor {
    */
   @Test
   public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
       .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
       .setRegionId(regionId).setReplicaId(0).build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
index 9274fa0..db72ae7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -60,7 +61,7 @@ public class TestMetaTableLocator {
   private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableLocator.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final ServerName SN =
-    ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
+    ServerName.valueOf("example.org", 1234, EnvironmentEdgeManager.currentTime());
   private ZKWatcher watcher;
   private Abortable abortable;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
index e73272e..e2a8adb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -121,7 +122,7 @@ public class TestMetaUpdatesGoToPriorityQueue {
     }
     SpyingRpcScheduler scheduler = (SpyingRpcScheduler) rs.getRpcServer().getScheduler();
     long prevCalls = scheduler.numPriorityCalls;
-    long time = System.currentTimeMillis();
+    long time = EnvironmentEdgeManager.currentTime();
     Put putParent = MetaTableAccessor.makePutFromRegionInfo(
       RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time);
     MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
index 69cf139..90d84be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -71,7 +72,7 @@ public class TestSerialization {
     byte[] row = Bytes.toBytes(name);
     byte[] fam = Bytes.toBytes("fam");
     byte[] qf = Bytes.toBytes("qf");
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     byte[] val = Bytes.toBytes("val");
     KeyValue kv = new KeyValue(row, fam, qf, ts, val);
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -202,8 +203,7 @@ public class TestSerialization {
     byte[] row = Bytes.toBytes("row");
     byte[] fam = Bytes.toBytes("fam");
     byte[] qf1 = Bytes.toBytes("qf1");
-
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     int maxVersions = 2;
 
     Get get = new Get(row);
@@ -241,8 +241,7 @@ public class TestSerialization {
     byte[] stopRow = Bytes.toBytes("stopRow");
     byte[] fam = Bytes.toBytes("fam");
     byte[] qf1 = Bytes.toBytes("qf1");
-
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     int maxVersions = 2;
 
     Scan scan = new Scan().withStartRow(startRow).withStopRow(stopRow);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
index c0eacae..fba8f0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -149,7 +150,7 @@ public class TestZooKeeper {
    * Make sure we can use the cluster
    */
   private void testSanity(final String testName) throws Exception {
-    String tableName = testName + "_" + System.currentTimeMillis();
+    String tableName = testName + "_" + EnvironmentEdgeManager.currentTime();
     TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
         .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam")).build();
     LOG.info("Creating table " + tableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index 5754015..c8e9a56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -486,14 +487,14 @@ public class TestHFileArchiving {
 
   private void assertArchiveFiles(FileSystem fs, List<String> storeFiles, long timeout)
           throws IOException {
-    long end = System.currentTimeMillis() + timeout;
+    long end = EnvironmentEdgeManager.currentTime() + timeout;
     Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
     List<String> archivedFiles = new ArrayList<>();
 
     // We have to ensure that the DeleteTableHandler is finished. HBaseAdmin.deleteXXX()
     // can return before all files
     // are archived. We should fix HBASE-5487 and fix synchronous operations from admin.
-    while (System.currentTimeMillis() < end) {
+    while (EnvironmentEdgeManager.currentTime() < end) {
       archivedFiles = getAllFileNames(fs, archiveDir);
       if (archivedFiles.size() >= storeFiles.size()) {
         break;
@@ -595,8 +596,8 @@ public class TestHFileArchiving {
     try {
       choreService.scheduleChore(cleaner);
       // Keep creating/archiving new files while the cleaner is running in the other thread
-      long startTime = System.currentTimeMillis();
-      for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
+      long startTime = EnvironmentEdgeManager.currentTime();
+      for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) {
         Path file = new Path(familyDir,  String.valueOf(fid));
         Path sourceFile = new Path(rootDir, file);
         Path archiveFile = new Path(archiveDir, file);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java
index e8c0167..0340bdc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class CloneSnapshotFromClientAfterSplittingRegionTestBase
@@ -53,7 +54,7 @@ public class CloneSnapshotFromClientAfterSplittingRegionTestBase
 
       // Clone the snapshot to another table
       TableName clonedTableName =
-        TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+        TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
       admin.cloneSnapshot(snapshotName2, clonedTableName);
       SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName);
 
@@ -93,7 +94,7 @@ public class CloneSnapshotFromClientAfterSplittingRegionTestBase
 
       // Clone the snapshot to another table
       TableName clonedTableName =
-        TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+        TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
       admin.cloneSnapshot(snapshotName2, clonedTableName);
       SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java
index 254aeac..1d7e67c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase
@@ -31,7 +32,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase
   public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
     // Clone a table from the first snapshot
     final TableName clonedTableName =
-      TableName.valueOf(getValidMethodName() + "1-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "1-" + EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
     verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
 
@@ -41,7 +42,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase
 
     // Clone the snapshot of the cloned table
     final TableName clonedTableName2 =
-      TableName.valueOf(getValidMethodName() + "2-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "2-" + EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName2, clonedTableName2);
     verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
     admin.disableTable(clonedTableName2);
@@ -69,7 +70,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase
 
     // Clone a new table from cloned
     final TableName clonedTableName3 =
-      TableName.valueOf(getValidMethodName() + "3-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "3-" + EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName2, clonedTableName3);
     verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java
index 04df8e4..d660fff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java
@@ -21,15 +21,16 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class CloneSnapshotFromClientErrorTestBase extends CloneSnapshotFromClientTestBase {
 
   @Test(expected = SnapshotDoesNotExistException.class)
   public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
-    String snapshotName = "random-snapshot-" + System.currentTimeMillis();
+    String snapshotName = "random-snapshot-" + EnvironmentEdgeManager.currentTime();
     final TableName tableName =
-      TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName, tableName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java
index cb3ecd5..cf25d66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClientTestBase {
@@ -28,7 +29,7 @@ public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClie
   @Test
   public void testCloneSnapshot() throws IOException, InterruptedException {
     TableName clonedTableName =
-      TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
     testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
     testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
     testCloneSnapshot(clonedTableName, emptySnapshot, 0);
@@ -50,10 +51,10 @@ public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClie
 
   @Test
   public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException {
-    String nsName = getValidMethodName() + "_ns_" + System.currentTimeMillis();
+    String nsName = getValidMethodName() + "_ns_" + EnvironmentEdgeManager.currentTime();
     admin.createNamespace(NamespaceDescriptor.create(nsName).build());
     final TableName clonedTableName =
-      TableName.valueOf(nsName, getValidMethodName() + "-" + System.currentTimeMillis());
+      TableName.valueOf(nsName, getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
     testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
     testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
     testCloneSnapshot(clonedTableName, emptySnapshot, 0);
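
The hunks above show the mechanical substitution this change applies throughout: every wall-clock read goes through EnvironmentEdgeManager, which delegates to a swappable EnvironmentEdge. A minimal sketch of why that matters, assuming only the ManualEnvironmentEdge test helper that appears later in this patch (TestConnection); the class name here is illustrative, not from the commit:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class InjectableClockSketch {
      public static void main(String[] args) {
        // By default the manager delegates to the system clock.
        long seed = EnvironmentEdgeManager.currentTime();

        // A test can pin the clock to a known value...
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(seed);
        EnvironmentEdgeManager.injectEdge(edge);

        // ...so time-derived names like "random-snapshot-<ts>" above become
        // deterministic and repeatable across runs.
        String name = "random-snapshot-" + EnvironmentEdgeManager.currentTime();
        System.out.println(name);

        // Restore the default system-clock edge when done.
        EnvironmentEdgeManager.reset();
      }
    }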
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java
index 00cc1a0..611d900 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -84,8 +85,7 @@ public class CloneSnapshotFromClientTestBase {
   @Before
   public void setup() throws Exception {
     this.admin = TEST_UTIL.getAdmin();
-
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     tableName = TableName.valueOf(getValidMethodName() + tid);
     emptySnapshot = "emptySnaptb-" + tid;
     snapshotName0 = "snaptb0-" + tid;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java
index 07625f9..101ba9c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class RestoreSnapshotFromClientCloneTestBase extends RestoreSnapshotFromClientTestBase {
@@ -27,7 +28,7 @@ public class RestoreSnapshotFromClientCloneTestBase extends RestoreSnapshotFromC
   @Test
   public void testCloneSnapshotOfCloned() throws IOException, InterruptedException {
     TableName clonedTableName =
-      TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
     verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
     SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java
index 729679c..627de2a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Test;
 
 public class RestoreSnapshotFromClientSimpleTestBase extends RestoreSnapshotFromClientTestBase {
@@ -64,7 +65,7 @@ public class RestoreSnapshotFromClientSimpleTestBase extends RestoreSnapshotFrom
   public void testCorruptedSnapshot() throws IOException, InterruptedException {
     SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, snapshotName0);
     TableName cloneName =
-      TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis());
+      TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime());
     try {
       admin.cloneSnapshot(snapshotName0, cloneName);
       fail("Expected CorruptedSnapshotException, got succeeded cloneSnapshot()");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java
index 666d67d..0b1ffde 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -81,8 +82,7 @@ public class RestoreSnapshotFromClientTestBase {
   @Before
   public void setup() throws Exception {
     this.admin = TEST_UTIL.getAdmin();
-
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     tableName = TableName.valueOf(getValidMethodName() + "-" + tid);
     emptySnapshot = "emptySnaptb-" + tid;
     snapshotName0 = "snaptb0-" + tid;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index b0271a0..102e7e2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.Assert;
@@ -274,8 +275,8 @@ public class TestAdmin2 extends TestAdminBase {
     }
     boolean isInList = ProtobufUtil.getOnlineRegions(
       rs.getRSRpcServices()).contains(info);
-    long timeout = System.currentTimeMillis() + 10000;
-    while ((System.currentTimeMillis() < timeout) && (isInList)) {
+    long timeout = EnvironmentEdgeManager.currentTime() + 10000;
+    while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) {
       Thread.sleep(100);
       isInList = ProtobufUtil.getOnlineRegions(
         rs.getRSRpcServices()).contains(info);
@@ -328,8 +329,8 @@ public class TestAdmin2 extends TestAdminBase {
 
     boolean isInList = ProtobufUtil.getOnlineRegions(
       rs.getRSRpcServices()).contains(info);
-    long timeout = System.currentTimeMillis() + 10000;
-    while ((System.currentTimeMillis() < timeout) && (isInList)) {
+    long timeout = EnvironmentEdgeManager.currentTime() + 10000;
+    while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) {
       Thread.sleep(100);
       isInList = ProtobufUtil.getOnlineRegions(
         rs.getRSRpcServices()).contains(info);
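
These TestAdmin2 hunks convert a deadline-polling loop; once both the deadline and the loop check read the injectable clock, a test-controlled clock governs the timeout as well as the timestamps. The loop shape, extracted into a hypothetical helper (names are illustrative, not from this commit):

    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class PollSketch {
      // Poll until the condition clears or timeoutMs elapses on the
      // injectable clock; returns true if the condition cleared in time.
      static boolean waitUntilGone(BooleanSupplier stillPresent, long timeoutMs)
          throws InterruptedException {
        long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
        while (EnvironmentEdgeManager.currentTime() < deadline
            && stillPresent.getAsBoolean()) {
          Thread.sleep(100);
        }
        return !stillPresent.getAsBoolean();
      }
    }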
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
index 441d401..e7be08e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -359,7 +360,7 @@ public class TestAdmin3 extends TestAdminBase {
     // Now make it so at least the table exists and then do tests against a
     // nonexistent column family -- see if we get right exceptions.
     final TableName tableName =
-      TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
+      TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime());
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
       .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
     ADMIN.createTable(htd);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index c61a289..6ff2d22 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -112,14 +112,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 
     // wait till the table is assigned
     HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-    long timeoutTime = System.currentTimeMillis() + 3000;
+    long timeoutTime = EnvironmentEdgeManager.currentTime() + 3000;
     while (true) {
       List<RegionInfo> regions =
           master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
       if (regions.size() > 3) {
         return regions.get(2);
       }
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       if (now > timeoutTime) {
         fail("Could not find an online region");
       }
@@ -163,13 +163,13 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     assertTrue(destServerName != null && !destServerName.equals(serverName));
     admin.move(hri.getRegionName(), destServerName).get();
 
-    long timeoutTime = System.currentTimeMillis() + 30000;
+    long timeoutTime = EnvironmentEdgeManager.currentTime() + 30000;
     while (true) {
       ServerName sn = rawAdmin.getRegionLocation(hri.getRegionName()).get().getServerName();
       if (sn != null && sn.equals(destServerName)) {
         break;
       }
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       if (now > timeoutTime) {
         fail("Failed to move the region in time: " + hri);
       }
@@ -426,14 +426,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
       }
     }
 
-    long curt = System.currentTimeMillis();
+    long curt = EnvironmentEdgeManager.currentTime();
     long waitTime = 10000;
     long endt = curt + waitTime;
     CompactionState state = admin.getCompactionState(tableName).get();
     while (state == CompactionState.NONE && curt < endt) {
       Thread.sleep(1);
       state = admin.getCompactionState(tableName).get();
-      curt = System.currentTimeMillis();
+      curt = EnvironmentEdgeManager.currentTime();
     }
     // Now, should have the right compaction state,
     // otherwise, the compaction should have already been done
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java
index 2952fa3..50f30d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -391,7 +392,7 @@ public class TestAsyncTable {
   @Deprecated
   public void testCheckAndMutateWithTimeRangeForOldApi() throws Exception {
     AsyncTable<?> table = getTable.get();
-    final long ts = System.currentTimeMillis() / 2;
+    final long ts = EnvironmentEdgeManager.currentTime() / 2;
     Put put = new Put(row);
     put.addColumn(FAMILY, QUALIFIER, ts, VALUE);
 
@@ -750,7 +751,7 @@ public class TestAsyncTable {
   @Test
   public void testCheckAndMutateWithTimeRange() throws Exception {
     AsyncTable<?> table = getTable.get();
-    final long ts = System.currentTimeMillis() / 2;
+    final long ts = EnvironmentEdgeManager.currentTime() / 2;
     Put put = new Put(row);
     put.addColumn(FAMILY, QUALIFIER, ts, VALUE);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
index 62e6f57..5f9f8f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.junit.Assert;
 import org.junit.ClassRule;
@@ -243,7 +244,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
       admin.flush(tableName).join();
     }
     admin.majorCompact(tableName).join();
-    long curt = System.currentTimeMillis();
+    long curt = EnvironmentEdgeManager.currentTime();
     long waitTime = 10000;
     long endt = curt + waitTime;
     CompactionState state = admin.getCompactionState(tableName).get();
@@ -251,7 +252,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
     while (state == CompactionState.NONE && curt < endt) {
       Thread.sleep(100);
       state = admin.getCompactionState(tableName).get();
-      curt = System.currentTimeMillis();
+      curt = EnvironmentEdgeManager.currentTime();
       LOG.info("Current compaction state 2 is " + state);
     }
     // Now, should have the right compaction state, let's wait until the compaction is done
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index b8c994c..48b16ff 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -1478,12 +1479,13 @@ public class TestBlockEvictionFromClient {
 
   private void waitForStoreFileCount(HStore store, int count, int timeout)
       throws InterruptedException {
-    long start = System.currentTimeMillis();
-    while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while (start + timeout > EnvironmentEdgeManager.currentTime() &&
+        store.getStorefilesCount() != count) {
       Thread.sleep(100);
     }
-    System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
-        store.getStorefilesCount());
+    System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() +
+      ", cur=" + store.getStorefilesCount());
     assertEquals(count, store.getStorefilesCount());
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
index 888b3d2..a47ebf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -109,9 +110,9 @@ public class TestClientScannerRPCTimeout {
     result = scanner.next();
     assertTrue("Expected row: row-1", Bytes.equals(r1, result.getRow()));
     LOG.info("Got expected first row");
-    long t1 = System.currentTimeMillis();
+    long t1 = EnvironmentEdgeManager.currentTime();
     result = scanner.next();
-    assertTrue((System.currentTimeMillis() - t1) > rpcTimeout);
+    assertTrue((EnvironmentEdgeManager.currentTime() - t1) > rpcTimeout);
     assertTrue("Expected row: row-2", Bytes.equals(r2, result.getRow()));
     RSRpcServicesWithScanTimeout.seqNoToSleepOn = -1;// No need of sleep
     result = scanner.next();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java
index 859c37c..3eecb69 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java
@@ -244,7 +244,7 @@ public class TestConnection {
     table.put(put);
 
     ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
-    mee.setValue(System.currentTimeMillis());
+    mee.setValue(EnvironmentEdgeManager.currentTime());
     EnvironmentEdgeManager.injectEdge(mee);
     LOG.info("first get");
     table.get(new Get(ROW));
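
TestConnection already drove a ManualEnvironmentEdge; the one-line change seeds it from the manager rather than from System.currentTimeMillis(), so the manual clock starts from whatever edge is currently installed and time never jumps backwards. The usual lifecycle of a manual clock in a test, as a sketch under the same API assumptions:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class ManualClockLifecycle {
      public static void main(String[] args) {
        ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
        mee.setValue(EnvironmentEdgeManager.currentTime()); // seed from the current edge
        EnvironmentEdgeManager.injectEdge(mee);
        try {
          mee.incValue(5000); // jump the clock forward 5s without sleeping
          // ... exercise code that reads EnvironmentEdgeManager.currentTime() ...
          System.out.println("manual now = " + EnvironmentEdgeManager.currentTime());
        } finally {
          EnvironmentEdgeManager.reset(); // restore the default system-clock edge
        }
      }
    }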
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index bd56e5f..87ad7db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -216,7 +217,7 @@ public class TestFromClientSide extends FromClientSideBase {
       .build();
     TEST_UTIL.getAdmin().createTable(tableDescriptor);
     try (Table h = TEST_UTIL.getConnection().getTable(tableName)) {
-      long ts = System.currentTimeMillis();
+      long ts = EnvironmentEdgeManager.currentTime();
       Put p = new Put(T1, ts);
       p.addColumn(FAMILY, C0, T1);
       h.put(p);
@@ -275,7 +276,7 @@ public class TestFromClientSide extends FromClientSideBase {
 
     try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
       // future timestamp
-      long ts = System.currentTimeMillis() * 2;
+      long ts = EnvironmentEdgeManager.currentTime() * 2;
       Put put = new Put(ROW, ts);
       put.addColumn(FAMILY, COLUMN, VALUE);
       table.put(put);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index d949fdb..6b7a806 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -218,7 +219,7 @@ public class TestFromClientSide3 {
       byte[] row = Bytes.toBytes("SpecifiedRow");
       byte[] qual0 = Bytes.toBytes("qual0");
       byte[] qual1 = Bytes.toBytes("qual1");
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       Delete d = new Delete(row, now);
       table.delete(d);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
index 3b9a671..40ba385 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
@@ -953,7 +953,7 @@ public class TestFromClientSide5 extends FromClientSideBase {
   @Test
   public void testCheckAndMutateWithTimeRange() throws IOException {
     try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) {
-      final long ts = System.currentTimeMillis() / 2;
+      final long ts = EnvironmentEdgeManager.currentTime() / 2;
       Put put = new Put(ROW);
       put.addColumn(FAMILY, QUALIFIER, ts, VALUE);
 
@@ -1487,12 +1487,13 @@ public class TestFromClientSide5 extends FromClientSideBase {
 
   private void waitForStoreFileCount(HStore store, int count, int timeout)
       throws InterruptedException {
-    long start = System.currentTimeMillis();
-    while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while (start + timeout > EnvironmentEdgeManager.currentTime() &&
+        store.getStorefilesCount() != count) {
       Thread.sleep(100);
     }
-    System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
-        store.getStorefilesCount());
+    System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() +
+      ", cur=" + store.getStorefilesCount());
     assertEquals(count, store.getStorefilesCount());
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
index b3b4e06..0195da5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -112,10 +113,10 @@ public class TestMobCloneSnapshotFromClientCloneLinksAfterDelete
     // delay the flush to make sure
     delayFlush = true;
     SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 20, FAMILY);
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     String snapshotName3 = "snaptb3-" + tid;
     TableName clonedTableName3 =
-      TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
+      TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime());
     admin.snapshot(snapshotName3, tableName);
     delayFlush = false;
     int snapshot3Rows = -1;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java
index dd64854..12defbe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -66,7 +67,7 @@ public class TestMutationGetCellBuilder {
     final byte[] uselessRowKey = Bytes.toBytes("123");
     final byte[] family = Bytes.toBytes("cf");
     final byte[] qualifier = Bytes.toBytes("foo");
-    final long now = System.currentTimeMillis();
+    final long now = EnvironmentEdgeManager.currentTime();
     try (Table table = TEST_UTIL.createTable(tableName, family)) {
       TEST_UTIL.waitTableAvailable(tableName.getName(), 5000);
       // put one row
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
index b558358..9f2cc01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -46,7 +47,7 @@ public class TestPutDeleteEtcCellIteration {
       HBaseClassTestRule.forClass(TestPutDeleteEtcCellIteration.class);
 
   private static final byte [] ROW = new byte [] {'r'};
-  private static final long TIMESTAMP = System.currentTimeMillis();
+  private static final long TIMESTAMP = EnvironmentEdgeManager.currentTime();
   private static final int COUNT = 10;
 
   @Test
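
One subtlety in hunks like this TestPutDeleteEtcCellIteration one: TIMESTAMP is a static final field, so it is evaluated once at class initialization against whatever edge is installed at that moment (normally the default system clock), and an edge injected later inside an individual test does not retroactively change it. A small sketch of the distinction, with illustrative names:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class StaticTimestampCaveat {
      // Fixed once at class initialization, against whatever edge is
      // installed at that moment:
      static final long TIMESTAMP = EnvironmentEdgeManager.currentTime();

      public static void main(String[] args) {
        // A read here observes any edge injected before this point:
        long perTestTime = EnvironmentEdgeManager.currentTime();
        System.out.println(TIMESTAMP + " vs " + perTestTime);
      }
    }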
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 43d1418..c7fb227 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -762,7 +762,7 @@ public class TestScannersFromClientSide {
   @Test
   public void testReadExpiredDataForRawScan() throws IOException {
     TableName tableName = name.getTableName();
-    long ts = System.currentTimeMillis() - 10000;
+    long ts = EnvironmentEdgeManager.currentTime() - 10000;
     byte[] value = Bytes.toBytes("expired");
     try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
       table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value));
@@ -782,7 +782,7 @@ public class TestScannersFromClientSide {
   @Test
   public void testScanWithColumnsAndFilterAndVersion() throws IOException {
     TableName tableName = name.getTableName();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 4)) {
       for (int i = 0; i < 4; i++) {
         Put put = new Put(ROW);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index cfa0b6d..5b1b416 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -264,7 +265,7 @@ public class TestSnapshotCloneIndependence {
         countOriginalTable, clonedTableRowCount);
 
       // Attempt to add data to the test
-      Put p = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis()));
+      Put p = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime()));
       p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
       originalTable.put(p);
 
@@ -275,7 +276,7 @@ public class TestSnapshotCloneIndependence {
         "The row count of the cloned table changed as a result of addition to the original",
         clonedTableRowCount, countRows(clonedTable));
 
-      Put p2 = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis()));
+      Put p2 = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime()));
       p2.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
       clonedTable.put(p2);
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
index 4c5afb1..5905419 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -149,7 +150,7 @@ public class TestSnapshotMetadata {
    *  Create a table that has non-default properties so we can see if they hold
    */
   private void createTableWithNonDefaultProperties() throws Exception {
-    final long startTime = System.currentTimeMillis();
+    final long startTime = EnvironmentEdgeManager.currentTime();
     final String sourceTableNameAsString = STRING_TABLE_NAME + startTime;
     originalTableName = TableName.valueOf(sourceTableNameAsString);
 
@@ -189,7 +190,7 @@ public class TestSnapshotMetadata {
     final String clonedTableNameAsString = "clone" + originalTableName;
     final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString);
     final String snapshotNameAsString = "snapshot" + originalTableName
-        + System.currentTimeMillis();
+        + EnvironmentEdgeManager.currentTime();
     final String snapshotName = snapshotNameAsString;
 
     // restore the snapshot into a cloned table and examine the output
@@ -278,7 +279,7 @@ public class TestSnapshotMetadata {
 
     // take a "disabled" snapshot
     final String snapshotNameAsString = "snapshot" + originalTableName
-        + System.currentTimeMillis();
+        + EnvironmentEdgeManager.currentTime();
 
     SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName,
       familiesWithDataList, emptyFamiliesList, snapshotNameAsString, rootDir, fs,
@@ -287,7 +288,7 @@ public class TestSnapshotMetadata {
     admin.enableTable(originalTableName);
 
     if (changeMetadata) {
-      final String newFamilyNameAsString = "newFamily" + System.currentTimeMillis();
+      final String newFamilyNameAsString = "newFamily" + EnvironmentEdgeManager.currentTime();
       final byte[] newFamilyName = Bytes.toBytes(newFamilyNameAsString);
 
       admin.disableTable(originalTableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
index aa2ecd4..617ee30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -156,7 +157,7 @@ public class TestSnapshotTemporaryDirectory {
   @Test
   public void testRestoreDisabledSnapshot()
       throws IOException, InterruptedException {
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     TableName tableName = TableName.valueOf("testtb-" + tid);
     String emptySnapshot = "emptySnaptb-" + tid;
     String snapshotName0 = "snaptb0-" + tid;
@@ -223,7 +224,7 @@ public class TestSnapshotTemporaryDirectory {
   @Test
   public void testRestoreEnabledSnapshot()
       throws IOException, InterruptedException {
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     TableName tableName = TableName.valueOf("testtb-" + tid);
     String emptySnapshot = "emptySnaptb-" + tid;
     String snapshotName0 = "snaptb0-" + tid;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
index 1788421..21d069f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
@@ -448,7 +449,7 @@ public class TestTableSnapshotScanner {
         }
       });
       // set file modify time and then run cleaner
-      long time = System.currentTimeMillis() - TimeToLiveHFileCleaner.DEFAULT_TTL * 1000;
+      long time = EnvironmentEdgeManager.currentTime() - TimeToLiveHFileCleaner.DEFAULT_TTL * 1000;
       traverseAndSetFileTime(HFileArchiveUtil.getArchivePath(conf), time);
       UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().runCleaner();
       // scan snapshot
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
index 5a2a893..2b8c3a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -100,14 +101,14 @@ public class TestEntityLocks {
   }
 
   private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) {
-    long startMillis = System.currentTimeMillis();
+    long startMillis = EnvironmentEdgeManager.currentTime();
     while (lock.isLocked()) {
       LOG.info("Sleeping...");
       Threads.sleepWithoutInterrupt(100);
       if (!lock.isLocked()) {
         return true;
       }
-      if (System.currentTimeMillis() - startMillis > maxWaitTimeMillis) {
+      if (EnvironmentEdgeManager.currentTime() - startMillis > maxWaitTimeMillis) {
         LOG.info("Timedout...");
         return false;
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
index bba27fe..73f5ca0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.codec.KeyValueCodec;
 import org.apache.hadoop.hbase.codec.MessageCodec;
 import org.apache.hadoop.hbase.io.CellOutputStream;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Do basic codec performance eval.
@@ -68,13 +69,13 @@ public class CodecPerformance {
   static byte [] runEncoderTest(final int index, final int initialBufferSize,
       final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell [] cells)
   throws IOException {
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < cells.length; i++) {
       encoder.write(cells[i]);
     }
     encoder.flush();
     LOG.info("" + index + " encoded count=" + cells.length + " in " +
-      (System.currentTimeMillis() - startTime) + "ms for encoder " + encoder);
+      (EnvironmentEdgeManager.currentTime() - startTime) + "ms for encoder " + encoder);
     // Ensure we did not have to grow the backing buffer.
     assertTrue(baos.size() < initialBufferSize);
     return baos.toByteArray();
@@ -83,12 +84,12 @@ public class CodecPerformance {
   static Cell [] runDecoderTest(final int index, final int count, final CellScanner decoder)
   throws IOException {
     Cell [] cells = new Cell[count];
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     for (int i = 0; decoder.advance(); i++) {
       cells[i] = decoder.current();
     }
     LOG.info("" + index + " decoded count=" + cells.length + " in " +
-      (System.currentTimeMillis() - startTime) + "ms for decoder " + decoder);
+      (EnvironmentEdgeManager.currentTime() - startTime) + "ms for decoder " + decoder);
     // Ensure we did not have to grow the backing buffer.
     assertTrue(cells.length == count);
     return cells;
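
CodecPerformance is a standalone timing tool, and its measured durations are unchanged by this commit because the default edge is a plain delegation to the system clock. A sketch of that default, assuming the DefaultEnvironmentEdge implementation that ships alongside EnvironmentEdgeManager (class name here is illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdge;

    // The default edge: currentTime() is exactly System.currentTimeMillis(),
    // so production timing is identical before and after this refactor.
    public class DefaultClockSketch implements EnvironmentEdge {
      @Override
      public long currentTime() {
        return System.currentTimeMillis();
      }
    }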
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java
index d7ac38a..396d6ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.junit.AfterClass;
@@ -111,14 +112,14 @@ public class TestCoprocessorMetrics {
     public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
                                TableDescriptor desc, RegionInfo[] regions) throws IOException {
       // we rely on the fact that there is only 1 instance of our MasterObserver
-      this.start = System.currentTimeMillis();
+      this.start = EnvironmentEdgeManager.currentTime();
     }
 
     @Override
     public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
                                 TableDescriptor desc, RegionInfo[] regions) throws IOException {
       if (this.start > 0) {
-        long time = System.currentTimeMillis() - start;
+        long time = EnvironmentEdgeManager.currentTime() - start;
         LOG.info("Create table took: " + time);
         createTableTimer.updateMillis(time);
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java
index 2fc4dea..4517b25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -50,10 +51,9 @@ public class TestCoprocessorStop {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorStop.class);
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final String MASTER_FILE =
-                              "master" + System.currentTimeMillis();
-  private static final String REGIONSERVER_FILE =
-                              "regionserver" + System.currentTimeMillis();
+  private static final String MASTER_FILE = "master" + EnvironmentEdgeManager.currentTime();
+  private static final String REGIONSERVER_FILE = "regionserver" +
+    EnvironmentEdgeManager.currentTime();
 
   public static class FooCoprocessor implements MasterCoprocessor, RegionServerCoprocessor {
     @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index ff9d8a1..91da8a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -671,7 +671,7 @@ public class TestRegionObserverInterface {
     EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
 
     // force a compaction
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     admin.flush(compactTable);
     // wait for flush
     for (int i = 0; i < 10; i++) {
@@ -954,7 +954,7 @@ public class TestRegionObserverInterface {
     HFileContext context = new HFileContextBuilder().build();
     HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
         .withFileContext(context).create();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       for (int i = 1; i <= 9; i++) {
         KeyValue kv =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
index dae8cf7..4938e7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
@@ -70,7 +71,7 @@ public class TestRegionObserverStacking extends TestCase {
         final Put put, final WALEdit edit,
         final Durability durability)
         throws IOException {
-      id = System.currentTimeMillis();
+      id = EnvironmentEdgeManager.currentTime();
       try {
         Thread.sleep(10);
       } catch (InterruptedException ex) {
@@ -91,7 +92,7 @@ public class TestRegionObserverStacking extends TestCase {
         final Put put, final WALEdit edit,
         final Durability durability)
         throws IOException {
-      id = System.currentTimeMillis();
+      id = EnvironmentEdgeManager.currentTime();
       try {
         Thread.sleep(10);
       } catch (InterruptedException ex) {
@@ -112,7 +113,7 @@ public class TestRegionObserverStacking extends TestCase {
         final Put put, final WALEdit edit,
         final Durability durability)
         throws IOException {
-      id = System.currentTimeMillis();
+      id = EnvironmentEdgeManager.currentTime();
       try {
         Thread.sleep(10);
       } catch (InterruptedException ex) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index 6fbd3b5..4ab00ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -142,10 +142,9 @@ public class TestWALObserver {
     this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
     this.hbaseRootDir = CommonFSUtils.getRootDir(conf);
     this.hbaseWALRootDir = CommonFSUtils.getWALRootDir(conf);
-    this.oldLogDir = new Path(this.hbaseWALRootDir,
-        HConstants.HREGION_OLDLOGDIR_NAME);
+    this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
     String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010,
-        System.currentTimeMillis()).toString();
+      EnvironmentEdgeManager.currentTime()).toString();
     this.logDir = new Path(this.hbaseWALRootDir,
       AbstractFSWALProvider.getWALDirectoryName(serverName));
 
@@ -352,7 +351,8 @@ public class TestWALObserver {
         LOG.info("WALSplit path == " + p);
         // Make a new wal for new region open.
         final WALFactory wals2 = new WALFactory(conf,
-            ServerName.valueOf(currentTest.getMethodName() + "2", 16010, System.currentTimeMillis())
+            ServerName.valueOf(currentTest.getMethodName() + "2", 16010,
+              EnvironmentEdgeManager.currentTime())
                 .toString());
         WAL wal2 = wals2.getWAL(null);
         HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
index a13aed2..0dbe6d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -67,7 +68,7 @@ public class TestDependentColumnFilter {
   private static final byte[][] FAMILIES = {
     Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo")
   };
-  private static final long STAMP_BASE = System.currentTimeMillis();
+  private static final long STAMP_BASE = EnvironmentEdgeManager.currentTime();
   private static final long[] STAMPS = {
     STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300
   };
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java
index b62bb8e..ad1bb8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -304,8 +305,8 @@ public class TestFilterSerialization {
 
     // Non-empty timestamp list
     LinkedList<Long> list = new LinkedList<>();
-    list.add(System.currentTimeMillis());
-    list.add(System.currentTimeMillis());
+    list.add(EnvironmentEdgeManager.currentTime());
+    list.add(EnvironmentEdgeManager.currentTime());
     timestampsFilter = new TimestampsFilter(list);
     assertTrue(timestampsFilter.areSerializedFieldsEqual(
       ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter))));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java
index dcd9ceb..0fa9443 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -174,7 +175,7 @@ public class TestFuzzyRowAndColumnRangeFilter {
     ResultScanner scanner = hTable.getScanner(scan);
     List<Cell> results = new ArrayList<>();
     Result result;
-    long timeBeforeScan = System.currentTimeMillis();
+    long timeBeforeScan = EnvironmentEdgeManager.currentTime();
     while ((result = scanner.next()) != null) {
       for (Cell kv : result.listCells()) {
         LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
@@ -182,7 +183,7 @@ public class TestFuzzyRowAndColumnRangeFilter {
         results.add(kv);
       }
     }
-    long scanTime = System.currentTimeMillis() - timeBeforeScan;
+    long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan;
     scanner.close();
 
     LOG.info("scan time = " + scanTime + "ms");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java
index ca538aa..3548487 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.testclassification.FilterTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -341,14 +342,14 @@ public class TestFuzzyRowFilterEndToEnd {
     RegionScanner scanner = first.getScanner(scan);
     List<Cell> results = new ArrayList<>();
     // Result result;
-    long timeBeforeScan = System.currentTimeMillis();
+    long timeBeforeScan = EnvironmentEdgeManager.currentTime();
     int found = 0;
     while (scanner.next(results)) {
       found += results.size();
       results.clear();
     }
     found += results.size();
-    long scanTime = System.currentTimeMillis() - timeBeforeScan;
+    long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan;
     scanner.close();
 
     LOG.info("\nscan time = " + scanTime + "ms");
@@ -442,7 +443,7 @@ public class TestFuzzyRowFilterEndToEnd {
     ResultScanner scanner = hTable.getScanner(scan);
     List<Cell> results = new ArrayList<>();
     Result result;
-    long timeBeforeScan = System.currentTimeMillis();
+    long timeBeforeScan = EnvironmentEdgeManager.currentTime();
     while ((result = scanner.next()) != null) {
       for (Cell kv : result.listCells()) {
         LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
@@ -450,7 +451,7 @@ public class TestFuzzyRowFilterEndToEnd {
         results.add(kv);
       }
     }
-    long scanTime = System.currentTimeMillis() - timeBeforeScan;
+    long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan;
     scanner.close();
 
     LOG.info("scan time = " + scanTime + "ms");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
index a75c0d0..58c9011 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -112,10 +113,10 @@ public class TestBlockReorder {
     fop.close();
 
     // Let's check we can read it when everybody's there
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     FSDataInputStream fin = dfs.open(p);
     Assert.assertTrue(toWrite == fin.readDouble());
-    long end = System.currentTimeMillis();
+    long end = EnvironmentEdgeManager.currentTime();
     LOG.info("readtime= " + (end - start));
     fin.close();
     Assert.assertTrue((end - start) < 30 * 1000);
@@ -194,12 +195,11 @@ public class TestBlockReorder {
     // Now it will fail with a timeout, unfortunately it does not always connect to the same box,
     // so we try retries times;  with the reorder it will never last more than a few milli seconds
     for (int i = 0; i < retries; i++) {
-      start = System.currentTimeMillis();
-
+      start = EnvironmentEdgeManager.currentTime();
       fin = dfs.open(p);
       Assert.assertTrue(toWrite == fin.readDouble());
       fin.close();
-      end = System.currentTimeMillis();
+      end = EnvironmentEdgeManager.currentTime();
       LOG.info("HFileSystem readtime= " + (end - start));
       Assert.assertFalse("We took too much time to read", (end - start) > 60000);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java
index ce2877c..ead8dcb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -121,13 +122,13 @@ public class TestBlockReorderBlockLocation {
     for (int i=0; i<10; i++){
       // The interceptor is not set in this test, so we get the raw list at this point
       LocatedBlocks l;
-      final long max = System.currentTimeMillis() + 10000;
+      final long max = EnvironmentEdgeManager.currentTime() + 10000;
       do {
         l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
         Assert.assertNotNull(l.getLocatedBlocks());
         Assert.assertEquals(1, l.getLocatedBlocks().size());
         Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length,
-            System.currentTimeMillis() < max);
+          EnvironmentEdgeManager.currentTime() < max);
       } while (l.get(0).getLocations().length != repCount);
 
       // Should be filtered, the name is different => The order won't change
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
index e2e059d..a6ff853 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -247,10 +248,10 @@ public class TestBlockReorderMultiBlocks {
     for (int i = 0; i < 10; i++) {
       LocatedBlocks l;
       // The NN gets the block list asynchronously, so we may need multiple tries to get the list
-      final long max = System.currentTimeMillis() + 10000;
+      final long max = EnvironmentEdgeManager.currentTime() + 10000;
       boolean done;
       do {
-        Assert.assertTrue("Can't get enouth replica.", System.currentTimeMillis() < max);
+        Assert.assertTrue("Can't get enouth replica", EnvironmentEdgeManager.currentTime() < max);
         l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
         Assert.assertNotNull("Can't get block locations for " + src, l);
         Assert.assertNotNull(l.getLocatedBlocks());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 3f326a3..398817f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -612,9 +613,9 @@ public class TestHeapSize  {
       // do estimate in advance to ensure class is loaded
       ClassSize.estimateBase(cl, false);
 
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       ClassSize.estimateBase(cl, false);
-      long endTime = System.currentTimeMillis();
+      long endTime = EnvironmentEdgeManager.currentTime();
       assertTrue(endTime - startTime < 5);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
index c6b8ddc..d929472 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -237,12 +238,12 @@ public class TestChangingEncoding {
     admin.majorCompact(tableName);
 
     // Waiting for the compaction to start, at least .5s.
-    final long maxWaitime = System.currentTimeMillis() + 500;
+    final long maxWaitime = EnvironmentEdgeManager.currentTime() + 500;
     boolean cont;
     do {
       cont = rs.getCompactSplitThread().getCompactionQueueSize() == 0;
       Threads.sleep(1);
-    } while (cont && System.currentTimeMillis() < maxWaitime);
+    } while (cont && EnvironmentEdgeManager.currentTime() < maxWaitime);
 
     while (rs.getCompactSplitThread().getCompactionQueueSize() > 0) {
       Threads.sleep(1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index 2da64e2..d7294bf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.Compressor;
 import org.junit.After;
@@ -760,11 +761,11 @@ public class TestHFileBlock {
     @Override
     public Boolean call() throws Exception {
       Random rand = new Random(clientId.hashCode());
-      long endTime = System.currentTimeMillis() + 10000;
+      long endTime = EnvironmentEdgeManager.currentTime() + 10000;
       int numBlocksRead = 0;
       int numPositionalRead = 0;
       int numWithOnDiskSize = 0;
-      while (System.currentTimeMillis() < endTime) {
+      while (EnvironmentEdgeManager.currentTime() < endTime) {
         int blockId = rand.nextInt(NUM_TEST_BLOCKS);
         long offset = offsets.get(blockId);
         // now we only support concurrent read with pread = true
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
index de6bc24..cd33aac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -139,7 +140,7 @@ public class TestHFileScannerImplReferenceCount {
     this.allocator = ByteBuffAllocator.create(UTIL.getConfiguration(), true);
     this.conf = new Configuration(UTIL.getConfiguration());
     this.fs = this.workDir.getFileSystem(conf);
-    this.hfilePath = new Path(this.workDir, caseName + System.currentTimeMillis());
+    this.hfilePath = new Path(this.workDir, caseName + EnvironmentEdgeManager.currentTime());
     LOG.info("Start to write {} cells into hfile: {}, case:{}", CELL_COUNT, hfilePath, caseName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
index f34f369..e111ddd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
@@ -107,7 +107,7 @@ public class TestScannerFromBucketCache {
     byte[] qf2 = Bytes.toBytes("qualifier2");
     byte[] fam1 = Bytes.toBytes("lrucache");
 
-    long ts1 = 1; // System.currentTimeMillis();
+    long ts1 = 1;
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -145,7 +145,7 @@ public class TestScannerFromBucketCache {
     byte[] qf2 = Bytes.toBytes("qualifier2");
     byte[] fam1 = Bytes.toBytes("famoffheap");
 
-    long ts1 = 1; // System.currentTimeMillis();
+    long ts1 = 1;
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -186,7 +186,7 @@ public class TestScannerFromBucketCache {
     byte[] qf2 = Bytes.toBytes("qualifier2");
     byte[] fam1 = Bytes.toBytes("famoffheap");
 
-    long ts1 = 1; // System.currentTimeMillis();
+    long ts1 = 1;
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java
index ab282e3..61d91e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -58,13 +59,14 @@ public class TestProtobufRpcServiceImpl implements BlockingInterface {
   public static BlockingInterface newBlockingStub(RpcClient client, InetSocketAddress addr,
       User user) throws IOException {
     return TestProtobufRpcProto.newBlockingStub(client.createBlockingRpcChannel(
-      ServerName.valueOf(addr.getHostName(), addr.getPort(), System.currentTimeMillis()), user, 0));
+      ServerName.valueOf(addr.getHostName(), addr.getPort(),
+        EnvironmentEdgeManager.currentTime()), user, 0));
   }
 
   public static Interface newStub(RpcClient client, InetSocketAddress addr) throws IOException {
     return TestProtobufRpcProto.newStub(client.createRpcChannel(
-      ServerName.valueOf(addr.getHostName(), addr.getPort(), System.currentTimeMillis()),
-      User.getCurrent(), 0));
+      ServerName.valueOf(addr.getHostName(), addr.getPort(),
+        EnvironmentEdgeManager.currentTime()), User.getCurrent(), 0));
   }
 
   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index dee9c65..f791421 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -480,7 +480,12 @@ public class TestSimpleRpcScheduler {
       for (String threadNamePrefix : threadNamePrefixs) {
         String threadName = Thread.currentThread().getName();
         if (threadName.startsWith(threadNamePrefix)) {
-          return timeQ.poll().longValue() + offset;
+          if (timeQ != null) {
+            Long qTime = timeQ.poll();
+            if (qTime != null) {
+              return qTime.longValue() + offset;
+            }
+          }
         }
       }
       return System.currentTimeMillis();
@@ -511,17 +516,16 @@ public class TestSimpleRpcScheduler {
     try {
       // Loading mocked call runner can take a good amount of time the first time through
       // (haven't looked why). Load it for first time here outside of the timed loop.
-      getMockedCallRunner(System.currentTimeMillis(), 2);
+      getMockedCallRunner(EnvironmentEdgeManager.currentTime(), 2);
       scheduler.start();
       EnvironmentEdgeManager.injectEdge(envEdge);
       envEdge.offset = 5;
       // Calls faster than min delay
       // LOG.info("Start");
       for (int i = 0; i < 100; i++) {
-        long time = System.currentTimeMillis();
+        long time = EnvironmentEdgeManager.currentTime();
         envEdge.timeQ.put(time);
         CallRunner cr = getMockedCallRunner(time, 2);
-        // LOG.info("" + i + " " + (System.currentTimeMillis() - now) + " cr=" + cr);
         scheduler.dispatch(cr);
       }
       // LOG.info("Loop done");
@@ -534,7 +538,7 @@ public class TestSimpleRpcScheduler {
       envEdge.offset = 151;
       // calls slower than min delay, but not individually slow enough to be dropped
       for (int i = 0; i < 20; i++) {
-        long time = System.currentTimeMillis();
+        long time = EnvironmentEdgeManager.currentTime();
         envEdge.timeQ.put(time);
         CallRunner cr = getMockedCallRunner(time, 2);
         scheduler.dispatch(cr);
@@ -549,7 +553,7 @@ public class TestSimpleRpcScheduler {
       envEdge.offset = 2000;
       // now slow calls and the ones to be dropped
       for (int i = 0; i < 60; i++) {
-        long time = System.currentTimeMillis();
+        long time = EnvironmentEdgeManager.currentTime();
         envEdge.timeQ.put(time);
         CallRunner cr = getMockedCallRunner(time, 100);
         scheduler.dispatch(cr);
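
The TestSimpleRpcScheduler hunk is where the payoff of this rewrite is visible: once the test calls EnvironmentEdgeManager.injectEdge(envEdge), every code path that asks EnvironmentEdgeManager.currentTime() sees the mocked clock, while any stray System.currentTimeMillis() call would silently bypass it. A minimal sketch using ManualEnvironmentEdge, a test edge that ships in hbase-common (the specific values are illustrative):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class InjectEdgeSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(1000L);                 // the fake clock now reads 1000
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      long t1 = EnvironmentEdgeManager.currentTime();
      edge.incValue(150L);                // advance the fake clock by 150 ms
      long t2 = EnvironmentEdgeManager.currentTime();
      if (t2 - t1 != 150L) {              // deterministic; no sleeping needed
        throw new AssertionError("expected exactly 150ms of fake time");
      }
    } finally {
      EnvironmentEdgeManager.reset();     // always restore the default edge
    }
  }
}

A call that still went through System.currentTimeMillis() would ignore the injected edge entirely, which is exactly the class of test bug these replacements remove.
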
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index b8f284e..dd27c79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
@@ -453,9 +454,10 @@ public abstract class AbstractTestDLS {
         row = Arrays.copyOfRange(row, 3, 8); // use last 5 bytes because
         // HBaseTestingUtility.createMultiRegions use 5 bytes key
         byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i));
-        e.add(new KeyValue(row, COLUMN_FAMILY, qualifier, System.currentTimeMillis(), value));
+        e.add(new KeyValue(row, COLUMN_FAMILY, qualifier, EnvironmentEdgeManager.currentTime(),
+          value));
         log.appendData(curRegionInfo, new WALKeyImpl(curRegionInfo.getEncodedNameAsBytes(),
-          tableName, System.currentTimeMillis(), mvcc), e);
+          tableName, EnvironmentEdgeManager.currentTime(), mvcc), e);
         if (0 == i % syncEvery) {
           log.sync();
         }
@@ -510,12 +512,12 @@ public abstract class AbstractTestDLS {
 
   private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems)
       throws InterruptedException {
-    long curt = System.currentTimeMillis();
+    long curt = EnvironmentEdgeManager.currentTime();
     long endt = curt + timems;
     while (curt < endt) {
       if (ctr.sum() == oldval) {
         Thread.sleep(100);
-        curt = System.currentTimeMillis();
+        curt = EnvironmentEdgeManager.currentTime();
       } else {
         assertEquals(newval, ctr.sum());
         return;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 422c0a8..9eab2c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -85,7 +86,7 @@ public class TestActiveMasterManager {
       }
 
       // Create the master node with a dummy address
-      ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
+      ServerName master = ServerName.valueOf("localhost", 1, EnvironmentEdgeManager.currentTime());
       // Should not have a master yet
       DummyMaster dummyMaster = new DummyMaster(zk, master);
       ClusterStatusTracker clusterStatusTracker =
@@ -134,9 +135,9 @@ public class TestActiveMasterManager {
 
       // Create the master node with a dummy address
       ServerName firstMasterAddress =
-          ServerName.valueOf("localhost", 1, System.currentTimeMillis());
+          ServerName.valueOf("localhost", 1, EnvironmentEdgeManager.currentTime());
       ServerName secondMasterAddress =
-          ServerName.valueOf("localhost", 2, System.currentTimeMillis());
+          ServerName.valueOf("localhost", 2, EnvironmentEdgeManager.currentTime());
 
       // Should not have a master yet
       DummyMaster ms1 = new DummyMaster(zk, firstMasterAddress);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
index 0deea15..dd983cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -54,7 +55,7 @@ public class TestClockSkewDetection {
     RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
     request.setPort(1234);
     request.setServerStartCode(-1);
-    request.setServerCurrentTime(System.currentTimeMillis());
+    request.setServerCurrentTime(EnvironmentEdgeManager.currentTime());
     sm.regionServerStartup(request.build(), 0, "0.0.0", ia1);
 
     final Configuration c = HBaseConfiguration.create();
@@ -69,7 +70,7 @@ public class TestClockSkewDetection {
       request = RegionServerStartupRequest.newBuilder();
       request.setPort(1235);
       request.setServerStartCode(-1);
-      request.setServerCurrentTime(System.currentTimeMillis() - maxSkew * 2);
+      request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() - maxSkew * 2);
       sm.regionServerStartup(request.build(), 0, "0.0.0", ia2);
       fail("HMaster should have thrown a ClockOutOfSyncException but didn't.");
     } catch(ClockOutOfSyncException e) {
@@ -85,7 +86,7 @@ public class TestClockSkewDetection {
       request = RegionServerStartupRequest.newBuilder();
       request.setPort(1236);
       request.setServerStartCode(-1);
-      request.setServerCurrentTime(System.currentTimeMillis() + maxSkew * 2);
+      request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() + maxSkew * 2);
       sm.regionServerStartup(request.build(), 0, "0.0.0", ia3);
       fail("HMaster should have thrown a ClockOutOfSyncException but didn't.");
     } catch (ClockOutOfSyncException e) {
@@ -99,7 +100,7 @@ public class TestClockSkewDetection {
     request = RegionServerStartupRequest.newBuilder();
     request.setPort(1237);
     request.setServerStartCode(-1);
-    request.setServerCurrentTime(System.currentTimeMillis() - warningSkew * 2);
+    request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() - warningSkew * 2);
     sm.regionServerStartup(request.build(), 0, "0.0.0", ia4);
 
     // make sure values above warning threshold but below max threshold don't kill
@@ -108,7 +109,7 @@ public class TestClockSkewDetection {
     request = RegionServerStartupRequest.newBuilder();
     request.setPort(1238);
     request.setServerStartCode(-1);
-    request.setServerCurrentTime(System.currentTimeMillis() + warningSkew * 2);
+    request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() + warningSkew * 2);
     sm.regionServerStartup(request.build(), 0, "0.0.0", ia5);
   }
 }
\ No newline at end of file
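
One caller necessarily keeps System.currentTimeMillis(): the EnvironmentEdge implementations themselves, since the default edge has to read the real clock somewhere. A minimal sketch of a custom edge; the fixed-skew behavior is illustrative only, e.g. for driving a test like TestClockSkewDetection above deterministically:

import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

/** An edge that reports a clock running a fixed skew behind real time. */
public class SkewedEnvironmentEdge implements EnvironmentEdge {
  private final long skewMillis;

  public SkewedEnvironmentEdge(long skewMillis) {
    this.skewMillis = skewMillis;
  }

  @Override
  public long currentTime() {
    // Inside an EnvironmentEdge is the one place the raw system
    // clock is still read directly.
    return System.currentTimeMillis() - skewMillis;
  }

  public static void main(String[] args) {
    EnvironmentEdgeManager.injectEdge(new SkewedEnvironmentEdge(30_000L));
    try {
      System.out.println("skewed now = " + EnvironmentEdgeManager.currentTime());
    } finally {
      EnvironmentEdgeManager.reset();
    }
  }
}
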
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
index 890399c..69247ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -92,7 +93,8 @@ public class TestMasterMetrics {
               try {
                 serverManager.regionServerReport(sn,
                   ServerMetricsBuilder.newBuilder(sn).setVersionNumber(sm.getVersionNumber())
-                    .setVersion(sm.getVersion()).setLastReportTimestamp(System.currentTimeMillis())
+                    .setVersion(sm.getVersion())
+                      .setLastReportTimestamp(EnvironmentEdgeManager.currentTime())
                     .build());
               } catch (YouAreDeadException e) {
                 throw new UncheckedIOException(e);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
index 578ee1c..4bcf732 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
@@ -134,7 +135,7 @@ public class TestMasterMetricsWrapper {
         Bytes.toBytes("Z"), 5);
 
       // wait till the table is assigned
-      long timeoutTime = System.currentTimeMillis() + 1000;
+      long timeoutTime = EnvironmentEdgeManager.currentTime() + 1000;
       while (true) {
         List<RegionInfo> regions = master.getAssignmentManager().
           getRegionStates().getRegionsOfTable(table);
@@ -142,7 +143,7 @@ public class TestMasterMetricsWrapper {
           hri = regions.get(2);
           break;
         }
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now > timeoutTime) {
           fail("Could not find an online region");
         }
@@ -155,14 +156,14 @@ public class TestMasterMetricsWrapper {
 
       TEST_UTIL.getAdmin().offline(hri.getRegionName());
 
-      timeoutTime = System.currentTimeMillis() + 800;
+      timeoutTime = EnvironmentEdgeManager.currentTime() + 800;
       RegionStates regionStates = master.getAssignmentManager().getRegionStates();
       while (true) {
         if (regionStates.getRegionByStateOfTable(table)
             .get(RegionState.State.OFFLINE).contains(hri)) {
           break;
         }
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now > timeoutTime) {
           fail("Failed to offline the region in time");
           break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index 425d08b..3c690a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -71,20 +72,20 @@ public class TestMetaAssignmentWithStopMaster {
       ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
 
       UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test");
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       while (UTIL.getMiniHBaseCluster().getMaster() == null ||
         UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) {
         LOG.info("Wait the standby master become active");
         Thread.sleep(3000);
-        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+        if (EnvironmentEdgeManager.currentTime() - startTime > WAIT_TIMEOUT) {
           fail("Wait too long for standby master become active");
         }
       }
-      startTime = System.currentTimeMillis();
+      startTime = EnvironmentEdgeManager.currentTime();
       while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
         LOG.info("Wait the new active master to be initialized");
         Thread.sleep(3000);
-        if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) {
+        if (EnvironmentEdgeManager.currentTime() - startTime > WAIT_TIMEOUT) {
           fail("Wait too long for the new active master to be initialized");
         }
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index 3983040..ce8b4c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
@@ -192,7 +193,7 @@ public class TestRegionPlacement {
       throws IOException, InterruptedException, KeeperException {
     ServerName serverToKill = null;
     int killIndex = 0;
-    Random random = new Random(System.currentTimeMillis());
+    Random random = new Random(EnvironmentEdgeManager.currentTime());
     ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
     LOG.debug("Server holding meta " + metaServer);
     boolean isNamespaceServer = false;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
index 415ad67..7804784 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
 import org.apache.hadoop.hbase.regionserver.TestMasterAddressTracker.NodeCreationListener;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -253,7 +254,7 @@ public class TestSplitLogManager {
     assertTrue(task.isOrphan());
     waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
     assertFalse(task.isUnassigned());
-    long curt = System.currentTimeMillis();
+    long curt = EnvironmentEdgeManager.currentTime();
     assertTrue((task.last_update <= curt) &&
         (task.last_update > (curt - 1000)));
     LOG.info("waiting for manager to resubmit the orphan task");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
index 79cde38..03564bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -99,7 +100,7 @@ public class TestWarmupRegion {
 
     // future timestamp
     for (int i = 0; i < numRows; i++) {
-      long ts = System.currentTimeMillis() * 2;
+      long ts = EnvironmentEdgeManager.currentTime() * 2;
       Put put = new Put(ROW, ts);
       put.addColumn(FAMILY, COLUMN, VALUE);
       table.put(put);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index acd76ee..445aeaf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.KeeperException;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -149,7 +150,7 @@ public class MockMasterServices extends MockNoopMasterServices {
     for (int i = 0; i < numServes; ++i) {
       ServerName sn = ServerName.valueOf("localhost", 100 + i, 1);
       serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn)
-        .setLastReportTimestamp(System.currentTimeMillis()).build());
+        .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build());
     }
     this.procedureExecutor.getEnvironment().setEventReady(initialized, true);
   }
@@ -176,7 +177,7 @@ public class MockMasterServices extends MockNoopMasterServices {
     }
     ServerName sn = ServerName.valueOf(serverName.getAddress().toString(), startCode);
     serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn)
-      .setLastReportTimestamp(System.currentTimeMillis()).build());
+      .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build());
   }
 
   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index b7dd87b..a631fe6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -144,14 +145,14 @@ public class TestAssignmentManager extends TestAssignmentManagerBase {
 
     TransitRegionStateProcedure[] assignments = new TransitRegionStateProcedure[nRegions];
 
-    long st = System.currentTimeMillis();
+    long st = EnvironmentEdgeManager.currentTime();
     bulkSubmit(assignments);
 
     for (int i = 0; i < assignments.length; ++i) {
       ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(), assignments[i]);
       assertTrue(assignments[i].toString(), assignments[i].isSuccess());
     }
-    long et = System.currentTimeMillis();
+    long et = EnvironmentEdgeManager.currentTime();
     float sec = ((et - st) / 1000.0f);
     LOG.info(String.format("[T] Assigning %dprocs in %s (%.2fproc/sec)", assignments.length,
       StringUtils.humanTimeDiff(et - st), assignments.length / sec));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index c3a613c..85255d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.After;
 import org.junit.Before;
@@ -310,7 +311,7 @@ public abstract class TestAssignmentManagerBase {
     newRsAdded++;
     try {
       this.master.getServerManager().regionServerReport(newSn, ServerMetricsBuilder
-        .newBuilder(newSn).setLastReportTimestamp(System.currentTimeMillis()).build());
+        .newBuilder(newSn).setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build());
     } catch (YouAreDeadException e) {
       // should not happen
       throw new UncheckedIOException(e);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
index 202d99e..484e81c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.junit.AfterClass;
@@ -147,7 +148,7 @@ public class TestRegionReplicaSplit {
       final RegionInfo fakeHri =
         RegionInfoBuilder.newBuilder(table.getName()).setStartKey(Bytes.toBytes("a"))
           .setEndKey(Bytes.toBytes("b")).setReplicaId(1)
-          .setRegionId(System.currentTimeMillis()).build();
+          .setRegionId(EnvironmentEdgeManager.currentTime()).build();
 
       // To test AssignProcedure can defend this case.
       HTU.getMiniHBaseCluster().getMaster().getAssignmentManager().assign(fakeHri);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
index ad13401..7258cc1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
@@ -158,7 +158,7 @@ public class TestRegionStateStore {
 
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     ServerName serverName0 =
       ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
     TableName tableName = name.getTableName();
@@ -189,7 +189,7 @@ public class TestRegionStateStore {
   @Test
   public void testEmptyMetaDaughterLocationDuringSplit() throws IOException {
     TableName tableName = name.getTableName();
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     ServerName serverName0 =
       ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
     RegionInfo parent = RegionInfoBuilder.newBuilder(tableName)
@@ -230,7 +230,7 @@ public class TestRegionStateStore {
 
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     ServerName serverName0 =
       ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
 
@@ -261,7 +261,7 @@ public class TestRegionStateStore {
 
   @Test
   public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     TableName tableName = name.getTableName();
 
     RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(tableName)
@@ -397,7 +397,7 @@ public class TestRegionStateStore {
 
   @Test
   public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException {
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     TableName tableName = name.getTableName();
     RegionInfo primary = RegionInfoBuilder.newBuilder(tableName)
       .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
index b24ec16..c887e3a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.junit.After;
@@ -170,7 +171,7 @@ public class TestRegionStates {
     final int NRUNS = 1000000; // 1M
     final RegionStates stateMap = new RegionStates();
 
-    long st = System.currentTimeMillis();
+    long st = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < NRUNS; ++i) {
       final int regionId = i;
       executorService.submit(new Callable<Object>() {
@@ -182,12 +183,12 @@ public class TestRegionStates {
       });
     }
     waitExecutorService(NRUNS);
-    long et = System.currentTimeMillis();
+    long et = EnvironmentEdgeManager.currentTime();
     LOG.info(String.format("PERF STATEMAP INSERT: %s %s/sec",
       StringUtils.humanTimeDiff(et - st),
       StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f))));
 
-    st = System.currentTimeMillis();
+    st = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < NRUNS; ++i) {
       final int regionId = i;
       executorService.submit(new Callable<Object>() {
@@ -200,7 +201,7 @@ public class TestRegionStates {
     }
 
     waitExecutorService(NRUNS);
-    et = System.currentTimeMillis();
+    et = EnvironmentEdgeManager.currentTime();
     LOG.info(String.format("PERF STATEMAP GET: %s %s/sec",
       StringUtils.humanTimeDiff(et - st),
       StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f))));
@@ -212,11 +213,11 @@ public class TestRegionStates {
     final int NRUNS = 1 * 1000000; // 1M
 
     final RegionStates stateMap = new RegionStates();
-    long st = System.currentTimeMillis();
+    long st = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < NRUNS; ++i) {
       stateMap.createRegionStateNode(createRegionInfo(TABLE_NAME, i));
     }
-    long et = System.currentTimeMillis();
+    long et = EnvironmentEdgeManager.currentTime();
     LOG.info(String.format("PERF SingleThread: %s %s/sec",
         StringUtils.humanTimeDiff(et - st),
       StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f))));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
index 70388ef..932941c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -149,7 +150,8 @@ public class TestRogueRSAssignment {
     List<RegionInfo> tableRegions = createTable(tableName);
 
     final ServerName sn = ServerName.parseVersionedServerName(
-        ServerName.valueOf("1.example.org", 1, System.currentTimeMillis()).getVersionedBytes());
+        ServerName.valueOf("1.example.org", 1, EnvironmentEdgeManager.currentTime())
+          .getVersionedBytes());
 
     // make fake request with a region assigned to different RS
     RegionServerStatusProtos.RegionServerReportRequest.Builder request =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index f947bd5..7e882cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -86,7 +86,7 @@ public class TestHFileCleaner {
     Path root = UTIL.getDataTestDirOnTestFS();
     Path file = new Path(root, "file");
     fs.createNewFile(file);
-    long createTime = System.currentTimeMillis();
+    long createTime = EnvironmentEdgeManager.currentTime();
     assertTrue("Test file not created!", fs.exists(file));
     TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
     // update the time info for the file, so the cleaner removes it
@@ -164,7 +164,7 @@ public class TestHFileCleaner {
     HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL);
 
     // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
-    final long createTime = System.currentTimeMillis();
+    final long createTime = EnvironmentEdgeManager.currentTime();
     fs.delete(archivedHfileDir, true);
     fs.mkdirs(archivedHfileDir);
     // Case 1: 1 invalid file, which should be deleted directly
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 2cae1b7..b465e9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -151,7 +152,7 @@ public class TestLogsCleaner {
     final FileSystem fs = FileSystem.get(conf);
     fs.mkdirs(OLD_PROCEDURE_WALS_DIR);
 
-    final long now = System.currentTimeMillis();
+    final long now = EnvironmentEdgeManager.currentTime();
 
     // Case 1: 2 invalid files, which would be deleted directly
     fs.createNewFile(new Path(OLD_WALS_DIR, "a"));
@@ -227,8 +228,8 @@ public class TestLogsCleaner {
     ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
 
     List<FileStatus> dummyFiles = Arrays.asList(
-        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
-        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
+      new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log1")),
+      new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2"))
     );
 
     FaultyZooKeeperWatcher faultyZK =
@@ -287,7 +288,7 @@ public class TestLogsCleaner {
 
     // Subtract 1000 from current time so modtime is for sure older
     // than 'now'.
-    long modTime = System.currentTimeMillis() - 1000;
+    long modTime = EnvironmentEdgeManager.currentTime() - 1000;
     List<FileStatus> dummyFiles = Arrays.asList(
         new FileStatus(100, false, 3, 100, modTime, new Path("log1")),
         new FileStatus(100, false, 3, 100, modTime, new Path("log2"))
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 0febd38..f21ae92 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
@@ -191,9 +192,10 @@ public class TestReplicationHFileCleaner {
     ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
 
     List<FileStatus> dummyFiles =
-        Lists.newArrayList(new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
-            "hfile1")), new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
-            "hfile2")));
+      Lists.newArrayList(new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(),
+          new Path("hfile1")),
+        new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(),
+          new Path("hfile2")));
 
     FaultyZooKeeperWatcher faultyZK =
         new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
index 502f430..83436f9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
@@ -147,7 +148,7 @@ public class TestCatalogJanitor {
     Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, splita,
       td.getColumnFamilies()[0].getName());
     Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     // Reference name has this format: StoreFile#REF_NAME_PARSER
     Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
     FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem();
@@ -620,7 +621,7 @@ public class TestCatalogJanitor {
       td.getColumnFamilies()[0].getName());
     Reference ref =
       top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     // Reference name has this format: StoreFile#REF_NAME_PARSER
     Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
     FileSystem fs = services.getMasterFileSystem().getFileSystem();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java
index 3841d8c..7ff39c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.Before;
@@ -122,11 +123,11 @@ public class TestCatalogJanitorCluster {
     RegionInfo ri = t3Ris.get(0);
     RegionInfo newRi1 = RegionInfoBuilder.newBuilder(ri.getTable())
       .setStartKey(incrementRow(ri.getStartKey())).setEndKey(incrementRow(ri.getEndKey())).build();
-    Put p1 = MetaTableAccessor.makePutFromRegionInfo(newRi1, System.currentTimeMillis());
+    Put p1 = MetaTableAccessor.makePutFromRegionInfo(newRi1, EnvironmentEdgeManager.currentTime());
     RegionInfo newRi2 = RegionInfoBuilder.newBuilder(newRi1.getTable())
       .setStartKey(incrementRow(newRi1.getStartKey())).setEndKey(incrementRow(newRi1.getEndKey()))
       .build();
-    Put p2 = MetaTableAccessor.makePutFromRegionInfo(newRi2, System.currentTimeMillis());
+    Put p2 = MetaTableAccessor.makePutFromRegionInfo(newRi2, EnvironmentEdgeManager.currentTime());
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(p1, p2));
     janitor.scan();
     report = janitor.getLastReport();
@@ -183,7 +184,8 @@ public class TestCatalogJanitorCluster {
     // add a new region [a, cc)
     RegionInfo newRiT4 = RegionInfoBuilder.newBuilder(T4).setStartKey("a".getBytes())
       .setEndKey("cc".getBytes()).build();
-    Put putForT4 = MetaTableAccessor.makePutFromRegionInfo(newRiT4, System.currentTimeMillis());
+    Put putForT4 = MetaTableAccessor.makePutFromRegionInfo(newRiT4,
+      EnvironmentEdgeManager.currentTime());
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT4));
 
     janitor.scan();
@@ -205,7 +207,8 @@ public class TestCatalogJanitorCluster {
     // add a new region [a, g)
     RegionInfo newRiT5 = RegionInfoBuilder.newBuilder(T5).setStartKey("a".getBytes())
       .setEndKey("g".getBytes()).build();
-    Put putForT5 = MetaTableAccessor.makePutFromRegionInfo(newRiT5, System.currentTimeMillis());
+    Put putForT5 = MetaTableAccessor.makePutFromRegionInfo(newRiT5,
+      EnvironmentEdgeManager.currentTime());
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT5));
 
     janitor.scan();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
index 10bf0d6..5ac20fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
@@ -179,13 +180,13 @@ public class TestCatalogJanitorInMemoryStates {
    * @return Daughter regions; caller needs to check table actually split.
    */
   private PairOfSameType<RegionInfo> waitOnDaughters(final RegionInfo r) throws IOException {
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     PairOfSameType<RegionInfo> pair = null;
     try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
       Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
       Result result = null;
       RegionInfo region = null;
-      while ((System.currentTimeMillis() - start) < 60000) {
+      while ((EnvironmentEdgeManager.currentTime() - start) < 60000) {
         result = metaTable.get(new Get(r.getRegionName()));
         if (result == null) {
           break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
index 1cb5a96..063a39e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
@@ -168,12 +169,12 @@ public class TestMetaFixer {
   private static RegionInfo makeOverlap(MasterServices services, RegionInfo a, RegionInfo b)
       throws IOException {
     RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()).
-        setStartKey(a.getStartKey()).
-        setEndKey(b.getEndKey()).
-        build();
+      setStartKey(a.getStartKey()).
+      setEndKey(b.getEndKey()).
+      build();
     MetaTableAccessor.putsToMetaTable(services.getConnection(),
-        Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion,
-            System.currentTimeMillis())));
+      Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion,
+        EnvironmentEdgeManager.currentTime())));
     // TODO: Add checks at assign time to PREVENT being able to assign over existing assign.
     long assign = services.getAssignmentManager().assign(overlapRegion);
     ProcedureTestingUtility.waitProcedures(services.getMasterProcedureExecutor(), assign);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
index e75acb7..049e060 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.hamcrest.core.IsInstanceOf;
 import org.hamcrest.core.StringStartsWith;
 import org.junit.After;
@@ -192,8 +193,8 @@ public class TestLockProcedure {
    * @throws TimeoutException if lock couldn't be acquired.
    */
   private boolean awaitForLocked(long procId, long timeoutInMs) throws Exception {
-    long deadline = System.currentTimeMillis() + timeoutInMs;
-    while (System.currentTimeMillis() < deadline) {
+    long deadline = EnvironmentEdgeManager.currentTime() + timeoutInMs;
+    while (EnvironmentEdgeManager.currentTime() < deadline) {
       LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null,
           LockHeartbeatRequest.newBuilder().setProcId(procId).build());
       if (response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) {
@@ -297,7 +298,7 @@ public class TestLockProcedure {
     // Acquire namespace lock, then queue other locks.
     long nsProcId = queueLock(nsLock);
     assertTrue(awaitForLocked(nsProcId, 2000));
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     sendHeartbeatAndCheckLocked(nsProcId, true);
     long table1ProcId = queueLock(tableLock1);
     long table2ProcId = queueLock(tableLock2);
@@ -305,7 +306,7 @@ public class TestLockProcedure {
     long regions2ProcId = queueLock(regionsLock2);
 
     // Assert tables & region locks are waiting because of namespace lock.
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     // leave extra 10 msec in case more than half the HEARTBEAT_TIMEOUT has passed
     Thread.sleep(Math.min(HEARTBEAT_TIMEOUT / 2, Math.max(HEARTBEAT_TIMEOUT-(now-start)-10, 0)));
     sendHeartbeatAndCheckLocked(nsProcId, true);
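
awaitForLocked above is the canonical deadline loop under this change: compute the deadline once from EnvironmentEdgeManager, then re-read the clock on each iteration. The same shape as a generic helper (DeadlinePoll and the BooleanSupplier condition are illustrative, not HBase API):

    import java.util.function.BooleanSupplier;

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class DeadlinePoll {
      /** Polls until condition holds or timeoutMs elapses on the current edge. */
      static boolean await(BooleanSupplier condition, long timeoutMs, long pollMs)
          throws InterruptedException {
        long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
        while (EnvironmentEdgeManager.currentTime() < deadline) {
          if (condition.getAsBoolean()) {
            return true; // condition satisfied within the budget
          }
          Thread.sleep(pollMs); // real-time pause between probes
        }
        return false; // deadline passed without the condition holding
      }
    }

One caveat: Thread.sleep waits in real time, so under a frozen manual edge such a loop never times out unless the test also advances the injected clock.
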
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
index 898108b..7ea6a10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
 
@@ -201,7 +201,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
   private class AddProcsWorker extends Thread {
     @Override
     public void run() {
-      final Random rand = new Random(System.currentTimeMillis());
+      final Random rand = new Random(EnvironmentEdgeManager.currentTime());
       long procId = procIds.incrementAndGet();
       int index;
       while (procId <= numOps) {
@@ -245,14 +245,14 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
    * @return time taken by threads to complete, in milliseconds.
    */
   long runThreads(Thread[] threads) throws Exception {
-    final long startTime = System.currentTimeMillis();
+    final long startTime = EnvironmentEdgeManager.currentTime();
     for (Thread t : threads) {
       t.start();
     }
     for (Thread t : threads) {
       t.join();
     }
-    return System.currentTimeMillis() - startTime;
+    return EnvironmentEdgeManager.currentTime() - startTime;
   }
 
   @Override
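
runThreads above is the standard elapsed-time idiom through the edge: with the default edge installed it measures wall-clock milliseconds exactly as System.currentTimeMillis() would, while an injected edge lets a test warp the measurement. A sketch of the same idiom (Stopwatch is illustrative, not an HBase class):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class Stopwatch {
      /** Runs task and returns elapsed milliseconds as seen by the current edge. */
      static long timeMillis(Runnable task) {
        long start = EnvironmentEdgeManager.currentTime();
        task.run();
        return EnvironmentEdgeManager.currentTime() - start;
      }
    }

Seeding the Random in AddProcsWorker from the edge, as the hunk above does, is only a convenience; any seed would do.
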
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 3dca544..96693e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -341,7 +342,7 @@ public class MasterProcedureTestingUtility {
     // Ensure one row per region
     assertTrue(rows >= splitKeys.length);
     for (byte[] k: splitKeys) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
+      byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), k);
       byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
       mutator.mutate(createPut(families, key, value));
       rows--;
@@ -349,7 +350,8 @@ public class MasterProcedureTestingUtility {
 
     // Add other extra rows. more rows, more files
     while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+      byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()),
+        Bytes.toBytes(rows));
       byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
       mutator.mutate(createPut(families, key, value));
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
index d90192a..63a50c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -70,7 +71,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
   private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception {
     if (snapshot == null) {
       final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
-      long tid = System.currentTimeMillis();
+      long tid = EnvironmentEdgeManager.currentTime();
       final String snapshotName = "snapshot-" + tid;
 
       Admin admin = UTIL.getAdmin();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
index afe6b96..1337479 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -100,7 +101,7 @@ public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase {
   }
 
   private void setupSnapshotAndUpdateTable() throws Exception {
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     final String snapshotName = "snapshot-" + tid;
     Admin admin = UTIL.getAdmin();
     // create Table
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
index 8df1694..49c9896 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -104,12 +105,12 @@ public class TestSafemodeBringsDownMaster {
     DistributedFileSystem dfs = (DistributedFileSystem) dfsCluster.getFileSystem();
     dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
     final long timeOut = 180000;
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     int index = -1;
     do {
       index = UTIL.getMiniHBaseCluster().getServerWithMeta();
     } while (index == -1 &&
-      startTime + timeOut < System.currentTimeMillis());
+      EnvironmentEdgeManager.currentTime() < startTime + timeOut);

 
     if (index != -1){
       UTIL.getMiniHBaseCluster().abortRegionServer(index);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 9082b1d..45102dd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
 import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 
@@ -83,7 +84,7 @@ public class MasterRegionTestBase {
     Server server = mock(Server.class);
     when(server.getConfiguration()).thenReturn(htu.getConfiguration());
     when(server.getServerName())
-      .thenReturn(ServerName.valueOf("localhost", 12345, System.currentTimeMillis()));
+      .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
     when(server.getChoreService()).thenReturn(choreService);
     Path testDir = htu.getDataTestDir();
     CommonFSUtils.setRootDir(htu.getConfiguration(), testDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java
index 7fc72b3..713fc30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -128,7 +129,7 @@ public class TestMasterRegionCompaction extends MasterRegionTestBase {
     Thread.sleep(2000);
     // touch one file
 
-    long currentTime = System.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime);
     Thread.sleep(3000);
     // only the touched file is still there after clean up
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
index 4e49cb4..9d1e60f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -135,7 +136,8 @@ public class TestMasterRegionOnTwoFileSystems {
     Path walRootDir = WAL_UTIL.getDataTestDirOnTestFS();
     FileSystem walFs = WAL_UTIL.getTestFileSystem();
     walFs.delete(walRootDir, true);
-    region = createMasterRegion(ServerName.valueOf("localhost", 12345, System.currentTimeMillis()));
+    region = createMasterRegion(ServerName.valueOf("localhost", 12345,
+      EnvironmentEdgeManager.currentTime()));
   }
 
   @After
@@ -219,7 +221,7 @@ public class TestMasterRegionOnTwoFileSystems {
       }
       region.close(true);
       region = createMasterRegion(
-        ServerName.valueOf("localhost", 12345, System.currentTimeMillis() + round + 1));
+        ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime() + round + 1));
       try (RegionScanner scanner = region.getScanner(new Scan())) {
         List<Cell> cells = new ArrayList<>();
         boolean moreValues = true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
index 8972dba..bf064c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Assert;
 
 public class MobTestUtil {
@@ -63,7 +64,7 @@ public class MobTestUtil {
    */
   private static void writeStoreFile(final StoreFileWriter writer, byte[] fam,
       byte[] qualifier) throws IOException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
         for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
index db99fae..e4229fe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -138,14 +139,14 @@ public class TestExpiredMobFileCleaner {
     Path mobDirPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family);
 
     byte[] dummyData = makeDummyData(600);
-    long ts = System.currentTimeMillis() - 3 * secondsOfDay() * 1000; // 3 days before
+    long ts = EnvironmentEdgeManager.currentTime() - 3 * secondsOfDay() * 1000; // 3 days before
     putKVAndFlush(table, row1, dummyData, ts);
     FileStatus[] firstFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
     //the first mob file
     assertEquals("Before cleanup without delay 1", 1, firstFiles.length);
     String firstFile = firstFiles[0].getPath().getName();
 
-    ts = System.currentTimeMillis() - 1 * secondsOfDay() * 1000; // 1 day before
+    ts = EnvironmentEdgeManager.currentTime() - 1 * secondsOfDay() * 1000; // 1 day before
     putKVAndFlush(table, row2, dummyData, ts);
     FileStatus[] secondFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
     //now there are 2 mob files
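
The cleaner test backdates cell timestamps relative to the edge so the mob files look already expired; 3 * secondsOfDay() * 1000 is simply three days in milliseconds. A sketch of that arithmetic with the day constant written out (Backdate and agedPut are illustrative names, not HBase API):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class Backdate {
      private static final long MS_PER_DAY = 24L * 60 * 60 * 1000;

      /** Builds a put whose cell appears daysOld days in the past to TTL logic. */
      static Put agedPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, int daysOld) {
        long ts = EnvironmentEdgeManager.currentTime() - daysOld * MS_PER_DAY;
        return new Put(row).addColumn(family, qualifier, ts, value);
      }
    }
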
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
index fefa898..d5f9ae5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -103,7 +104,7 @@ public class TestMobDataBlockEncoding {
   public void testDataBlockEncoding(DataBlockEncoding encoding) throws Exception {
     String TN = "testDataBlockEncoding" + encoding;
     setUp(defaultThreshold, TN, encoding);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
     byte[] value = generateMobValue((int) defaultThreshold + 1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java
index 1865c0b..25c6edc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreCompaction.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.ClassRule;
@@ -336,13 +337,13 @@ public class TestMobStoreCompaction {
     HFileContext meta = new HFileContextBuilder().build();
     HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
         .withFileContext(meta).create();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY,
           Bytes.toBytes("colX"), now, dummyData);
       writer.append(kv);
     } finally {
-      writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
+      writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
       writer.close();
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java
index 32c62ca..b6b03e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobStoreScanner.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -178,7 +179,7 @@ public class TestMobStoreScanner {
   public void testReadPt() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     setUp(0L, tableName);
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     byte[] value1 = Bytes.toBytes("value1");
     Put put1 = new Put(row1);
     put1.addColumn(family, qf1, ts, value1);
@@ -278,7 +279,7 @@ public class TestMobStoreScanner {
   private void testGet(TableName tableName, boolean reversed, boolean doFlush)
       throws Exception {
     setUp(defaultThreshold, tableName);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
     byte [] value = generateMobValue((int)defaultThreshold+1);
@@ -301,7 +302,7 @@ public class TestMobStoreScanner {
   private void testGetReferences(boolean reversed) throws Exception {
     TableName tn = TableName.valueOf("testGetReferences" + reversed);
     setUp(defaultThreshold, tn);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
     byte [] value = generateMobValue((int)defaultThreshold+1);
@@ -337,7 +338,7 @@ public class TestMobStoreScanner {
     byte [] valueLess = generateMobValue((int)defaultThreshold-1);
     byte [] valueEqual = generateMobValue((int)defaultThreshold);
     byte [] valueGreater = generateMobValue((int)defaultThreshold+1);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -384,7 +385,7 @@ public class TestMobStoreScanner {
   private void testGetFromArchive(boolean reversed) throws Exception {
     TableName tn = TableName.valueOf("testGetFromArchive" + reversed);
     setUp(defaultThreshold, tn);
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
     byte [] value = generateMobValue((int)defaultThreshold+1);
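
Note the idiom in testGet and the other scanner tests: read the edge once, then derive ts2 and ts3 as offsets from ts1. That keeps the cell versions strictly ordered even when a test freezes the clock, where separate currentTime() calls could return the same value. A sketch (VersionStamps is illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class VersionStamps {
      /** Returns n strictly increasing timestamps anchored at one clock read. */
      static long[] stamps(int n) {
        long base = EnvironmentEdgeManager.currentTime(); // single read of the edge
        long[] out = new long[n];
        for (int i = 0; i < n; i++) {
          out[i] = base + i; // fixed offsets keep the versions distinct and ordered
        }
        return out;
      }
    }
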
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
index de29223..1a9fee8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluat
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MockServer;
 import org.apache.hadoop.hbase.util.Pair;
 
@@ -42,7 +43,7 @@ public class RegionProcedureStorePerformanceEvaluation
     private final Configuration conf;
 
     private final ServerName serverName =
-      ServerName.valueOf("localhost", 12345, System.currentTimeMillis());
+      ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime());
 
     public DummyServer(Configuration conf) {
       this.conf = conf;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
index dab1825..3246169 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.region.MasterRegion;
 import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 final class RegionProcedureStoreTestHelper {
 
@@ -39,7 +40,7 @@ final class RegionProcedureStoreTestHelper {
     Server server = mock(Server.class);
     when(server.getConfiguration()).thenReturn(conf);
     when(server.getServerName())
-      .thenReturn(ServerName.valueOf("localhost", 12345, System.currentTimeMillis()));
+      .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
     return server;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java
index d7a0ce7..b05cc67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -162,7 +163,7 @@ public class TestRegionProcedureStore extends RegionProcedureStoreTestBase {
     return new RpcCall() {
       @Override
       public long getDeadline() {
-        return System.currentTimeMillis();
+        return EnvironmentEdgeManager.currentTime();
       }
 
       @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index 96dc990..41fcf92 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -130,7 +130,7 @@ public class TestRateLimiter {
 
     // fix the current time in order to get the precise value of interval
     EnvironmentEdge edge = new EnvironmentEdge() {
-      private final long ts = System.currentTimeMillis();
+      private final long ts = EnvironmentEdgeManager.currentTime();
 
       @Override
       public long currentTime() {
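
This hunk is the one subtle case in the whole change: the anonymous class is itself an EnvironmentEdge, and an edge's currentTime() must never delegate back to EnvironmentEdgeManager or the injected edge would recurse into itself. Here the manager is consulted only once, in a field initializer that runs before the edge is installed, which is safe. The same fixed-clock effect, sketched with the stock ManualEnvironmentEdge (assuming its setValue/incValue setters):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    final class FrozenClock {
      static void runWithFrozenClock(Runnable body) {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(EnvironmentEdgeManager.currentTime()); // freeze at "now", before injecting
        EnvironmentEdgeManager.injectEdge(edge);
        try {
          body.run(); // every currentTime() call in here observes the same instant
          // edge.incValue(5000) would advance the frozen clock by five seconds mid-test
        } finally {
          EnvironmentEdgeManager.reset(); // never leak a fake clock into other tests
        }
      }
    }
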
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 6733690..6d96351 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -230,15 +230,15 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     addRows(this.memstore);
     Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparator.getInstance().compareRows(closestToEmpty,
-        new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
+        new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0);
     for (int i = 0; i < ROW_COUNT; i++) {
       Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
-          System.currentTimeMillis()));
+        EnvironmentEdgeManager.currentTime()));
       if (i + 1 == ROW_COUNT) {
         assertNull(nr);
       } else {
         assertTrue(CellComparator.getInstance().compareRows(nr,
-            new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
+            new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0);
       }
     }
     //starting from each row, validate results should contain the starting row
@@ -865,7 +865,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int cellsCount = hmc.getActive().getCellsCount();
     int totalLen = 0;
     for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
+      long timestamp = EnvironmentEdgeManager.currentTime();
       Threads.sleep(1); // to make sure each kv gets a different ts
       byte[] row = Bytes.toBytes(keys[i]);
       byte[] val = Bytes.toBytes(keys[i] + i);
@@ -889,7 +889,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int cellsCount = hmc.getActive().getCellsCount();
     int totalLen = 0;
     for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
+      long timestamp = EnvironmentEdgeManager.currentTime();
       Threads.sleep(1); // to make sure each kv gets a different ts
       byte[] row = Bytes.toBytes(keys[i]);
       KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
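
addRowsByKeys above pairs each timestamp read with Threads.sleep(1) to force distinct versions. When wall-clock ordering is not itself under test, injecting an IncrementingEnvironmentEdge can replace the sleeps, on the assumption that each currentTime() call on that edge returns a strictly larger value:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

    final class NoSleepStamps {
      static void demo() {
        EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
        try {
          long t1 = EnvironmentEdgeManager.currentTime();
          long t2 = EnvironmentEdgeManager.currentTime();
          assert t2 > t1; // each read ticks the clock; no Threads.sleep(1) required
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }
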
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
index 072daa8..4e861da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -493,7 +494,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
   private void addRowsByKeysWith50Cols(AbstractMemStore hmc, String[] keys) {
     byte[] fam = Bytes.toBytes("testfamily");
     for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
+      long timestamp = EnvironmentEdgeManager.currentTime();
       Threads.sleep(1); // to make sure each kv gets a different ts
       byte[] row = Bytes.toBytes(keys[i]);
       for(int  j =0 ;j < 50; j++) {
@@ -629,8 +630,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] row = Bytes.toBytes(keys1[0]);
     byte[] val = Bytes.toBytes(keys1[0] + 0);
     KeyValue kv =
-        new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
-            System.currentTimeMillis(), val);
+      new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
+        EnvironmentEdgeManager.currentTime(), val);
 
     // test 1 bucket
     int totalCellsLen = addRowsByKeys(memstore, keys1);
@@ -695,8 +696,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] row = Bytes.toBytes(keys1[0]);
     byte[] val = Bytes.toBytes(bigVal);
     KeyValue kv =
-            new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
-                    System.currentTimeMillis(), val);
+      new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
+        EnvironmentEdgeManager.currentTime(), val);
 
     // test 1 bucket
     int totalCellsLen = addRowsByKeys(memstore, keys1, val);
@@ -769,8 +770,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] row = Bytes.toBytes(keys1[0]);
     byte[] val = Bytes.toBytes(bigVal);
     KeyValue kv =
-            new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
-                    System.currentTimeMillis(), val);
+      new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
+        EnvironmentEdgeManager.currentTime(), val);
 
     // test 1 bucket
     int totalCellsLen = addRowsByKeys(memstore, keys1, val);
@@ -872,7 +873,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
 
     // Measuring the size of a single kv
     KeyValue kv = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("testfamily"),
-            Bytes.toBytes("testqualifier"), System.currentTimeMillis(), val);
+            Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val);
     long oneCellOnCCMHeapSize =
         (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize());
     long oneCellOnCSLMHeapSize =
@@ -908,7 +909,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] qf = Bytes.toBytes("testqualifier");
     MemStoreSizing memstoreSizing = new NonThreadSafeMemStoreSizing();
     for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
+      long timestamp = EnvironmentEdgeManager.currentTime();
       Threads.sleep(1); // to make sure each kv gets a different ts
       byte[] row = Bytes.toBytes(keys[i]);
       byte[] val = Bytes.toBytes(keys[i] + i);
@@ -927,8 +928,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] row = Bytes.toBytes("A");
     byte[] val = Bytes.toBytes("A" + 0);
     KeyValue kv =
-        new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
-            System.currentTimeMillis(), val);
+      new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
+        EnvironmentEdgeManager.currentTime(), val);
     return ClassSize.align(
         ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize());
   }
@@ -938,8 +939,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     byte[] row = Bytes.toBytes("A");
     byte[] val = Bytes.toBytes("A" + 0);
     KeyValue kv =
-        new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
-            System.currentTimeMillis(), val);
+      new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"),
+        EnvironmentEdgeManager.currentTime(), val);
 
     return toCellChunkMap ?
         ClassSize.align(
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
index 39171da..66fd583 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -190,14 +191,14 @@ public class TestCompactionState {
           admin.majorCompact(table);
         }
       }
-      long curt = System.currentTimeMillis();
+      long curt = EnvironmentEdgeManager.currentTime();
       long waitTime = 5000;
       long endt = curt + waitTime;
       CompactionState state = getCompactionState(stateSource, master, admin, table);
       while (state == CompactionState.NONE && curt < endt) {
         Thread.sleep(10);
         state = getCompactionState(stateSource, master, admin, table);
-        curt = System.currentTimeMillis();
+        curt = EnvironmentEdgeManager.currentTime();
       }
       // Now, should have the right compaction state,
       // otherwise, the compaction should have already been done
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java
index 7a6b3d2..560e2f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -81,7 +82,7 @@ public class TestDataBlockEncodingTool {
         new StoreFileWriter.Builder(conf, fs)
             .withFilePath(path)
             .withFileContext(meta).build();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     byte[] FAMILY = Bytes.toBytes("cf");
     byte[] QUALIFIER = Bytes.toBytes("q");
     try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 9c45a83..dd0ed42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -106,7 +106,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy {
     conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
     store.storeEngine.getCompactionPolicy().setConf(conf);
     try {
-      // The modTime of the mocked store file is currentTimeMillis, so we need to increase the
+      // The modTime of the mocked store file is the current time, so we need to increase the
       // timestamp a bit to make sure that now - lowestModTime is greater than major compaction
       // period(1ms).
       // trigger an aged major compaction
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 986ffd0..ba5a181 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -211,7 +211,7 @@ public class TestDefaultMemStore {
     }
     memstorescanners = this.memstore.getScanners(mvcc.getReadPoint());
     // Assert that new values are seen in kvset as we scan.
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     count = 0;
     int snapshotIndex = 5;
     try (StoreScanner s = new StoreScanner(scan, scanInfo, null, memstorescanners)) {
@@ -577,15 +577,15 @@ public class TestDefaultMemStore {
     addRows(this.memstore);
     Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty,
-        new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
+        new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0);
     for (int i = 0; i < ROW_COUNT; i++) {
       Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i),
-          System.currentTimeMillis()));
+        EnvironmentEdgeManager.currentTime()));
       if (i + 1 == ROW_COUNT) {
         assertNull(nr);
       } else {
         assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr,
-            new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0);
+            new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0);
       }
     }
     //starting from each row, validate results should contain the starting row
@@ -1022,7 +1022,7 @@ public class TestDefaultMemStore {
   protected int addRows(final MemStore hmc, final long ts) {
     for (int i = 0; i < ROW_COUNT; i++) {
       long timestamp = ts == HConstants.LATEST_TIMESTAMP ?
-        System.currentTimeMillis() : ts;
+        EnvironmentEdgeManager.currentTime() : ts;
       for (int ii = 0; ii < QUALIFIER_COUNT; ii++) {
         byte [] row = Bytes.toBytes(i);
         byte [] qf = makeQualifier(i, ii);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 48ad276..3a3e371 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.StoppableImplementation;
@@ -422,14 +423,14 @@ public class TestEndToEndSplitTransaction {
    */
   public static void blockUntilRegionSplit(Configuration conf, long timeout,
       final byte[] regionName, boolean waitForDaughters) throws IOException, InterruptedException {
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     log("blocking until region is split:" + Bytes.toStringBinary(regionName));
     RegionInfo daughterA = null, daughterB = null;
     try (Connection conn = ConnectionFactory.createConnection(conf);
         Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
       Result result = null;
       RegionInfo region = null;
-      while ((System.currentTimeMillis() - start) < timeout) {
+      while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
         result = metaTable.get(new Get(regionName));
         if (result == null) {
           break;
@@ -453,16 +454,16 @@ public class TestEndToEndSplitTransaction {
 
       // if we are here, this means the region split is complete or timed out
       if (waitForDaughters) {
-        long rem = timeout - (System.currentTimeMillis() - start);
+        long rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
         blockUntilRegionIsInMeta(conn, rem, daughterA);
 
-        rem = timeout - (System.currentTimeMillis() - start);
+        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
         blockUntilRegionIsInMeta(conn, rem, daughterB);
 
-        rem = timeout - (System.currentTimeMillis() - start);
+        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
         blockUntilRegionIsOpened(conf, rem, daughterA);
 
-        rem = timeout - (System.currentTimeMillis() - start);
+        rem = timeout - (EnvironmentEdgeManager.currentTime() - start);
         blockUntilRegionIsOpened(conf, rem, daughterB);
 
         // Compacting the new region to make sure references can be cleaned up
@@ -493,8 +494,8 @@ public class TestEndToEndSplitTransaction {
   public static void blockUntilRegionIsInMeta(Connection conn, long timeout, RegionInfo hri)
       throws IOException, InterruptedException {
     log("blocking until region is in META: " + hri.getRegionNameAsString());
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start < timeout) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while (EnvironmentEdgeManager.currentTime() - start < timeout) {
       HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri);
       if (loc != null && !loc.getRegion().isOffline()) {
         log("found region in META: " + hri.getRegionNameAsString());
@@ -507,7 +508,7 @@ public class TestEndToEndSplitTransaction {
   public static void blockUntilRegionIsOpened(Configuration conf, long timeout, RegionInfo hri)
       throws IOException, InterruptedException {
     log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(hri.getTable())) {
       byte[] row = hri.getStartKey();
@@ -516,7 +517,7 @@ public class TestEndToEndSplitTransaction {
         row = new byte[] { '0' };
       }
       Get get = new Get(row);
-      while (System.currentTimeMillis() - start < timeout) {
+      while (EnvironmentEdgeManager.currentTime() - start < timeout) {
         try {
           table.get(get);
           break;
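
blockUntilRegionSplit demonstrates the remaining-budget pattern: one start stamp, then every follow-up wait receives timeout minus elapsed, so all phases share a single overall deadline. A compact sketch of the bookkeeping (TimeBudget is illustrative, not an HBase class):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class TimeBudget {
      private final long start = EnvironmentEdgeManager.currentTime();
      private final long timeoutMs;

      TimeBudget(long timeoutMs) {
        this.timeoutMs = timeoutMs;
      }

      /** Milliseconds left of the overall budget; never negative. */
      long remaining() {
        return Math.max(0, timeoutMs - (EnvironmentEdgeManager.currentTime() - start));
      }
    }

Each blockUntilRegionIsInMeta and blockUntilRegionIsOpened call above would then read budget.remaining() instead of recomputing rem by hand.
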
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index fdb40c1..5105c72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.junit.Assert;
 import org.junit.Before;
@@ -114,7 +115,7 @@ public class TestHMobStore {
   private Cell seekKey3;
   private NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
   private List<Cell> expected = new ArrayList<>();
-  private long id = System.currentTimeMillis();
+  private long id = EnvironmentEdgeManager.currentTime();
   private Get get = new Get(row);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final String DIR = TEST_UTIL.getDataTestDir("TestHMobStore").toString();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index ac26242..2e79bf9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -1659,11 +1659,11 @@ public class TestHRegion {
 
   private void waitForCounter(MetricsWALSource source, String metricName, long expectedCount)
       throws InterruptedException {
-    long startWait = System.currentTimeMillis();
+    long startWait = EnvironmentEdgeManager.currentTime();
     long currentCount;
     while ((currentCount = metricsAssertHelper.getCounter(metricName, source)) < expectedCount) {
       Thread.sleep(100);
-      if (System.currentTimeMillis() - startWait > 10000) {
+      if (EnvironmentEdgeManager.currentTime() - startWait > 10000) {
         fail(String.format("Timed out waiting for '%s' >= '%s', currentCount=%s", metricName,
             expectedCount, currentCount));
       }
@@ -1897,7 +1897,7 @@ public class TestHRegion {
     // Setting up region
     this.region = initHRegion(tableName, method, CONF, fam1);
     // Putting data in key
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     Put put = new Put(row1);
     put.addColumn(fam1, qf1, now, val1);
     region.put(put);
@@ -2044,7 +2044,7 @@ public class TestHRegion {
     region.put(put);
 
     // Creating put to add
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2);
     put = new Put(row1);
     put.add(kv);
@@ -2496,7 +2496,7 @@ public class TestHRegion {
     // Setting up region
     this.region = initHRegion(tableName, method, CONF, fam1);
     // Putting data in key
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     Put put = new Put(row1);
     put.addColumn(fam1, qf1, now, val1);
     region.put(put);
@@ -2654,7 +2654,7 @@ public class TestHRegion {
     region.put(put);
 
     // Creating put to add
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2);
     put = new Put(row1);
     put.add(kv);
@@ -3335,7 +3335,7 @@ public class TestHRegion {
       region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("value")));
       // TS out of range. should error
       region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"),
-          System.currentTimeMillis() + 2000, Bytes.toBytes("value")));
+        EnvironmentEdgeManager.currentTime() + 2000, Bytes.toBytes("value")));
       fail("Expected IOE for TS out of configured timerange");
     } catch (FailedSanityCheckException ioe) {
       LOG.debug("Received expected exception", ioe);
@@ -3396,7 +3396,7 @@ public class TestHRegion {
       .setRow(row)
       .setFamily(COLUMN_FAMILY_BYTES)
       .setQualifier(qual1)
-      .setTimestamp(System.currentTimeMillis())
+      .setTimestamp(EnvironmentEdgeManager.currentTime())
       .setType(KeyValue.Type.Put.getCode())
       .setValue(value1)
       .build();
@@ -3406,7 +3406,7 @@ public class TestHRegion {
       .setRow(row)
       .setFamily(COLUMN_FAMILY_BYTES)
       .setQualifier(qual1)
-      .setTimestamp(System.currentTimeMillis())
+      .setTimestamp(EnvironmentEdgeManager.currentTime())
       .setType(KeyValue.Type.Put.getCode())
       .setValue(Bytes.toBytes("xxxxxxxxxx"))
       .build();
@@ -3551,7 +3551,7 @@ public class TestHRegion {
 
     // extract the key values out the memstore:
     // This is kinda hacky, but better than nothing...
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     AbstractMemStore memstore = (AbstractMemStore) region.getStore(fam1).memstore;
     Cell firstCell = memstore.getActive().first();
     assertTrue(firstCell.getTimestamp() <= now);
@@ -3825,7 +3825,7 @@ public class TestHRegion {
     byte[] fam4 = Bytes.toBytes("fam4");
 
     byte[][] families = { fam1, fam2, fam3, fam4 };
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
 
     // Setting up region
     this.region = initHRegion(tableName, method, CONF, families);
@@ -3883,7 +3883,7 @@ public class TestHRegion {
     byte[] fam1 = Bytes.toBytes("fam1");
     byte[][] families = { fam1 };
 
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -3936,7 +3936,7 @@ public class TestHRegion {
     byte[] fam1 = Bytes.toBytes("fam1");
     byte[][] families = { fam1 };
 
-    long ts1 = 1; // System.currentTimeMillis();
+    long ts1 = 1;
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -4070,7 +4070,7 @@ public class TestHRegion {
     byte[] fam1 = Bytes.toBytes("fam1");
     byte[][] families = { fam1 };
 
-    long ts1 = System.currentTimeMillis();
+    long ts1 = EnvironmentEdgeManager.currentTime();
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -4124,7 +4124,7 @@ public class TestHRegion {
     byte[] qf2 = Bytes.toBytes("qualifier2");
     byte[] fam1 = Bytes.toBytes("fam1");
 
-    long ts1 = 1; // System.currentTimeMillis();
+    long ts1 = 1;
     long ts2 = ts1 + 1;
     long ts3 = ts1 + 2;
 
@@ -4505,7 +4505,7 @@ public class TestHRegion {
         region.put(put);
 
         if (i != 0 && i % compactInterval == 0) {
-          LOG.debug("iteration = " + i+ " ts="+System.currentTimeMillis());
+          LOG.debug("iteration = " + i + " ts=" + EnvironmentEdgeManager.currentTime());
           region.compact(true);
         }
 
@@ -4524,7 +4524,7 @@ public class TestHRegion {
           if (!toggle) {
             flushThread.flush();
           }
-          assertEquals("toggle="+toggle+"i=" + i + " ts="+System.currentTimeMillis(),
+          assertEquals("toggle=" + toggle + " i=" + i + " ts=" + EnvironmentEdgeManager.currentTime(),
               expectedCount, res.size());
           toggle = !toggle;
         }
@@ -5612,7 +5612,7 @@ public class TestHRegion {
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
     }
     TableDescriptor tableDescriptor = builder.build();
-    long time = System.currentTimeMillis();
+    long time = EnvironmentEdgeManager.currentTime();
     RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
       .setRegionId(time).setReplicaId(0).build();
     RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
@@ -5661,7 +5661,7 @@ public class TestHRegion {
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
     }
     TableDescriptor tableDescriptor = builder.build();
-    long time = System.currentTimeMillis();
+    long time = EnvironmentEdgeManager.currentTime();
     RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
       .setRegionId(time).setReplicaId(0).build();
     RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
@@ -5719,7 +5719,7 @@ public class TestHRegion {
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
     }
     TableDescriptor tableDescriptor = builder.build();
-    long time = System.currentTimeMillis();
+    long time = EnvironmentEdgeManager.currentTime();
     RegionInfo primaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
       .setRegionId(time).setReplicaId(0).build();
     RegionInfo secondaryHri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName())
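
The pattern in the hunks above repeats across the whole sweep: every timestamp or region id a test used to derive from the wall clock now flows through EnvironmentEdgeManager. As a reader's sketch of what that indirection buys — not the HBase source; the interface shape and the manager's method names mirror org.apache.hadoop.hbase.util, while ClockSketch and Manager are illustrative:

    public final class ClockSketch {
      // Single-method clock abstraction, mirroring org.apache.hadoop.hbase.util.EnvironmentEdge.
      interface EnvironmentEdge {
        long currentTime();
      }

      // Static delegate, mirroring EnvironmentEdgeManager: callers never touch
      // System.currentTimeMillis() directly, so tests can swap the clock.
      static final class Manager {
        private static volatile EnvironmentEdge delegate = System::currentTimeMillis;
        static long currentTime() { return delegate.currentTime(); }
        static void injectEdge(EnvironmentEdge edge) { delegate = edge; }
        static void reset() { delegate = System::currentTimeMillis; }
      }

      public static void main(String[] args) {
        Manager.injectEdge(() -> 42L);              // a test freezes the clock...
        System.out.println(Manager.currentTime());  // ...and every caller now sees 42
        Manager.reset();
      }
    }

The injection only pays off if no caller bypasses the manager, which is why the changes reach even debug log lines and test scaffolding.
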
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 742d711..71eb7ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -175,7 +175,7 @@ public class TestHRegionReplayEvents {
     }
     htd = builder.build();
 
-    long time = System.currentTimeMillis();
+    long time = EnvironmentEdgeManager.currentTime();
     ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0,
       0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
     primaryHri =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 631bc45..c0d778a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -158,7 +159,7 @@ public class TestHRegionServerBulkLoad {
         .withPath(fs, path)
         .withFileContext(context)
         .create();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys
       for (int i = 0; i < numRows; i++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 20d7a29..0cd8497 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -154,7 +154,7 @@ public class TestHStore {
   List<Cell> expected = new ArrayList<>();
   List<Cell> result = new ArrayList<>();
 
-  long id = System.currentTimeMillis();
+  long id = EnvironmentEdgeManager.currentTime();
   Get get = new Get(row);
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@@ -1058,7 +1058,7 @@ public class TestHStore {
   @Test
   public void testNumberOfMemStoreScannersAfterFlush() throws IOException {
     long seqId = 100;
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
         .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put)
         .setValue(qf1).build();
@@ -1072,7 +1072,7 @@ public class TestHStore {
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1));
 
     seqId = 101;
-    timestamp = System.currentTimeMillis();
+    timestamp = EnvironmentEdgeManager.currentTime();
     Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family)
         .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put)
         .setValue(qf1).build();
@@ -1519,7 +1519,7 @@ public class TestHStore {
 
   @Test
   public void testAge() throws IOException {
-    long currentTime = System.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
     ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
     edge.setValue(currentTime);
     EnvironmentEdgeManager.injectEdge(edge);
@@ -1607,7 +1607,7 @@ public class TestHStore {
     // Set the lower threshold to invoke the "MERGE" policy
     MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {});
     MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing();
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     long seqID = 1L;
     // Add some data to the region and do some flushes
     for (int i = 1; i < 10; i++) {
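
The testAge hunk above shows the idiom this refactor enables: seed a ManualEnvironmentEdge from the real clock, inject it, and advance time explicitly instead of sleeping. A minimal sketch of that idiom, assuming the HBase test classpath (ManualEnvironmentEdge ships with the hbase-common test utilities; ManualClockExample is illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class ManualClockExample {
      public static void main(String[] args) {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(EnvironmentEdgeManager.currentTime()); // seed from the current clock
        EnvironmentEdgeManager.injectEdge(edge);
        try {
          long t0 = EnvironmentEdgeManager.currentTime();
          edge.incValue(10_000); // advance ten seconds without sleeping
          long elapsed = EnvironmentEdgeManager.currentTime() - t0;
          assert elapsed == 10_000; // deterministic: callers read the injected edge
        } finally {
          EnvironmentEdgeManager.reset(); // never leak an injected edge into other tests
        }
      }
    }
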
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index f55372f..55228f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -165,7 +166,7 @@ public class TestHStoreFile {
    */
   public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier)
     throws IOException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) {
         for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) {
@@ -547,7 +548,7 @@ public class TestHStoreFile {
   private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception {
     float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
     Path f = writer.getPath();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < 2000; i += 2) {
       String row = String.format(localFormatter, i);
       KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"),
@@ -630,7 +631,7 @@ public class TestHStoreFile {
       .withMaxKeyCount(2000).withFileContext(meta).build();
 
     // add delete family
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < 2000; i += 2) {
       String row = String.format(localFormatter, i);
       KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"),
@@ -732,7 +733,7 @@ public class TestHStoreFile {
       StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
         .withBloomType(bt[x]).withMaxKeyCount(expKeys[x]).withFileContext(meta).build();
 
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       for (int i = 0; i < rowCount * 2; i += 2) { // rows
         for (int j = 0; j < colCount * 2; j += 2) { // column qualifiers
           String row = String.format(localFormatter, i);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index c281934..9c7929f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPoli
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.After;
 import org.junit.Before;
@@ -248,7 +249,7 @@ public class TestMajorCompaction {
     // should result in a compacted store file that has no references to the
     // deleted row.
     LOG.debug("Adding deletes to memstore and flushing");
-    Delete delete = new Delete(secondRowBytes, System.currentTimeMillis());
+    Delete delete = new Delete(secondRowBytes, EnvironmentEdgeManager.currentTime());
     byte[][] famAndQf = { COLUMN_FAMILY, null };
     delete.addFamily(famAndQf[0]);
     r.delete(delete);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
index dec55a6..cba05a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -80,7 +81,8 @@ public class TestMasterAddressTracker {
 
   @Test
   public void testDeleteIfEquals() throws Exception {
-    final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
+    final ServerName sn = ServerName.valueOf("localhost", 1234,
+      EnvironmentEdgeManager.currentTime());
     final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772);
     try {
       assertFalse("shouldn't have deleted wrong master server.",
@@ -136,7 +138,8 @@ public class TestMasterAddressTracker {
   public void testMasterAddressTrackerFromZK() throws Exception {
     // Create the master node with a dummy address
     final int infoPort = 1235;
-    final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
+    final ServerName sn = ServerName.valueOf("localhost", 1234,
+      EnvironmentEdgeManager.currentTime());
     final MasterAddressTracker addressTracker = setupMasterTracker(sn, infoPort);
     try {
       assertTrue(addressTracker.hasMaster());
@@ -157,12 +160,14 @@ public class TestMasterAddressTracker {
 
   @Test
   public void testNoBackups() throws Exception {
-    final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis());
+    final ServerName sn = ServerName.valueOf("localhost", 1234,
+      EnvironmentEdgeManager.currentTime());
     final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772);
     try {
       assertEquals("Should receive 0 for backup not found.", 0,
-          addressTracker.getBackupMasterInfoPort(
-              ServerName.valueOf("doesnotexist.example.com", 1234, System.currentTimeMillis())));
+        addressTracker.getBackupMasterInfoPort(
+          ServerName.valueOf("doesnotexist.example.com", 1234,
+            EnvironmentEdgeManager.currentTime())));
     } finally {
       assertTrue("Couldn't clean up master",
           MasterAddressTracker.deleteIfEquals(addressTracker.getWatcher(), sn.toString()));
@@ -179,7 +184,8 @@ public class TestMasterAddressTracker {
 
   @Test
   public void testBackupMasters() throws Exception {
-    final ServerName sn = ServerName.valueOf("localhost", 5678, System.currentTimeMillis());
+    final ServerName sn = ServerName.valueOf("localhost", 5678,
+      EnvironmentEdgeManager.currentTime());
     final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1111);
     assertTrue(addressTracker.hasMaster());
     ServerName activeMaster = addressTracker.getMasterAddress();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java
index fd87c17..6c4ed45 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.After;
@@ -117,8 +118,8 @@ public class TestMemStoreSegmentsIterator {
     final byte[] f = Bytes.toBytes(FAMILY);
     final byte[] q = Bytes.toBytes(COLUMN);
     final byte[] v = Bytes.toBytes(3);
-    final KeyValue kv1 = new KeyValue(one, f, q, System.currentTimeMillis(), v);
-    final KeyValue kv2 = new KeyValue(two, f, q, System.currentTimeMillis(), v);
+    final KeyValue kv1 = new KeyValue(one, f, q, EnvironmentEdgeManager.currentTime(), v);
+    final KeyValue kv2 = new KeyValue(two, f, q, EnvironmentEdgeManager.currentTime(), v);
     // the seqId of first cell less than Integer.MAX_VALUE,
     // the seqId of second cell greater than integer.MAX_VALUE
     kv1.setSequenceId(LESS_THAN_INTEGER_MAX_VALUE_SEQ_ID);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
index 4792869..0f36996 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.TestIncrementsFromClientSide;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -177,7 +178,7 @@ public class TestRegionIncrement {
   throws IOException, InterruptedException {
     final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
         TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       SingleCellIncrementer [] threads = new SingleCellIncrementer[THREAD_COUNT];
       for (int i = 0; i < threads.length; i++) {
@@ -202,7 +203,8 @@ public class TestRegionIncrement {
       assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
     } finally {
       closeRegion(region);
-      LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+      LOG.info(this.name.getMethodName() + " " +
+        (EnvironmentEdgeManager.currentTime() - startTime) + "ms");
     }
   }
 
@@ -213,7 +215,7 @@ public class TestRegionIncrement {
   public void testContendedAcrossCellsIncrement() throws IOException, InterruptedException {
     final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
         TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       CrossRowCellIncrementer [] threads = new CrossRowCellIncrementer[THREAD_COUNT];
       for (int i = 0; i < threads.length; i++) {
@@ -235,7 +237,8 @@ public class TestRegionIncrement {
       assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
     } finally {
       closeRegion(region);
-      LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
+      LOG.info(this.name.getMethodName() + " " +
+        (EnvironmentEdgeManager.currentTime() - startTime) + "ms");
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
index 1bd8da6..e5402b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
@@ -349,7 +350,7 @@ public class TestRegionInfo {
   public void testParseName() throws IOException {
     final TableName tableName = TableName.valueOf(name.getMethodName());
     byte[] startKey = Bytes.toBytes("startKey");
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;
 
     // test without replicaId
@@ -379,7 +380,7 @@ public class TestRegionInfo {
     byte[] startKey = Bytes.toBytes("startKey");
     byte[] endKey = Bytes.toBytes("endKey");
     boolean split = false;
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;
 
     RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)
@@ -417,7 +418,7 @@ public class TestRegionInfo {
     checkEquality(h, conf);
     // check HRIs with non-default replicaId
     h = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey)
-      .setEndKey(endKey).setRegionId(System.currentTimeMillis()).setReplicaId(1).build();
+      .setEndKey(endKey).setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(1).build();
     checkEquality(h, conf);
     assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY,
       RegionInfoDisplay.getEndKeyForDisplay(h, conf));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index f15168e..b80358c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Pair;
@@ -249,9 +250,9 @@ public class TestRegionMergeTransactionOnCluster {
       ADMIN.compactRegion(mergedRegionInfo.getRegionName());
       // clean up the merged region store files
       // wait until merged region have reference file
-      long timeout = System.currentTimeMillis() + waitTime;
+      long timeout = EnvironmentEdgeManager.currentTime() + waitTime;
       int newcount = 0;
-      while (System.currentTimeMillis() < timeout) {
+      while (EnvironmentEdgeManager.currentTime() < timeout) {
         for(ColumnFamilyDescriptor colFamily : columnFamilies) {
           newcount += hrfs.getStoreFiles(colFamily.getName()).size();
         }
@@ -269,7 +270,7 @@ public class TestRegionMergeTransactionOnCluster {
         cleaner.chore();
         Thread.sleep(1000);
       }
-      while (System.currentTimeMillis() < timeout) {
+      while (EnvironmentEdgeManager.currentTime() < timeout) {
         int newcount1 = 0;
         for(ColumnFamilyDescriptor colFamily : columnFamilies) {
           newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
@@ -435,8 +436,8 @@ public class TestRegionMergeTransactionOnCluster {
       int expectedRegionNum) throws Exception {
     List<Pair<RegionInfo, ServerName>> tableRegionsInMeta;
     List<RegionInfo> tableRegionsInMaster;
-    long timeout = System.currentTimeMillis() + waitTime;
-    while (System.currentTimeMillis() < timeout) {
+    long timeout = EnvironmentEdgeManager.currentTime() + waitTime;
+    while (EnvironmentEdgeManager.currentTime() < timeout) {
       tableRegionsInMeta =
           MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename);
       tableRegionsInMaster =
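
The two deadline loops rewritten above share one shape: read the clock once to fix the deadline, then re-read it on every pass. A generic sketch of that shape — waitFor and its parameters are illustrative, not HBase API:

    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class DeadlineLoop {
      // Polls condition until it holds or waitTimeMs elapses.
      static boolean waitFor(long waitTimeMs, BooleanSupplier condition)
          throws InterruptedException {
        long deadline = EnvironmentEdgeManager.currentTime() + waitTimeMs;
        while (EnvironmentEdgeManager.currentTime() < deadline) {
          if (condition.getAsBoolean()) {
            return true;
          }
          Thread.sleep(100); // the sleep is real; only the time reads are injectable
        }
        return false;
      }

      public static void main(String[] args) throws InterruptedException {
        long start = EnvironmentEdgeManager.currentTime();
        System.out.println(waitFor(500, () -> EnvironmentEdgeManager.currentTime() > start));
      }
    }
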
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
index 4719d1c..33e6988 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -117,7 +118,7 @@ public class TestRegionOpen {
     // Create new HRI with non-default region replica id
     RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
       .setStartKey(Bytes.toBytes("A")).setEndKey(Bytes.toBytes("B"))
-      .setRegionId(System.currentTimeMillis()).setReplicaId(2).build();
+      .setRegionId(EnvironmentEdgeManager.currentTime()).setReplicaId(2).build();
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
       CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri);
     Path regionDir = regionFs.getRegionDir();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 504a140..5ecddb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -295,8 +296,8 @@ public class TestRegionReplicas {
       // force compaction
       HTU.compact(table.getName(), true);
 
-      long wakeUpTime = System.currentTimeMillis() + 4 * refreshPeriod;
-      while (System.currentTimeMillis() < wakeUpTime) {
+      long wakeUpTime = EnvironmentEdgeManager.currentTime() + 4 * refreshPeriod;
+      while (EnvironmentEdgeManager.currentTime() < wakeUpTime) {
         assertGetRpc(hriSecondary, 42, true);
         assertGetRpc(hriSecondary, 1042, true);
         assertGetRpc(hriSecondary, 2042, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java
index cdbc092..1cb1c02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -111,9 +112,9 @@ public class TestRegionServerAbortTimeout {
     // Abort one region server
     UTIL.getMiniHBaseCluster().getRegionServer(0).abort("Abort RS for test");
 
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     long timeout = REGIONS_NUM * SLEEP_TIME_WHEN_CLOSE_REGION * 10;
-    while (System.currentTimeMillis() - startTime < timeout) {
+    while (EnvironmentEdgeManager.currentTime() - startTime < timeout) {
       if (UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() == 1) {
         assertTrue("Abort timer task should be scheduled", abortTimeoutTaskScheduled);
         return;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index e6b8462..12c31ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
@@ -242,7 +242,7 @@ public class TestRegionServerReportForDuty {
   }
 
   /**
-   * Tests region sever reportForDuty with manual environment edge
+   * Tests region server reportForDuty with a non-default environment edge
    */
   @Test
   public void testReportForDutyWithEnvironmentEdge() throws Exception {
@@ -256,15 +256,14 @@ public class TestRegionServerReportForDuty {
     cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
     cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 1);
 
-    // Inject manual environment edge for clock skew computation between RS and master
-    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
+    // Inject non-default environment edge
+    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
     EnvironmentEdgeManager.injectEdge(edge);
     master = cluster.addMaster();
     rs = cluster.addRegionServer();
     LOG.debug("Starting master: " + master.getMaster().getServerName());
     master.start();
     rs.start();
-
     waitForClusterOnline(master);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
index de87272..d434736 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -83,7 +84,7 @@ public class TestReversibleScanners {
   HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   private static byte[] FAMILYNAME = Bytes.toBytes("testCf");
-  private static long TS = System.currentTimeMillis();
+  private static long TS = EnvironmentEdgeManager.currentTime();
   private static int MAXMVCC = 7;
   private static byte[] ROW = Bytes.toBytes("testRow");
   private static final int ROWSIZE = 200;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java
index 166783f..52e0461 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -149,7 +150,7 @@ public class TestRowPrefixBloomFilter {
         .withMaxKeyCount(expKeys)
         .withFileContext(meta)
         .build();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       //Put with valid row style
       for (int i = 0; i < prefixRowCount; i += 2) { // prefix rows
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index f0d8649..b60a215 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.filter.WhileMatchFilter;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -257,7 +258,7 @@ public class TestScanner {
 
       // Write information to the meta table
 
-      Put put = new Put(ROW_KEY, System.currentTimeMillis());
+      Put put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime());
 
       put.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
           RegionInfo.toByteArray(REGION_INFO));
@@ -284,7 +285,7 @@ public class TestScanner {
 
       String address = HConstants.LOCALHOST_IP + ":" + HBaseTestingUtility.randomFreePort();
 
-      put = new Put(ROW_KEY, System.currentTimeMillis());
+      put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime());
       put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
           Bytes.toBytes(address));
 
@@ -321,7 +322,7 @@ public class TestScanner {
 
       address = "bar.foo.com:4321";
 
-      put = new Put(ROW_KEY, System.currentTimeMillis());
+      put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime());
 
       put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address));
       table.put(put);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index af4bdc7..8cd4d16 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -88,7 +89,7 @@ public class TestScannerWithBulkload {
   @Test
   public void testBulkLoad() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    long l = System.currentTimeMillis();
+    long l = EnvironmentEdgeManager.currentTime();
     Admin admin = TEST_UTIL.getAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
@@ -184,7 +185,8 @@ public class TestScannerWithBulkload {
       writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(new Long(9999999)));
     }
     else {
-    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
+      writer.appendFileInfo(BULKLOAD_TIME_KEY,
+        Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
     }
     writer.close();
     return hfilePath;
@@ -223,7 +225,7 @@ public class TestScannerWithBulkload {
   @Test
   public void testBulkLoadWithParallelScan() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-      final long l = System.currentTimeMillis();
+    final long l = EnvironmentEdgeManager.currentTime();
     final Admin admin = TEST_UTIL.getAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
@@ -265,7 +267,7 @@ public class TestScannerWithBulkload {
   @Test
   public void testBulkLoadNativeHFile() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
-    long l = System.currentTimeMillis();
+    long l = EnvironmentEdgeManager.currentTime();
     Admin admin = TEST_UTIL.getAdmin();
     createTable(admin, tableName);
     Scan scan = createScan();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java
index 2a3f018..4a2cd34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -61,7 +62,7 @@ public class TestSimpleTimeRangeTracker {
   @Test
   public void testTimeRangeInitialized() {
     TimeRangeTracker src = getTimeRangeTracker();
-    TimeRange tr = TimeRange.from(System.currentTimeMillis());
+    TimeRange tr = TimeRange.from(EnvironmentEdgeManager.currentTime());
     assertFalse(src.includesTimeRange(tr));
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
index f1cb1c9..8238406 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -109,7 +110,7 @@ public class TestStoreFileScannerWithTagCompression {
   private void writeStoreFile(final StoreFileWriter writer) throws IOException {
     byte[] fam = Bytes.toBytes("f");
     byte[] qualifier = Bytes.toBytes("q");
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     byte[] b = Bytes.toBytes("k1");
     Tag t1 = new ArrayBackedTag((byte) 1, "tag1");
     Tag t2 = new ArrayBackedTag((byte) 2, "tag2");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
index ec71545..5e30910 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -690,7 +691,7 @@ public class TestStoreScanner {
 
   @Test
   public void testDeleteVersionMaskingMultiplePuts() throws IOException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     KeyValue [] kvs1 = new KeyValue[] {
         create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
         create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care")
@@ -716,7 +717,7 @@ public class TestStoreScanner {
 
   @Test
   public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     KeyValue [] kvs1 = new KeyValue[] {
         create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
         create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care")
@@ -877,7 +878,7 @@ public class TestStoreScanner {
    */
   @Test
   public void testWildCardTtlScan() throws IOException {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     KeyValue [] kvs = new KeyValue[] {
         create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"),
         create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
@@ -943,7 +944,7 @@ public class TestStoreScanner {
    */
   @Test
   public void testExpiredDeleteFamily() throws Exception {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     KeyValue[] kvs = new KeyValue[] {
       new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
         KeyValue.Type.DeleteFamily),
@@ -970,7 +971,7 @@ public class TestStoreScanner {
   @Test
   public void testDeleteMarkerLongevity() throws Exception {
     try {
-      final long now = System.currentTimeMillis();
+      final long now = EnvironmentEdgeManager.currentTime();
       EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
         @Override
         public long currentTime() {
@@ -1040,7 +1041,7 @@ public class TestStoreScanner {
 
   @Test
   public void testPreadNotEnabledForCompactionStoreScanners() throws Exception {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     KeyValue[] kvs = new KeyValue[] {
       new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000,
         KeyValue.Type.DeleteFamily),
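
testDeleteMarkerLongevity above pins "now" by injecting an anonymous EnvironmentEdge through the test helper, making TTL and delete-marker expiry arithmetic reproducible. The same idiom in isolation, assuming the HBase test classpath (FixedClock is illustrative):

    import org.apache.hadoop.hbase.util.EnvironmentEdge;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;

    final class FixedClock {
      public static void main(String[] args) {
        final long now = EnvironmentEdgeManager.currentTime();
        EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
          @Override
          public long currentTime() {
            return now; // every caller observes the same instant
          }
        });
        try {
          assert EnvironmentEdgeManager.currentTime() == now;
        } finally {
          EnvironmentEdgeManagerTestHelper.reset(); // restore the default edge
        }
      }
    }
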
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 687902d..ffb4c7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -229,7 +230,7 @@ public class TestWALLockup {
       Put put = new Put(bytes);
       put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
       WALKeyImpl key = new WALKeyImpl(region.getRegionInfo().getEncodedNameAsBytes(),
-        TableName.META_TABLE_NAME, System.currentTimeMillis(), mvcc, scopes);
+        TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime(), mvcc, scopes);
       WALEdit edit = new WALEdit();
       CellScanner CellScanner = put.cellScanner();
       assertTrue(CellScanner.advance());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
index ea32a33..f8a95d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -104,7 +105,7 @@ public class TestWideScanner {
     for (char c = 'a'; c <= 'c'; c++) {
       byte[] row = Bytes.toBytes("ab" + c);
       int i, j;
-      long ts = System.currentTimeMillis();
+      long ts = EnvironmentEdgeManager.currentTime();
       for (i = 0; i < 100; i++) {
         byte[] b = Bytes.toBytes(String.format("%10d", i));
         for (j = 0; j < 100; j++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java
index d8a2274..baa2267 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -48,7 +49,7 @@ public class TestCloseChecker {
 
     Configuration conf = new Configuration();
 
-    long currentTime = System.currentTimeMillis();
+    long currentTime = EnvironmentEdgeManager.currentTime();
 
     conf.setInt(SIZE_LIMIT_KEY, 10);
     conf.setLong(TIME_LIMIT_KEY, 10);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
index 9fee333..3090572 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
@@ -216,7 +216,7 @@ public class TestFIFOCompactionPolicy {
         .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
         .build();
     Table table = TEST_UTIL.createTable(desc, null);
-    long ts = System.currentTimeMillis() - 10 * 1000;
+    long ts = EnvironmentEdgeManager.currentTime() - 10 * 1000;
     Put put =
         new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
     table.put(put);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
index 9198bd5..3157e33 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -125,18 +126,18 @@ public class TestCompactionWithThroughputController {
     try {
       HStore store = prepareData();
       assertEquals(10, store.getStorefilesCount());
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       TEST_UTIL.getAdmin().majorCompact(tableName);
       while (store.getStorefilesCount() != 1) {
         Thread.sleep(20);
       }
-      long duration = System.currentTimeMillis() - startTime;
+      long duration = EnvironmentEdgeManager.currentTime() - startTime;
       double throughput = (double) store.getStorefilesSize() / duration * 1000;
       // confirm that the speed limit work properly(not too fast, and also not too slow)
       // 20% is the max acceptable error rate.
       assertTrue(throughput < throughputLimit * 1.2);
       assertTrue(throughput > throughputLimit * 0.8);
-      return System.currentTimeMillis() - startTime;
+      return EnvironmentEdgeManager.currentTime() - startTime;
     } finally {
       TEST_UTIL.shutdownMiniCluster();
     }
@@ -154,12 +155,12 @@ public class TestCompactionWithThroughputController {
     try {
       HStore store = prepareData();
       assertEquals(10, store.getStorefilesCount());
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       TEST_UTIL.getAdmin().majorCompact(tableName);
       while (store.getStorefilesCount() != 1) {
         Thread.sleep(20);
       }
-      return System.currentTimeMillis() - startTime;
+      return EnvironmentEdgeManager.currentTime() - startTime;
     } finally {
       TEST_UTIL.shutdownMiniCluster();
     }
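
The throughput assertions above reduce to simple arithmetic: storefile bytes divided by elapsed milliseconds, scaled to bytes per second, must land within 20% of the configured limit. The check in isolation, with made-up numbers (ThroughputCheck is illustrative):

    final class ThroughputCheck {
      public static void main(String[] args) {
        long storefilesSize = 10L * 1024 * 1024; // 10 MiB compacted (hypothetical)
        long durationMs = 10_000;                // elapsed, via EnvironmentEdgeManager
        double throughputLimit = 1024d * 1024;   // 1 MiB/s limit (hypothetical)
        double throughput = (double) storefilesSize / durationMs * 1000; // bytes/s
        assert throughput < throughputLimit * 1.2; // not too fast
        assert throughput > throughputLimit * 0.8; // not too slow
      }
    }
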
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 9c2340c..c43d49b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -181,7 +181,7 @@ public abstract class AbstractTestFSWAL {
       throws IOException {
     final byte[] row = Bytes.toBytes(cf);
     for (int i = 0; i < times; i++) {
-      long timestamp = System.currentTimeMillis();
+      long timestamp = EnvironmentEdgeManager.currentTime();
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, row, row, timestamp, row));
       WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(),
@@ -390,7 +390,7 @@ public abstract class AbstractTestFSWAL {
     final String name = "testFailedToCreateWALIfParentRenamed";
     AbstractFSWAL<?> wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), name,
       HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null);
-    long filenum = System.currentTimeMillis();
+    long filenum = EnvironmentEdgeManager.currentTime();
     Path path = wal.computeFilename(filenum);
     wal.createWriterInstance(path);
     Path parent = path.getParent();
@@ -469,7 +469,7 @@ public abstract class AbstractTestFSWAL {
       for (int i = 0; i < countPerFamily; i++) {
         final RegionInfo info = region.getRegionInfo();
         final WALKeyImpl logkey = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
-            System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
+          EnvironmentEdgeManager.currentTime(), clusterIds, -1, -1, region.getMVCC(), scopes);
         wal.append(info, logkey, edits, true);
         region.getMVCC().completeAndWait(logkey.getWriteEntry());
       }
@@ -511,7 +511,7 @@ public abstract class AbstractTestFSWAL {
     for (byte[] fam : td.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     byte[] row = Bytes.toBytes("row");
     WALEdit cols = new WALEdit();
     cols.add(new KeyValue(row, row, row, timestamp, row));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
index 9322c5e..e3da0ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -140,7 +141,7 @@ public abstract class AbstractTestLogRollPeriod {
     });
 
     // Sleep until we should get at least min-LogRoll events
-    long wtime = System.currentTimeMillis();
+    long wtime = EnvironmentEdgeManager.currentTime();
     Thread.sleep((minRolls + 1) * LOG_ROLL_PERIOD);
     // Do some extra sleep in case the machine is slow,
     // and the log-roll is not triggered exactly on LOG_ROLL_PERIOD.
@@ -148,7 +149,7 @@ public abstract class AbstractTestLogRollPeriod {
     for (int retry = 0; paths.size() < minRolls && retry < NUM_RETRIES; ++retry) {
       Thread.sleep(LOG_ROLL_PERIOD / 4);
     }
-    wtime = System.currentTimeMillis() - wtime;
+    wtime = EnvironmentEdgeManager.currentTime() - wtime;
     LOG.info(String.format("got %d rolls after %dms (%dms each) - expected at least %d rolls",
                            paths.size(), wtime, wtime / paths.size(), minRolls));
     assertFalse(paths.size() < minRolls);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
index 5098609..43095d34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.junit.After;
@@ -110,7 +111,7 @@ public abstract class AbstractTestProtobufLog {
     int recordCount = 5;
     TableName tableName = TableName.valueOf("tablename");
     byte[] row = Bytes.toBytes("row");
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     Path path = new Path(dir, "tempwal");
     // delete the log if already exists, for test only
     fs.delete(path, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 5a9de40..d967f5f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -156,7 +156,8 @@ public abstract class AbstractTestWALReplay {
     this.hbaseRootDir = CommonFSUtils.getRootDir(this.conf);
     this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
     String serverName =
-      ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis())
+      ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010,
+        EnvironmentEdgeManager.currentTime())
         .toString();
     this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
     this.logDir = new Path(this.hbaseRootDir, logName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
index 5ca5eb4..7b342a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -136,7 +137,7 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL {
     for (byte[] fam : td.getColumnFamilyNames()) {
       scopes.put(fam, 0);
     }
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     String testName = currentTest.getMethodName();
     AtomicInteger failedCount = new AtomicInteger(0);
     try (LogRoller roller = new LogRoller(services);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
index f73b4f1..50bb277 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter;
 import org.apache.hadoop.hbase.wal.WALFactory;
@@ -99,7 +100,7 @@ public class TestCombinedAsyncWriter {
     int recordCount = 5;
     TableName tableName = TableName.valueOf("tablename");
     byte[] row = Bytes.toBytes("row");
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     Path path1 = getPath(1);
     Path path2 = getPath(2);
     FileSystem fs = UTIL.getTestFileSystem();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
index 2ba70ad..469b289 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
@@ -124,7 +125,8 @@ public class TestDurability {
   @Test
   public void testDurability() throws Exception {
     WALFactory wals = new WALFactory(CONF,
-        ServerName.valueOf("TestDurability", 16010, System.currentTimeMillis()).toString());
+      ServerName.valueOf("TestDurability", 16010, EnvironmentEdgeManager.currentTime())
+        .toString());
     HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
     WAL wal = region.getWAL();
     HRegion deferredRegion = createHRegion(region.getTableDescriptor(), region.getRegionInfo(),
@@ -188,7 +190,8 @@ public class TestDurability {
 
     // Setting up region
     WALFactory wals = new WALFactory(CONF,
-        ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
+      ServerName.valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime())
+        .toString());
     HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
     WAL wal = region.getWAL();
 
@@ -253,9 +256,9 @@ public class TestDurability {
 
     // Setting up region
     WALFactory wals = new WALFactory(CONF,
-        ServerName
-            .valueOf("testIncrementWithReturnResultsSetToFalse", 16010, System.currentTimeMillis())
-            .toString());
+      ServerName.valueOf("testIncrementWithReturnResultsSetToFalse",
+        16010, EnvironmentEdgeManager.currentTime())
+          .toString());
     HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
 
     Increment inc1 = new Increment(row1);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index d0274de..9d32594 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -189,7 +190,7 @@ public class TestLogRollAbort {
   public void testLogRollAfterSplitStart() throws IOException {
     LOG.info("Verify wal roll after split starts will fail.");
     String logName = ServerName.valueOf("testLogRollAfterSplitStart",
-        16010, System.currentTimeMillis()).toString();
+        16010, EnvironmentEdgeManager.currentTime()).toString();
     Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
     final WALFactory wals = new WALFactory(conf, logName);
 
@@ -208,7 +209,7 @@ public class TestLogRollAbort {
         NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
         scopes.put(Bytes.toBytes("column"), 0);
         log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName,
-          System.currentTimeMillis(), mvcc, scopes), kvs);
+          EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs);
       }
       // Send the data to HDFS datanodes and close the HDFS writer
       log.sync();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 0712b59..8b4b710 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
@@ -289,7 +290,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
     }
     Put tmpPut = new Put(Bytes.toBytes("tmprow"));
     tmpPut.addColumn(HConstants.CATALOG_FAMILY, null, value);
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     long remaining = timeout;
     while (remaining > 0) {
       if (log.isLowReplicationRollEnabled() == expect) {
@@ -302,7 +303,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
         } catch (InterruptedException e) {
           // continue
         }
-        remaining = timeout - (System.currentTimeMillis() - startTime);
+        remaining = timeout - (EnvironmentEdgeManager.currentTime() - startTime);
       }
     }
   }
@@ -367,7 +368,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
 
     writeData(table, 2);
 
-    long curTime = System.currentTimeMillis();
+    long curTime = EnvironmentEdgeManager.currentTime();
     LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName());
     long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
     assertTrue("Log should have a timestamp older than now",
@@ -462,7 +463,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
 
       writeData(table, 1002);
 
-      long curTime = System.currentTimeMillis();
+      long curTime = EnvironmentEdgeManager.currentTime();
       LOG.info("log.getCurrentFileName()): " + AbstractFSWALProvider.getCurrentFileName(log));
       long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
       assertTrue("Log should have a timestamp older than now",
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index ac89c92..96d2a42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -162,7 +163,7 @@ public class TestLogRollingNoCluster {
         FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
         TableDescriptor htd = tds.get(TableName.META_TABLE_NAME);
         for (int i = 0; i < this.count; i++) {
-          long now = System.currentTimeMillis();
+          long now = EnvironmentEdgeManager.currentTime();
           // Roll every ten edits
           if (i % 10 == 0) {
             this.wal.rollWriter();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java
index 2d72618..511161c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterfa
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -125,7 +126,7 @@ import org.junit.experimental.categories.Category;
     final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size();
     // for each RS, create an empty wal with same walGroupId
     final List<Path> emptyWalPaths = new ArrayList<>();
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < numRs; i++) {
       RegionInfo regionInfo =
         UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo();
@@ -165,7 +166,7 @@ import org.junit.experimental.categories.Category;
     final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size();
     // for each RS, create an empty wal with same walGroupId
     final List<Path> emptyWalPaths = new ArrayList<>();
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     for (int i = 0; i < numRs; i++) {
       RegionInfo regionInfo =
         UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo();
@@ -223,8 +224,7 @@ import org.junit.experimental.categories.Category;
     final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size();
     // for each RS, create an empty wal with same walGroupId
     final List<Path> emptyWalPaths = new ArrayList<>();
-
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     WAL wal = null;
     for (int i = 0; i < numRs; i++) {
       RegionInfo regionInfo =
@@ -279,8 +279,7 @@ import org.junit.experimental.categories.Category;
     final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size();
     // for each RS, create an empty wal with same walGroupId
     final List<Path> emptyWalPaths = new ArrayList<>();
-
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();
     WAL wal = null;
     for (int i = 0; i < numRs; i++) {
       RegionInfo regionInfo =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
index b972c5f..7d33ce6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
@@ -420,7 +421,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
 
   private Entry createEntry(String tableName, TreeMap<byte[], Integer> scopes, byte[]... kvs) {
     WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName),
-        System.currentTimeMillis() - 1L,
+      EnvironmentEdgeManager.currentTime() - 1L,
         scopes);
     WALEdit edit1 = new WALEdit();
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
index c245726..360424a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
@@ -84,14 +85,14 @@ public class TestReplicationKillRS extends TestReplicationBase {
     }
 
     int lastCount = 0;
-    final long start = System.currentTimeMillis();
+    final long start = EnvironmentEdgeManager.currentTime();
     int i = 0;
     try (Connection conn = ConnectionFactory.createConnection(CONF2)) {
       try (Table table = conn.getTable(tableName)) {
         while (true) {
           if (i == NB_RETRIES - 1) {
             fail("Waited too much time for queueFailover replication. " + "Waited "
-                + (System.currentTimeMillis() - start) + "ms.");
+                + (EnvironmentEdgeManager.currentTime() - start) + "ms.");
           }
           Result[] res2;
           try (ResultScanner scanner = table.getScanner(new Scan())) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 8178a23..625fb59 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -316,7 +316,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     assertEquals(NB_ROWS_IN_BIG_BATCH, res.length);
 
     LOG.info("Looking in table2 for replicated rows in testLoading");
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     // Retry more than NB_RETRIES. As it was, retries were done in 5 seconds and we'd fail
     // sometimes.
     final long retries = NB_RETRIES * 10;
@@ -337,7 +337,8 @@ public class TestReplicationSmallTests extends TestReplicationBase {
           }
           LOG.error("Last row: " + lastRow);
           fail("Waited too much time for normal batch replication, " + res.length + " instead of "
-              + NB_ROWS_IN_BIG_BATCH + "; waited=" + (System.currentTimeMillis() - start) + "ms");
+            + NB_ROWS_IN_BIG_BATCH + "; waited="
+            + (EnvironmentEdgeManager.currentTime() - start) + "ms");
         } else {
           LOG.info("Only got " + res.length + " rows... retrying");
           Thread.sleep(SLEEP_TIME);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index 3a6cfd4..a0d5cc9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
@@ -68,14 +69,14 @@ public class TestReplicationWALEntryFilters {
     // meta
     WALKeyImpl key1 =
       new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-        TableName.META_TABLE_NAME, System.currentTimeMillis());
+        TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime());
     Entry metaEntry = new Entry(key1, null);
 
     assertNull(filter.filter(metaEntry));
 
     // user table
     WALKeyImpl key3 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
-        System.currentTimeMillis());
+      EnvironmentEdgeManager.currentTime());
     Entry userEntry = new Entry(key3, null);
 
     assertEquals(userEntry, filter.filter(userEntry));
@@ -479,7 +480,7 @@ public class TestReplicationWALEntryFilters {
 
   private Entry createEntry(TreeMap<byte[], Integer> scopes, byte[]... kvs) {
     WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
-      System.currentTimeMillis(), scopes);
+      EnvironmentEdgeManager.currentTime(), scopes);
     WALEdit edit1 = new WALEdit();
 
     for (byte[] kv : kvs) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
index d01a0ac..2028127 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
@@ -80,7 +81,7 @@ public class TestRecoverStandbyProcedure {
 
   private static final byte[] qualifier = Bytes.toBytes("q");
 
-  private static final long timestamp = System.currentTimeMillis();
+  private static final long timestamp = EnvironmentEdgeManager.currentTime();
 
   private static final int ROW_COUNT = 1000;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
index f49f9b9..b216af1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestDumpReplicationQueues.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -62,7 +63,7 @@ public class TestDumpReplicationQueues {
     when(zkWatcherMock.getRecoverableZooKeeper()).thenReturn(recoverableZooKeeperMock);
     when(zkWatcherMock.getZNodePaths()).thenReturn(zNodePath);
     List<String> nodes = new ArrayList<>();
-    String server = "rs1,60030,"+System.currentTimeMillis();
+    String server = "rs1,60030," + EnvironmentEdgeManager.currentTime();
     nodes.add(server);
     when(recoverableZooKeeperMock.getChildren("/hbase/rs", null)).thenReturn(nodes);
     when(recoverableZooKeeperMock.getChildren("/hbase/replication/rs", null)).
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
index f63a923..b20b67f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestHBaseInterClusterReplicationEndpointFilterEdits.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
@@ -97,15 +98,16 @@ public class TestHBaseInterClusterReplicationEndpointFilterEdits {
   public void testFilterNotExistColumnFamilyEdits() {
     List<List<Entry>> entryList = new ArrayList<>();
     // should be filtered
-    Cell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER, System.currentTimeMillis(),
-        Type.Put, VALUE);
-    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
-        new WALEdit().add(c1));
+    Cell c1 = new KeyValue(ROW, NON_EXISTING_FAMILY, QUALIFIER,
+      EnvironmentEdgeManager.currentTime(), Type.Put, VALUE);
+    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
+      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c1));
     entryList.add(Lists.newArrayList(e1));
     // should be kept
-    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
-    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
-        new WALEdit().add(c2));
+    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
+      Type.Put, VALUE);
+    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
+      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c2));
     entryList.add(Lists.newArrayList(e2, e1));
     List<List<Entry>> filtered = endpoint.filterNotExistColumnFamilyEdits(entryList);
     assertEquals(1, filtered.size());
@@ -118,14 +120,16 @@ public class TestHBaseInterClusterReplicationEndpointFilterEdits {
   public void testFilterNotExistTableEdits() {
     List<List<Entry>> entryList = new ArrayList<>();
     // should be filtered
-    Cell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
-    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2, System.currentTimeMillis()),
-        new WALEdit().add(c1));
+    Cell c1 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
+      Type.Put, VALUE);
+    Entry e1 = new Entry(new WALKeyImpl(new byte[32], TABLE2,
+      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c1));
     entryList.add(Lists.newArrayList(e1));
     // should be kept
-    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, System.currentTimeMillis(), Type.Put, VALUE);
-    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1, System.currentTimeMillis()),
-        new WALEdit().add(c2));
+    Cell c2 = new KeyValue(ROW, FAMILY, QUALIFIER, EnvironmentEdgeManager.currentTime(),
+      Type.Put, VALUE);
+    Entry e2 = new Entry(new WALKeyImpl(new byte[32], TABLE1,
+      EnvironmentEdgeManager.currentTime()), new WALEdit().add(c2));
     entryList.add(Lists.newArrayList(e2));
     List<List<Entry>> filtered = endpoint.filterNotExistTableEdits(entryList);
     assertEquals(1, filtered.size());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
index ffe0957..eb0cda2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -436,7 +437,7 @@ public class TestReplicationSink {
     } catch (InterruptedException e) {
       LOG.info("Was interrupted while sleep, meh", e);
     }
-    final long now = System.currentTimeMillis();
+    final long now = EnvironmentEdgeManager.currentTime();
     KeyValue kv = null;
     if(type.getCode() == KeyValue.Type.Put.getCode()) {
       kv = new KeyValue(rowBytes, fam, fam, now,
@@ -463,7 +464,7 @@ public class TestReplicationSink {
     uuidBuilder.setMostSigBits(HConstants.DEFAULT_CLUSTER_ID.getMostSignificantBits());
     keyBuilder.setClusterId(uuidBuilder.build());
     keyBuilder.setTableName(UnsafeByteOperations.unsafeWrap(table.getName()));
-    keyBuilder.setWriteTime(System.currentTimeMillis());
+    keyBuilder.setWriteTime(EnvironmentEdgeManager.currentTime());
     keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(HConstants.EMPTY_BYTE_ARRAY));
     keyBuilder.setLogSequenceNumber(-1);
     builder.setKey(keyBuilder.build());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 3528072..9d9a5fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -300,7 +300,8 @@ public abstract class TestReplicationSourceManager {
       }
       LOG.info(Long.toString(i));
       final long txid = wal.appendData(hri,
-        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes),
+        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
+          mvcc, scopes),
         edit);
       wal.sync(txid);
     }
@@ -314,7 +315,8 @@ public abstract class TestReplicationSourceManager {
 
     for (int i = 0; i < 3; i++) {
       wal.appendData(hri,
-        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes),
+        new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
+          mvcc, scopes),
         edit);
     }
     wal.sync();
@@ -336,7 +338,8 @@ public abstract class TestReplicationSourceManager {
       new WALEntryBatch(0, manager.getSources().get(0).getCurrentPath()));
 
     wal.appendData(hri,
-      new WALKeyImpl(hri.getEncodedNameAsBytes(), test, System.currentTimeMillis(), mvcc, scopes),
+      new WALKeyImpl(hri.getEncodedNameAsBytes(), test, EnvironmentEdgeManager.currentTime(),
+        mvcc, scopes),
       edit);
     wal.sync();
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
index 3ba26f3..50fbe0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -109,8 +110,8 @@ public class TestSerialReplicationEndpoint {
 
     try (Table table = CONN.getTable(tableName)) {
       for (int i = 0; i < cellNum; i++) {
-        Put put = new Put(Bytes.toBytes(i)).addColumn(family, qualifier, System.currentTimeMillis(),
-          Bytes.toBytes(i));
+        Put put = new Put(Bytes.toBytes(i)).addColumn(family, qualifier,
+          EnvironmentEdgeManager.currentTime(), Bytes.toBytes(i));
         table.put(put);
       }
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java
index 3442f98..79cae4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.replication.WALEntryFilter;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
@@ -399,8 +400,8 @@ public class TestWALEntryStream {
     attributes.put("foo", Bytes.toBytes("foo-value"));
     attributes.put("bar", Bytes.toBytes("bar-value"));
     WALKeyImpl key = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
-        System.currentTimeMillis(), new ArrayList<UUID>(), 0L, 0L,
-        mvcc, scopes, attributes);
+      EnvironmentEdgeManager.currentTime(), new ArrayList<UUID>(), 0L, 0L,
+      mvcc, scopes, attributes);
     Assert.assertEquals(attributes, key.getExtendedAttributes());
 
     WALProtos.WALKey.Builder builder = key.getBuilder(WALCellCodec.getNoneCompressor());
@@ -661,8 +662,8 @@ public class TestWALEntryStream {
 
   private void appendToLog(String key) throws IOException {
     final long txid = log.appendData(info,
-      new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(),
-          mvcc, scopes), getWALEdit(key));
+      new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
+        EnvironmentEdgeManager.currentTime(), mvcc, scopes), getWALEdit(key));
     log.sync(txid);
   }
 
@@ -685,14 +686,14 @@ public class TestWALEntryStream {
 
   private long appendToLog(int count) throws IOException {
     return log.appendData(info, new WALKeyImpl(info.getEncodedNameAsBytes(), tableName,
-      System.currentTimeMillis(), mvcc, scopes), getWALEdits(count));
+      EnvironmentEdgeManager.currentTime(), mvcc, scopes), getWALEdits(count));
   }
 
   private WALEdit getWALEdits(int count) {
     WALEdit edit = new WALEdit();
     for (int i = 0; i < count; i++) {
-      edit.add(new KeyValue(Bytes.toBytes(System.currentTimeMillis()), family, qualifier,
-          System.currentTimeMillis(), qualifier));
+      edit.add(new KeyValue(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), family,
+        qualifier, EnvironmentEdgeManager.currentTime(), qualifier));
     }
     return edit;
   }
@@ -700,7 +701,8 @@ public class TestWALEntryStream {
   private WALEdit getWALEdit(String row) {
     WALEdit edit = new WALEdit();
     edit.add(
-      new KeyValue(Bytes.toBytes(row), family, qualifier, System.currentTimeMillis(), qualifier));
+      new KeyValue(Bytes.toBytes(row), family, qualifier, EnvironmentEdgeManager.currentTime(),
+        qualifier));
     return edit;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
index dc3144f..5c58a5d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/EnableRSGroupsTestBase.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -69,8 +70,8 @@ public abstract class EnableRSGroupsTestBase {
     // wait RSGroupBasedLoadBalancer online
     RSGroupBasedLoadBalancer loadBalancer =
       (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer();
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start <= 60000 && !loadBalancer.isOnline()) {
+    long start = EnvironmentEdgeManager.currentTime();
+    while (EnvironmentEdgeManager.currentTime() - start <= 60000 && !loadBalancer.isOnline()) {
       LOG.info("waiting for rsgroup load balancer onLine...");
       sleep(200);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
index f31e80f..7bd2cd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RSGroupTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -508,12 +509,12 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
     RegionStateNode rsn, AtomicBoolean changed) {
     return new Thread(() -> {
       LOG.info("thread1 start running, will recover region state");
-      long current = System.currentTimeMillis();
+      long current = EnvironmentEdgeManager.currentTime();
       // wait until there is only left the region we changed state and recover its state.
       // wait time is set according to the number of max retries, all except failed regions will be
       // moved in one retry, and will sleep 1s until next retry.
-      while (System.currentTimeMillis() -
-        current <= RSGroupInfoManagerImpl.DEFAULT_MAX_RETRY_VALUE * 1000) {
+      while (EnvironmentEdgeManager.currentTime() -
+          current <= RSGroupInfoManagerImpl.DEFAULT_MAX_RETRY_VALUE * 1000) {
         List<RegionInfo> regions = getRegions.apply(owner);
         LOG.debug("server table region size is:{}", regions.size());
         assert regions.size() >= 1;
@@ -709,9 +710,9 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
     String rsGroup2 = "rsGroup2";
     ADMIN.addRSGroup(rsGroup2);
 
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     ADMIN.moveServersToRSGroup(Sets.newHashSet(newGroup.getServers().first()), rsGroup2);
-    long timeTaken = System.currentTimeMillis() - startTime;
+    long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
     String msg =
       "Should not take mote than 15000 ms to move a table with 100 regions. Time taken  ="
         + timeTaken + " ms";
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 905cb48..4b0d73a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -115,6 +115,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.security.GroupMappingServiceProvider;
@@ -1135,7 +1136,7 @@ public class TestAccessController extends SecureTestUtil {
         byte[] family, byte[] qualifier,
         byte[] startKey, byte[] endKey, int numRows) throws IOException {
       HFile.Writer writer = null;
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       try {
         HFileContext context = new HFileContextBuilder().build();
         writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java
index f3035a9..0bd1e74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java
@@ -161,7 +161,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil {
             Table t = connection.getTable(testTable.getTableName())) {
           Put p;
           // with ro ACL
-          long now = System.currentTimeMillis();
+          long now = EnvironmentEdgeManager.currentTime();
           p = new Put(TEST_ROW).addColumn(TEST_FAMILY1, TEST_Q1, now, ZERO);
           p.setACL(writePerms);
           t.put(p);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 9131370..bb37bcc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -460,7 +460,7 @@ public class TestTokenAuthentication {
 //        final RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString());
 //        ServerName sn =
 //            ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
-//                System.currentTimeMillis());
+//                EnvironmentEdgeManager.currentTime());
 //        try {
 //          // Make a proxy to go between the shaded RpcController that rpc expects and the
 //          // non-shaded controller this CPEP is providing. This is because this test does a neat
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java
index 54e25a2..780233e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -94,10 +95,10 @@ public class TestZKSecretWatcherRefreshKeys {
     Integer[] keys = { 1, 2, 3, 4, 5, 6 };
     for (Integer key : keys) {
       AuthenticationKey ak = new AuthenticationKey(key,
-          System.currentTimeMillis() + 600 * 1000, null);
+        EnvironmentEdgeManager.currentTime() + 600 * 1000, null);
       ZKUtil.createWithParents(zk,
-          ZNodePaths.joinZNode(watcher.getKeysParentZNode(), key.toString()),
-          Writables.getBytes(ak));
+        ZNodePaths.joinZNode(watcher.getKeysParentZNode(), key.toString()),
+        Writables.getBytes(ak));
     }
     Assert.assertNull(keyManager.getCurrentKey());
     watcher.refreshKeys();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index 1916589..2e69ac2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -815,13 +815,13 @@ public final class SnapshotTestingUtils {
     loadData(util, mutator, rows, families);
   }
 
-  public static void loadData(final HBaseTestingUtility util, final BufferedMutator mutator, int rows,
-      byte[]... families) throws IOException, InterruptedException {
+  public static void loadData(final HBaseTestingUtility util, final BufferedMutator mutator,
+      int rows, byte[]... families) throws IOException, InterruptedException {
     // Ensure one row per region
     assertTrue(rows >= KEYS.length);
     for (byte k0: KEYS) {
       byte[] k = new byte[] { k0 };
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
+      byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), k);
       byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
       final byte[][] families1 = families;
       final byte[] key1 = key;
@@ -832,7 +832,8 @@ public final class SnapshotTestingUtils {
 
     // Add other extra rows. more rows, more files
     while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+      byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()),
+        Bytes.toBytes(rows));
       byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
       final byte[][] families1 = families;
       final byte[] key1 = key;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index a1628f2..5c50d04 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -441,12 +442,13 @@ public class TestFlushSnapshotFromClient {
   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
       throws IOException, InterruptedException {
     // Verify that there's one region less
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     while (admin.getRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
       // This may be flaky... if after 15sec the merge is not complete give up
       // it will fail in the assertEquals(numRegionsAfterMerge).
-      if ((System.currentTimeMillis() - startTime) > 15000)
+      if ((EnvironmentEdgeManager.currentTime() - startTime) > 15000) {
         break;
+      }
       Thread.sleep(100);
     }
     SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
index 4efaa1a..22a17da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -106,7 +107,7 @@ public class TestRestoreFlushSnapshotFromClient {
   public void setup() throws Exception {
     this.admin = UTIL.getAdmin();
 
-    long tid = System.currentTimeMillis();
+    long tid = EnvironmentEdgeManager.currentTime();
     tableName = TableName.valueOf("testtb-" + tid);
     snapshotName0 = "snaptb0-" + tid;
     snapshotName1 = "snaptb1-" + tid;
@@ -172,14 +173,16 @@ public class TestRestoreFlushSnapshotFromClient {
 
   @Test(expected=SnapshotDoesNotExistException.class)
   public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
-    String snapshotName = "random-snapshot-" + System.currentTimeMillis();
-    TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
+    String snapshotName = "random-snapshot-" + EnvironmentEdgeManager.currentTime();
+    TableName tableName = TableName.valueOf("random-table-" +
+      EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName, tableName);
   }
 
   @Test
   public void testCloneSnapshot() throws IOException, InterruptedException {
-    TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+    TableName clonedTableName = TableName.valueOf("clonedtb-" +
+      EnvironmentEdgeManager.currentTime());
     testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
     testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
   }
@@ -195,7 +198,8 @@ public class TestRestoreFlushSnapshotFromClient {
 
   @Test
   public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
-    TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
+    TableName clonedTableName = TableName.valueOf("clonedtb-" +
+      EnvironmentEdgeManager.currentTime());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
     verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
     admin.snapshot(snapshotName2, clonedTableName, SnapshotType.FLUSH);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java
index 34dd8b2..2585b9f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java
@@ -50,7 +50,7 @@ public class ConstantDelayQueue<E> implements BlockingQueue<E> {
 
     @Override
     public long getDelay(TimeUnit unit) {
-      return unit.convert(end - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+      return unit.convert(end - EnvironmentEdgeManager.currentTime(), TimeUnit.MILLISECONDS);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
index 0691e6f..f086d06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
@@ -125,7 +125,7 @@ public class HFileTestUtil {
         .withPath(fs, path)
         .withFileContext(meta)
         .create();
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys
       for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
@@ -144,7 +144,8 @@ public class HFileTestUtil {
         writer.append(kv);
       }
     } finally {
-      writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
+      writer.appendFileInfo(BULKLOAD_TIME_KEY,
+        Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
       writer.close();
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 1b8f2ed..85d59ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -158,7 +158,7 @@ public abstract class MultiThreadedAction {
     this.endKey = endKey;
     this.numThreads = numThreads;
     (new Thread(new ProgressReporter(actionLetter),
-        "MultiThreadedAction-ProgressReporter-" + System.currentTimeMillis())).start();
+        "MultiThreadedAction-ProgressReporter-" + EnvironmentEdgeManager.currentTime())).start();
   }
 
   private static String formatTime(long elapsedTime) {
@@ -182,7 +182,7 @@ public abstract class MultiThreadedAction {
 
     @Override
     public void run() {
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       long priorNumKeys = 0;
       long priorCumulativeOpTime = 0;
       int priorAverageKeysPerSecond = 0;
@@ -197,7 +197,7 @@ public abstract class MultiThreadedAction {
           LOG.info(threadsLeft + "Number of keys = 0");
         } else {
           long numKeys = MultiThreadedAction.this.numKeys.get();
-          long time = System.currentTimeMillis() - startTime;
+          long time = EnvironmentEdgeManager.currentTime() - startTime;
           long totalOpTime = totalOpTimeMs.get();
 
           long numKeysDelta = numKeys - priorNumKeys;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index 27f5fb9..58b046f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -185,7 +185,7 @@ public class MultiThreadedReader extends MultiThreadedAction
         LOG.info("Started thread #" + readerId + " for reads...");
       }
 
-      startTimeMs = System.currentTimeMillis();
+      startTimeMs = EnvironmentEdgeManager.currentTime();
       curKey = startKey;
       long [] keysForThisReader = new long[batchSize];
       while (curKey < endKey && !aborted) {
@@ -281,7 +281,7 @@ public class MultiThreadedReader extends MultiThreadedAction
           numReadFailures.addAndGet(1);
           LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
               + ", time from start: "
-              + (System.currentTimeMillis() - startTimeMs) + " ms");
+              + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms");
           if (printExceptionTrace) {
             LOG.warn(e.toString(), e);
             printExceptionTrace = false;
@@ -296,7 +296,7 @@ public class MultiThreadedReader extends MultiThreadedAction
           for (long keyToRead : keysToRead) {
             LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
                 + ", time from start: "
-                + (System.currentTimeMillis() - startTimeMs) + " ms");
+                + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms");
           }
           if (printExceptionTrace) {
             LOG.warn(e.toString(), e);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
index 86a8500..f76042e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
@@ -142,7 +142,7 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader {
     private void recordFailure(final long keyToRead) {
       numReadFailures.addAndGet(1);
       LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "") + ", "
-          + "time from start: " + (System.currentTimeMillis() - startTimeMs) + " ms");
+          + "time from start: " + (EnvironmentEdgeManager.currentTime() - startTimeMs) + " ms");
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index 9fdedca..30050a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -278,7 +278,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
 
     public void mutate(Table table, Mutation m,
         long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
-      long start = System.currentTimeMillis();
+      long start = EnvironmentEdgeManager.currentTime();
       try {
         m = dataGenerator.beforeMutate(keyBase, m);
         if (m instanceof Increment) {
@@ -293,11 +293,11 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
           throw new IllegalArgumentException(
             "unsupported mutation " + m.getClass().getSimpleName());
         }
-        totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+        totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start);
       } catch (IOException e) {
         if (ignoreNonceConflicts) {
           LOG.info("Detected nonce conflict, ignoring: " + e.getMessage());
-          totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+          totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start);
           return;
         }
         failedKeySet.add(keyBase);
@@ -309,7 +309,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
           exceptionInfo = StringUtils.stringifyException(e);
         }
         LOG.error("Failed to mutate: " + keyBase + " after " +
-            (System.currentTimeMillis() - start) +
+            (EnvironmentEdgeManager.currentTime() - start) +
           "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: "
             + exceptionInfo);
       }
@@ -331,7 +331,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
 
   public void mutate(Table table, Mutation m,
       long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     try {
       m = dataGenerator.beforeMutate(keyBase, m);
       if (m instanceof Increment) {
@@ -346,7 +346,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
         throw new IllegalArgumentException(
           "unsupported mutation " + m.getClass().getSimpleName());
       }
-      totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+      totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start);
     } catch (IOException e) {
       failedKeySet.add(keyBase);
       String exceptionInfo;
@@ -360,9 +360,9 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
... 1053 lines suppressed ...
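
The conversion only pays off where tests actually inject an alternate edge
around the code under test. A hedged JUnit 4 sketch of that lifecycle
(class and method names here are illustrative, not from this patch):

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class InjectedClockSketchTest {
      private final ManualEnvironmentEdge clock = new ManualEnvironmentEdge();

      @Before
      public void injectClock() {
        clock.setValue(1_000L);
        EnvironmentEdgeManager.injectEdge(clock);
      }

      @After
      public void restoreClock() {
        // Always restore, or the fake clock leaks into later tests.
        EnvironmentEdgeManager.reset();
      }

      @Test
      public void timestampsFollowInjectedClock() {
        long t0 = EnvironmentEdgeManager.currentTime();
        clock.incValue(5_000L);
        assertEquals(t0 + 5_000L, EnvironmentEdgeManager.currentTime());
      }
    }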
