From: apurtell@apache.org
To: "commits@hbase.apache.org"
Date: Tue, 01 Jun 2021 23:07:44 +0000
Subject: [hbase] branch branch-2 updated: HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)

This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

The following commit(s) were added to refs/heads/branch-2 by this push:
     new a4e8ee1  HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)
a4e8ee1 is described below

commit a4e8ee183e17a92be70c66da80db2dcbd0f7e622
Author: Andrew Purtell
AuthorDate: Tue Jun 1 09:57:48 2021 -0700

    HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)

    We introduced EnvironmentEdgeManager as a way to inject alternate clocks
    for unit tests. In order for this to be effective, all callers that would
    otherwise use System.currentTimeMillis() must call
    EnvironmentEdgeManager.currentTime() instead, except the implementers of
    EnvironmentEdge.
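For context, the clock-injection pattern the message describes works roughly as follows. This is a minimal sketch, assuming HBase's ManualEnvironmentEdge helper and the injectEdge/reset methods on EnvironmentEdgeManager; the class name ClockInjectionSketch is just for illustration:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class ClockInjectionSketch {
      public static void main(String[] args) {
        // Swap the default system-clock edge for a manually driven one.
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        clock.setValue(1000L); // pin "now" at t=1000ms
        EnvironmentEdgeManager.injectEdge(clock);
        try {
          long t0 = EnvironmentEdgeManager.currentTime(); // 1000
          clock.incValue(5000L); // advance the clock 5s without sleeping
          long t1 = EnvironmentEdgeManager.currentTime(); // 6000
          System.out.println("observed elapsed ms: " + (t1 - t0)); // 5000
        } finally {
          // Restore the real clock so later tests are unaffected.
          EnvironmentEdgeManager.reset();
        }
      }
    }

The scheme only works if the code under test reads time through EnvironmentEdgeManager.currentTime(); any call site still using System.currentTimeMillis() silently bypasses the injected clock, which is exactly what this commit sweeps up.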
    Signed-off-by: Bharath Vissapragada
    Signed-off-by: Duo Zhang
    Signed-off-by: Viraj Jasani

    Conflicts:
        hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
        hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
        hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java
        hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
        hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
        hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
        hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
        hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
        hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
        hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
        hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
        hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
        hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
        hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
        hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
        hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
        hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
        hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
---
 .../java/org/apache/hadoop/hbase/HRegionInfo.java | 3 +-
 .../apache/hadoop/hbase/ServerMetricsBuilder.java | 3 +-
 .../hadoop/hbase/client/BufferedMutatorImpl.java | 5 +-
 .../hadoop/hbase/client/ClientIdGenerator.java | 3 +-
 .../apache/hadoop/hbase/client/ClientScanner.java | 5 +-
 .../org/apache/hadoop/hbase/client/Delete.java | 2 +-
 .../client/PreemptiveFastFailInterceptor.java | 6 +-
 .../hadoop/hbase/client/RegionInfoBuilder.java | 3 +-
 .../hadoop/hbase/client/ScannerCallable.java | 5 +-
 .../apache/hadoop/hbase/master/RegionState.java | 7 +-
 .../hadoop/hbase/slowlog/SlowLogTableAccessor.java | 8 +-
 .../hadoop/hbase/client/TestAsyncProcess.java | 15 +-
 .../hadoop/hbase/client/TestClientNoCluster.java | 11 +-
 .../hadoop/hbase/client/TestRegionInfoBuilder.java | 5 +-
 .../hadoop/hbase/client/TestRegionInfoDisplay.java | 3 +-
 .../hadoop/hbase/client/TestSnapshotFromAdmin.java | 5 +-
 .../org/apache/hadoop/hbase/ScheduledChore.java | 6 +-
 .../io/hadoopbackport/ThrottledInputStream.java | 4 +-
 .../hadoop/hbase/util/CoprocessorClassLoader.java | 4 +-
 .../java/org/apache/hadoop/hbase/util/IdLock.java | 4 +-
 .../org/apache/hadoop/hbase/util/Random64.java | 4 +-
 .../apache/hadoop/hbase/util/ReflectionUtils.java | 2 +-
 .../java/org/apache/hadoop/hbase/util/Sleeper.java | 6 +-
 .../java/org/apache/hadoop/hbase/util/Threads.java | 4 +-
 .../apache/hadoop/hbase/TestCellComparator.java | 5 +-
 .../java/org/apache/hadoop/hbase/TestKeyValue.java | 15 +-
 .../test/java/org/apache/hadoop/hbase/Waiter.java | 9 +-
 .../hadoop/hbase/util/TestByteBufferArray.java | 2 +-
 .../org/apache/hadoop/hbase/util/TestBytes.java | 2 +-
 .../org/apache/hadoop/hbase/util/TestThreads.java | 4 +-
 .../hadoop/hbase/coprocessor/TestSecureExport.java | 3 +-
 .../example/ExampleMasterObserverWithMetrics.java | 5 +-
 .../example/WriteHeavyIncrementObserver.java | 3 +-
 .../example/TestZooKeeperScanPolicyObserver.java | 3 +-
 .../apache/hadoop/hbase/hbtop/screen/Screen.java | 5 +-
 .../hbase/hbtop/screen/top/TopScreenModel.java | 3 +-
 .../hbase/hbtop/screen/top/TopScreenPresenter.java | 5 +-
 .../apache/hadoop/hbase/http/NoCacheFilter.java | 4 +-
 .../org/apache/hadoop/hbase/ChaosZKClient.java | 5 +-
 .../hadoop/hbase/DistributedHBaseCluster.java | 13 ++-
 .../apache/hadoop/hbase/IntegrationTestIngest.java | 7 +-
 .../hadoop/hbase/IntegrationTestManyRegions.java | 5 +-
 .../IntegrationTestRegionReplicaReplication.java | 7 +-
 .../StripeCompactionsPerformanceEvaluation.java | 10 +-
 .../chaos/actions/MoveRegionsOfTableAction.java | 8 +-
 .../hbase/chaos/actions/SnapshotTableAction.java | 3 +-
 .../hbase/chaos/policies/PeriodicPolicy.java | 5 +-
 .../IntegrationTestTableSnapshotInputFormat.java | 3 +-
 .../hbase/test/IntegrationTestBigLinkedList.java | 13 ++-
 .../hbase/test/IntegrationTestLoadCommonCrawl.java | 3 +-
 ...nTestTimeBoundedRequestsWithRegionReplicas.java | 5 +-
 .../hadoop/hbase/mapred/TableRecordReaderImpl.java | 7 +-
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 6 +-
 .../apache/hadoop/hbase/mapreduce/ImportTsv.java | 3 +-
 .../apache/hadoop/hbase/mapreduce/SyncTable.java | 3 +-
 .../hbase/mapreduce/TableRecordReaderImpl.java | 7 +-
 .../apache/hadoop/hbase/mapreduce/WALPlayer.java | 5 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java | 5 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java | 5 +-
 .../mapreduce/TestCellBasedImportExport2.java | 5 +-
 .../hadoop/hbase/mapreduce/TestCellCounter.java | 3 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java | 3 +-
 .../hadoop/hbase/mapreduce/TestImportExport.java | 5 +-
 .../hadoop/hbase/mapreduce/TestRowCounter.java | 17 +--
 .../hadoop/hbase/mapreduce/TestSyncTable.java | 5 +-
 .../hadoop/hbase/mapreduce/TestWALInputFormat.java | 3 +-
 .../hbase/mapreduce/TestWALRecordReader.java | 21 ++--
 .../hbase/replication/TestVerifyReplication.java | 21 ++--
 .../replication/TestVerifyReplicationAdjunct.java | 15 +--
 .../TestVerifyReplicationCrossDiffHdfs.java | 5 +-
 .../hadoop/hbase/snapshot/TestExportSnapshot.java | 6 +-
 .../snapshot/TestExportSnapshotV1NoCluster.java | 3 +-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java | 2 +-
 .../procedure2/store/wal/WALProcedureStore.java | 9 +-
 .../TestProcedureSchedulerConcurrency.java | 5 +-
 .../store/ProcedureStorePerformanceEvaluation.java | 5 +-
 .../ProcedureWALLoaderPerformanceEvaluation.java | 14 +--
 .../org/apache/hadoop/hbase/rest/RESTServer.java | 3 +-
 .../hadoop/hbase/rest/ScannerResultGenerator.java | 3 +-
 .../apache/hadoop/hbase/rest/client/Client.java | 5 +-
 .../hadoop/hbase/rest/PerformanceEvaluation.java | 3 +-
 .../hbase/rest/client/TestRemoteAdminRetries.java | 5 +-
 .../hbase/rest/client/TestRemoteHTableRetries.java | 5 +-
 .../hadoop/hbase/rest/client/TestRemoteTable.java | 3 +-
 .../hadoop/hbase/rsgroup/TestEnableRSGroups.java | 5 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java | 13 ++-
 .../org/apache/hadoop/hbase/HealthCheckChore.java | 12 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java | 9 +-
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java | 9 +-
 .../hbase/io/hfile/LruAdaptiveBlockCache.java | 5 +-
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java | 3 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 9 +-
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java | 3 +-
 .../hadoop/hbase/ipc/NettyServerRpcConnection.java | 3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java | 3 +-
 .../org/apache/hadoop/hbase/ipc/ServerCall.java | 3 +-
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java | 14 ++-
 .../hadoop/hbase/ipc/SimpleRpcServerResponder.java | 7 +-
 .../hbase/ipc/SimpleServerRpcConnection.java | 5 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 21 ++--
 .../hadoop/hbase/master/MasterRpcServices.java | 3 +-
 .../apache/hadoop/hbase/master/ServerManager.java | 13 +--
 .../hbase/master/assignment/AssignmentManager.java | 4 +-
 .../hbase/master/assignment/RegionStateStore.java | 4 +-
 .../hbase/master/balancer/SimpleLoadBalancer.java | 5 +-
 .../hadoop/hbase/master/cleaner/DirScanPool.java | 5 +-
 .../hadoop/hbase/master/cleaner/HFileCleaner.java | 5 +-
 .../hadoop/hbase/master/locking/LockManager.java | 9 +-
 .../hadoop/hbase/master/locking/LockProcedure.java | 7 +-
 .../monitoring/MemoryBoundedLogMessageBuffer.java | 4 +-
 .../hbase/monitoring/MonitoredRPCHandlerImpl.java | 4 +-
 .../hadoop/hbase/monitoring/MonitoredTaskImpl.java | 8 +-
 .../hbase/namequeues/impl/SlowLogQueueService.java | 3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java | 8 +-
 .../hadoop/hbase/regionserver/HRegionServer.java | 20 ++--
 .../hadoop/hbase/regionserver/RSRpcServices.java | 6 +-
 .../hadoop/hbase/regionserver/ScannerContext.java | 13 ++-
 .../hbase/regionserver/compactions/Compactor.java | 2 +-
 .../regionserver/handler/WALSplitterHandler.java | 5 +-
 .../hbase/regionserver/wal/AbstractFSWAL.java | 2 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java | 3 +-
 .../replication/HBaseReplicationEndpoint.java | 5 +-
 .../regionserver/DumpReplicationQueues.java | 6 +-
 .../HBaseInterClusterReplicationEndpoint.java | 6 +-
 .../replication/regionserver/MetricsSink.java | 7 +-
 .../regionserver/ReplicationSinkManager.java | 11 +-
 .../regionserver/ReplicationSourceShipper.java | 5 +-
 .../regionserver/ReplicationSyncUp.java | 8 +-
 .../access/SnapshotScannerHDFSAclHelper.java | 33 +++---
 .../org/apache/hadoop/hbase/tool/CanaryTool.java | 10 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java | 2 +-
 .../apache/hadoop/hbase/util/JVMClusterUtil.java | 4 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java | 2 +-
 .../apache/hadoop/hbase/util/RegionSplitter.java | 6 +-
 .../util/compaction/MajorCompactionTTLRequest.java | 3 +-
 .../hbase/util/compaction/MajorCompactor.java | 4 +-
 .../apache/hadoop/hbase/wal/AbstractWALRoller.java | 9 +-
 .../org/apache/hadoop/hbase/wal/WALSplitUtil.java | 3 +-
 .../java/org/apache/hadoop/hbase/HBaseCluster.java | 5 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java | 13 ++-
 .../hadoop/hbase/HFilePerformanceEvaluation.java | 5 +-
 .../org/apache/hadoop/hbase/MiniHBaseCluster.java | 9 +-
 .../apache/hadoop/hbase/MultithreadedTestUtil.java | 5 +-
 .../hadoop/hbase/PerformanceEvaluationCommons.java | 5 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java | 5 +-
 .../org/apache/hadoop/hbase/TestIOFencing.java | 11 +-
 .../apache/hadoop/hbase/TestMetaTableAccessor.java | 21 ++--
 .../hbase/TestMetaTableAccessorNoCluster.java | 8 +-
 .../apache/hadoop/hbase/TestMetaTableLocator.java | 3 +-
 .../org/apache/hadoop/hbase/TestSerialization.java | 126 ++------------------
 .../org/apache/hadoop/hbase/TestZooKeeper.java | 3 +-
 .../hadoop/hbase/backup/TestHFileArchiving.java | 9 +-
 ...shotFromClientAfterSplittingRegionTestBase.java | 5 +-
 ...hotFromClientCloneLinksAfterDeleteTestBase.java | 7 +-
 .../CloneSnapshotFromClientErrorTestBase.java | 5 +-
 .../CloneSnapshotFromClientNormalTestBase.java | 7 +-
 .../client/CloneSnapshotFromClientTestBase.java | 4 +-
 .../RestoreSnapshotFromClientCloneTestBase.java | 3 +-
 .../RestoreSnapshotFromClientSimpleTestBase.java | 3 +-
 .../client/RestoreSnapshotFromClientTestBase.java | 4 +-
 .../org/apache/hadoop/hbase/client/TestAdmin2.java | 13 ++-
 .../org/apache/hadoop/hbase/client/TestAdmin3.java | 3 +-
 .../hbase/client/TestAsyncRegionAdminApi.java | 12 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 5 +-
 .../hbase/client/TestAsyncTableAdminApi2.java | 5 +-
 .../hbase/client/TestBlockEvictionFromClient.java | 10 +-
 .../hbase/client/TestClientScannerRPCTimeout.java | 5 +-
 .../hadoop/hbase/client/TestClientTimeouts.java | 3 +-
 .../hbase/client/TestConnectionImplementation.java | 4 +-
 .../apache/hadoop/hbase/client/TestFastFail.java | 5 +-
 .../hadoop/hbase/client/TestFromClientSide.java | 5 +-
 .../hadoop/hbase/client/TestFromClientSide3.java | 3 +-
 .../hadoop/hbase/client/TestFromClientSide5.java | 11 +-
 ...oneSnapshotFromClientCloneLinksAfterDelete.java | 6 +-
 .../client/TestPutDeleteEtcCellIteration.java | 3 +-
 .../hbase/client/TestScannersFromClientSide.java | 4 +-
 .../client/TestSnapshotCloneIndependence.java | 5 +-
 .../hadoop/hbase/client/TestSnapshotMetadata.java | 13 ++-
 .../client/TestSnapshotTemporaryDirectory.java | 5 +-
 .../hbase/client/TestTableSnapshotScanner.java | 3 +-
 .../hbase/client/locking/TestEntityLocks.java | 5 +-
 .../hadoop/hbase/codec/CodecPerformance.java | 9 +-
 .../hbase/coprocessor/TestCoprocessorMetrics.java | 5 +-
 .../hbase/coprocessor/TestCoprocessorStop.java | 8 +-
 .../coprocessor/TestRegionObserverInterface.java | 4 +-
 .../coprocessor/TestRegionObserverStacking.java | 7 +-
 .../hadoop/hbase/coprocessor/TestWALObserver.java | 8 +-
 .../favored/TestFavoredNodeAssignmentHelper.java | 4 +-
 .../hbase/filter/TestDependentColumnFilter.java | 3 +-
 .../hbase/filter/TestFilterSerialization.java | 5 +-
 .../filter/TestFuzzyRowAndColumnRangeFilter.java | 5 +-
 .../hbase/filter/TestFuzzyRowFilterEndToEnd.java | 9 +-
 .../apache/hadoop/hbase/fs/TestBlockReorder.java | 10 +-
 .../hbase/fs/TestBlockReorderBlockLocation.java | 5 +-
 .../hbase/fs/TestBlockReorderMultiBlocks.java | 5 +-
 .../org/apache/hadoop/hbase/io/TestHeapSize.java | 5 +-
 .../hbase/io/encoding/TestChangingEncoding.java | 5 +-
 .../hadoop/hbase/io/hfile/TestHFileBlock.java | 5 +-
 .../hfile/TestHFileScannerImplReferenceCount.java | 3 +-
 .../hbase/io/hfile/TestScannerFromBucketCache.java | 6 +-
 .../hbase/ipc/TestProtobufRpcServiceImpl.java | 8 +-
 .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 16 ++-
 .../hadoop/hbase/master/AbstractTestDLS.java | 10 +-
 .../hbase/master/TestActiveMasterManager.java | 8 +-
 .../hbase/master/TestClockSkewDetection.java | 11 +-
 .../hadoop/hbase/master/TestMasterMetrics.java | 4 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java | 3 +-
 .../master/TestMetaAssignmentWithStopMaster.java | 9 +-
 .../hadoop/hbase/master/TestRegionPlacement.java | 3 +-
 .../hadoop/hbase/master/TestSplitLogManager.java | 3 +-
 .../hadoop/hbase/master/TestWarmupRegion.java | 3 +-
 .../master/assignment/MockMasterServices.java | 5 +-
 .../master/assignment/TestAssignmentManager.java | 5 +-
 .../assignment/TestAssignmentManagerBase.java | 3 +-
 .../master/assignment/TestRegionReplicaSplit.java | 3 +-
 .../master/assignment/TestRegionStateStore.java | 2 +-
 .../hbase/master/assignment/TestRegionStates.java | 13 ++-
 .../master/assignment/TestRogueRSAssignment.java | 4 +-
 .../hbase/master/cleaner/TestHFileCleaner.java | 4 +-
 .../hbase/master/cleaner/TestLogsCleaner.java | 9 +-
 .../cleaner/TestReplicationHFileCleaner.java | 8 +-
 .../hbase/master/janitor/TestCatalogJanitor.java | 5 +-
 .../master/janitor/TestCatalogJanitorCluster.java | 11 +-
 .../janitor/TestCatalogJanitorInMemoryStates.java | 5 +-
 .../hadoop/hbase/master/janitor/TestMetaFixer.java | 11 +-
 .../hbase/master/locking/TestLockProcedure.java | 9 +-
 ...terProcedureSchedulerPerformanceEvaluation.java | 7 +-
 .../procedure/MasterProcedureTestingUtility.java | 6 +-
 .../procedure/TestCloneSnapshotProcedure.java | 3 +-
 .../procedure/TestRestoreSnapshotProcedure.java | 3 +-
 .../procedure/TestSafemodeBringsDownMaster.java | 5 +-
 .../hbase/master/region/MasterRegionTestBase.java | 3 +-
 .../master/region/TestMasterRegionCompaction.java | 3 +-
 .../region/TestMasterRegionOnTwoFileSystems.java | 6 +-
 .../org/apache/hadoop/hbase/mob/MobTestUtil.java | 3 +-
 .../hbase/mob/TestExpiredMobFileCleaner.java | 5 +-
 .../hadoop/hbase/mob/TestMobDataBlockEncoding.java | 3 +-
 .../hbase/mob/compactions/TestMobCompactor.java | 5 +-
 .../compactions/TestPartitionedMobCompactor.java | 24 ++--
 .../RegionProcedureStorePerformanceEvaluation.java | 3 +-
 .../region/RegionProcedureStoreTestHelper.java | 3 +-
 .../store/region/TestRegionProcedureStore.java | 3 +-
 .../hadoop/hbase/quotas/TestRateLimiter.java | 2 +-
 .../regionserver/TestBulkLoadReplication.java | 6 +-
 .../hbase/regionserver/TestCompactingMemStore.java | 10 +-
 .../TestCompactingToCellFlatMapMemStore.java | 27 ++---
 .../hbase/regionserver/TestCompactionState.java | 5 +-
 .../regionserver/TestDataBlockEncodingTool.java | 3 +-
 .../regionserver/TestDefaultCompactSelection.java | 2 +-
 .../hbase/regionserver/TestDefaultMemStore.java | 10 +-
 .../regionserver/TestEndToEndSplitTransaction.java | 21 ++--
 .../hadoop/hbase/regionserver/TestHMobStore.java | 3 +-
 .../hadoop/hbase/regionserver/TestHRegion.java | 41 +++----
 .../hadoop/hbase/regionserver/TestHRegionInfo.java | 7 +-
 .../regionserver/TestHRegionReplayEvents.java | 2 +-
 .../regionserver/TestHRegionServerBulkLoad.java | 3 +-
 .../hadoop/hbase/regionserver/TestHStore.java | 10 +-
 .../hadoop/hbase/regionserver/TestHStoreFile.java | 13 ++-
 .../hbase/regionserver/TestMajorCompaction.java | 5 +-
 .../regionserver/TestMasterAddressTracker.java | 18 ++-
 .../regionserver/TestMemStoreSegmentsIterator.java | 5 +-
 .../hbase/regionserver/TestMobStoreCompaction.java | 5 +-
 .../hbase/regionserver/TestMobStoreScanner.java | 11 +-
 .../hbase/regionserver/TestRegionIncrement.java | 11 +-
 .../TestRegionMergeTransactionOnCluster.java | 11 +-
 .../hadoop/hbase/regionserver/TestRegionOpen.java | 5 +-
 .../hbase/regionserver/TestRegionReplicas.java | 5 +-
 .../regionserver/TestRegionServerAbortTimeout.java | 5 +-
 .../regionserver/TestRegionServerMetrics.java | 5 +-
 .../TestRegionServerReportForDuty.java | 9 +-
 .../hbase/regionserver/TestReversibleScanners.java | 3 +-
 .../regionserver/TestRowPrefixBloomFilter.java | 3 +-
 .../hadoop/hbase/regionserver/TestScanner.java | 7 +-
 .../regionserver/TestScannerWithBulkload.java | 10 +-
 .../regionserver/TestSimpleTimeRangeTracker.java | 3 +-
 .../TestStoreFileScannerWithTagCompression.java | 3 +-
 .../hbase/regionserver/TestStoreScanner.java | 13 ++-
 .../hadoop/hbase/regionserver/TestWALLockup.java | 3 +-
 .../hadoop/hbase/regionserver/TestWideScanner.java | 3 +-
 .../compactions/TestFIFOCompactionPolicy.java | 2 +-
 .../TestCompactionWithThroughputController.java | 11 +-
 .../hbase/regionserver/wal/AbstractTestFSWAL.java | 8 +-
 .../wal/AbstractTestLogRollPeriod.java | 5 +-
 .../regionserver/wal/AbstractTestProtobufLog.java | 6 +-
 .../regionserver/wal/AbstractTestWALReplay.java | 4 +-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java | 3 +-
 .../hbase/regionserver/wal/TestDurability.java | 13 ++-
 .../hbase/regionserver/wal/TestLogRollAbort.java | 5 +-
 .../hbase/regionserver/wal/TestLogRolling.java | 9 +-
 .../regionserver/wal/TestLogRollingNoCluster.java | 3 +-
 .../TestReplicationEmptyWALRecovery.java | 11 +-
 .../hbase/replication/TestReplicationEndpoint.java | 3 +-
 .../hbase/replication/TestReplicationKillRS.java | 5 +-
 .../replication/TestReplicationSmallTests.java | 5 +-
 .../TestReplicationWALEntryFilters.java | 11 +-
 .../regionserver/TestDumpReplicationQueues.java | 3 +-
 ...InterClusterReplicationEndpointFilterEdits.java | 30 ++---
 .../regionserver/TestReplicationSink.java | 5 +-
 .../regionserver/TestReplicationSourceManager.java | 10 +-
 .../TestSerialReplicationEndpoint.java | 5 +-
 .../regionserver/TestWALEntryStream.java | 18 +--
 .../security/access/TestAccessController.java | 3 +-
 .../access/TestCellACLWithMultipleVersions.java | 2 +-
 .../security/token/TestTokenAuthentication.java | 2 +-
 .../token/TestZKSecretWatcherRefreshKeys.java | 7 +-
 .../hbase/snapshot/SnapshotTestingUtils.java | 9 +-
 .../snapshot/TestFlushSnapshotFromClient.java | 6 +-
 .../TestRestoreFlushSnapshotFromClient.java | 14 ++-
 .../hadoop/hbase/util/ConstantDelayQueue.java | 2 +-
 .../apache/hadoop/hbase/util/HFileTestUtil.java | 5 +-
 .../hadoop/hbase/util/MultiThreadedAction.java | 6 +-
 .../hadoop/hbase/util/MultiThreadedReader.java | 6 +-
 .../hbase/util/MultiThreadedReaderWithACL.java | 2 +-
 .../hadoop/hbase/util/MultiThreadedUpdater.java | 18 +--
 .../hbase/util/MultiThreadedUpdaterWithACL.java | 10 +-
 .../hadoop/hbase/util/MultiThreadedWriter.java | 10 +-
 .../hadoop/hbase/util/MultiThreadedWriterBase.java | 3 +-
 .../hbase/util/MultiThreadedWriterWithACL.java | 8 +-
 .../hadoop/hbase/util/TestBloomFilterChunk.java | 12 +-
 .../hbase/util/TestDefaultEnvironmentEdge.java | 11 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java | 4 +-
 .../org/apache/hadoop/hbase/util/TestFSUtils.java | 16 +--
 .../org/apache/hadoop/hbase/util/TestIdLock.java | 4 +-
 .../hadoop/hbase/util/TestIdReadWriteLock.java | 4 +-
 .../util/TestIncrementingEnvironmentEdge.java | 2 +-
 .../hbase/util/compaction/TestMajorCompactor.java | 5 +-
 .../apache/hadoop/hbase/wal/TestCompressedWAL.java | 3 +-
 .../hadoop/hbase/wal/TestFSHLogProvider.java | 3 +-
 .../org/apache/hadoop/hbase/wal/TestSecureWAL.java | 3 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 31 ++---
 .../apache/hadoop/hbase/wal/TestWALMethods.java | 3 +-
 .../hadoop/hbase/wal/TestWALReaderOnSecureWAL.java | 6 +-
 .../apache/hadoop/hbase/wal/TestWALRootDir.java | 12 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java | 4 +-
 .../hadoop/hbase/wal/TestWALSplitToHFile.java | 10 +-
 .../hadoop/hbase/wal/WALPerformanceEvaluation.java | 5 +-
 .../hbase/thrift/TBoundedThreadPoolServer.java | 5 +-
 .../apache/hadoop/hbase/thrift/ThriftServer.java | 3 +-
 .../hadoop/hbase/thrift/TestThriftServer.java | 11 +-
 .../hadoop/hbase/thrift2/TestThriftConnection.java | 3 +-
 .../thrift2/TestThriftHBaseServiceHandler.java | 23 ++--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java | 6 +-
 .../hbase/zookeeper/MiniZooKeeperCluster.java | 10 +-
 .../hadoop/hbase/zookeeper/ZKNodeTracker.java | 5 +-
 344 files changed, 1325 insertions(+), 1114 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 8754430..2a0e804 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.RegionInfoDisplay;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -248,7 +249,7 @@ public class HRegionInfo implements RegionInfo {
   public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
       final boolean split) throws IllegalArgumentException {
-    this(tableName, startKey, endKey, split, System.currentTimeMillis());
+    this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTime());
   }

   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index 17ceea3..d935272 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -32,6 +32,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -136,7 +137,7 @@ public final class ServerMetricsBuilder {
   private final Map regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   private final Map userMetrics = new TreeMap<>(Bytes.BYTES_COMPARATOR);
   private final Set coprocessorNames = new TreeSet<>();
-  private long reportTimestamp = System.currentTimeMillis();
+  private long reportTimestamp = EnvironmentEdgeManager.currentTime();
   private long lastReportTimestamp = 0;
   private ServerMetricsBuilder(ServerName serverName) {
     this.serverName = serverName;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 11ce174..d3b1886 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -190,7 +191,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
     }

     if (currentWriteBufferSize.get() == 0) {
-      firstRecordInBufferTimestamp.set(System.currentTimeMillis());
+      firstRecordInBufferTimestamp.set(EnvironmentEdgeManager.currentTime());
     }
     currentWriteBufferSize.addAndGet(toAddSize);
     writeAsyncBuffer.addAll(ms);
@@ -209,7 +210,7 @@
     if (currentWriteBufferSize.get() == 0) {
       return; // Nothing to flush
     }
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     if (firstRecordInBufferTimestamp.get() + writeBufferPeriodicFlushTimeoutMs.get() > now) {
       return; // No need to flush yet
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
index d4b4b4a..9125132 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

 /**
  * The class that is able to determine some unique strings for the client,
@@ -45,7 +46,7 @@ final class ClientIdGenerator {
     byte[] selfBytes = getIpAddressBytes();
     Long pid = getPid();
     long tid = Thread.currentThread().getId();
-    long ts = System.currentTimeMillis();
+    long ts = EnvironmentEdgeManager.currentTime();

     byte[] id = new byte[selfBytes.length + ((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG];
     int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index c799e5b..ac5a711 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.LeaseException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -103,7 +104,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
     }
     this.scan = scan;
     this.tableName = tableName;
-    this.lastNext = System.currentTimeMillis();
+    this.lastNext = EnvironmentEdgeManager.currentTime();
     this.connection = connection;
     this.pool = pool;
     this.primaryOperationTimeout = primaryOperationTimeout;
@@ -449,7 +450,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
         }
         continue;
       }
-      long currentTime = System.currentTimeMillis();
+      long currentTime = EnvironmentEdgeManager.currentTime();
       if (this.scanMetrics != null) {
         this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext);
       }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 7c32a68..c2f8ee5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Specifying timestamps, deleteFamily and deleteColumns will delete all
  * versions with a timestamp less than or equal to that passed. If no
  * timestamp is specified, an entry is added with a timestamp of 'now'
- * where 'now' is the servers's System.currentTimeMillis().
+ * where 'now' is the servers's EnvironmentEdgeManager.currentTime().
  * Specifying a timestamp to the deleteColumn method will
  * delete versions only with a timestamp equal to that specified.
  * If no timestamp is passed to deleteColumn, internally, it figures the
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index e82de2a..e35902c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -153,7 +153,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
     }
     long currentTime = EnvironmentEdgeManager.currentTime();
     FailureInfo fInfo =
-        computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime));
+      computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime));
     fInfo.timeOfLatestAttemptMilliSec = currentTime;
     fInfo.numConsecutiveFailures.incrementAndGet();
   }
@@ -180,7 +180,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
    * cleanupInterval ms.
    */
  protected void occasionallyCleanupFailureInformation() {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
    if (!(now > lastFailureMapCleanupTimeMilliSec
        + failureMapCleanupIntervalMilliSec)) return;
@@ -295,7 +295,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
       repeatedFailuresMap.remove(server);
     } else {
       // update time of last attempt
-      long currentTime = System.currentTimeMillis();
+      long currentTime = EnvironmentEdgeManager.currentTime();
       fInfo.timeOfLatestAttemptMilliSec = currentTime;

       // Release the lock if we were retrying inspite of FastFail
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index cbf9e4a..cc42b96 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;

 @InterfaceAudience.Private
@@ -48,7 +49,7 @@ public class RegionInfoBuilder {
   private final TableName tableName;
   private byte[] startKey = HConstants.EMPTY_START_ROW;
   private byte[] endKey = HConstants.EMPTY_END_ROW;
-  private long regionId = System.currentTimeMillis();
+  private long regionId = EnvironmentEdgeManager.currentTime();
   private int replicaId = RegionInfo.DEFAULT_REPLICA_ID;
   private boolean offLine = false;
   private boolean split = false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 7ca5a80..5d7cf70 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -241,7 +242,7 @@ public class ScannerCallable extends ClientServiceCallable {
     } else {
       response = next();
     }
-    long timestamp = System.currentTimeMillis();
+    long timestamp = EnvironmentEdgeManager.currentTime();
     boolean isHeartBeat = response.hasHeartbeatMessage() && response.getHeartbeatMessage();
     setHeartbeatMessage(isHeartBeat);
     if (isHeartBeat && scan.isNeedCursorResult() && response.hasCursor()) {
@@ -249,7 +250,7 @@ public class ScannerCallable extends ClientServiceCallable {
     }
     Result[] rrs = ResponseConverter.getResults(getRpcControllerCellScanner(), response);
     if (logScannerActivity) {
-      long now = System.currentTimeMillis();
+      long now = EnvironmentEdgeManager.currentTime();
       if (now - timestamp > logCutOffLatency) {
         int rows = rrs == null ? 0 : rrs.length;
         LOG.info("Took " + (now - timestamp) + "ms to fetch " + rows + " rows from scanner="
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index 2d03473..8ae0888 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import java.util.Date;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -188,11 +189,11 @@ public class RegionState {
   private long ritDuration;

   public static RegionState createForTesting(RegionInfo region, State state) {
-    return new RegionState(region, state, System.currentTimeMillis(), null);
+    return new RegionState(region, state, EnvironmentEdgeManager.currentTime(), null);
   }

   public RegionState(RegionInfo region, State state, ServerName serverName) {
-    this(region, state, System.currentTimeMillis(), serverName);
+    this(region, state, EnvironmentEdgeManager.currentTime(), serverName);
   }

   public RegionState(RegionInfo region,
@@ -390,7 +391,7 @@ public class RegionState {
    * A slower (but more easy-to-read) stringification
    */
   public String toDescriptiveString() {
-    long relTime = System.currentTimeMillis() - stamp;
+    long relTime = EnvironmentEdgeManager.currentTime() - stamp;
     return hri.getRegionNameAsString()
       + " state=" + state + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)"
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
index f4f29c6..bf4cd04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/slowlog/SlowLogTableAccessor.java
@@ -127,9 +127,9 @@ public class SlowLogTableAccessor {
   }

   /**
-   * Create rowKey: currentTimeMillis APPEND slowLogPayload.hashcode
+   * Create rowKey: currentTime APPEND slowLogPayload.hashcode
    * Scan on slowlog table should keep records with sorted order of time, however records
-   * added at the very same time (currentTimeMillis) could be in random order.
+   * added at the very same time could be in random order.
    *
    * @param slowLogPayload SlowLogPayload to process
    * @return rowKey byte[]
@@ -141,8 +141,8 @@ public class SlowLogTableAccessor {
     if (lastFiveDig.startsWith("-")) {
       lastFiveDig = String.valueOf(RANDOM.nextInt(99999));
     }
-    final long currentTimeMillis = EnvironmentEdgeManager.currentTime();
-    final String timeAndHashcode = currentTimeMillis + lastFiveDig;
+    final long currentTime = EnvironmentEdgeManager.currentTime();
+    final String timeAndHashcode = currentTime + lastFiveDig;
     final long rowKeyLong = Long.parseLong(timeAndHashcode);
     return Bytes.toBytes(rowKeyLong);
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index f4a27fe..1aa3bb7 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -67,8 +67,8 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -77,6 +77,7 @@ import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

 @Category({ClientTests.class, LargeTests.class})
 public class TestAsyncProcess {
@@ -1024,9 +1025,9 @@ public class TestAsyncProcess {
     };
     t2.start();
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     ap.submit(null, DUMMY_TABLE, new ArrayList<>(), false, null, false);
-    long end = System.currentTimeMillis();
+    long end = EnvironmentEdgeManager.currentTime();
     //Adds 100 to secure us against approximate timing.
     Assert.assertTrue(start + 100L + sleepTime > end);
@@ -1757,7 +1758,7 @@ public class TestAsyncProcess {
     Put p = createPut(1, true);
     mutator.mutate(p);

-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     try {
       mutator.flush();
       Assert.fail();
@@ -1765,7 +1766,7 @@ public class TestAsyncProcess {
       assertEquals(1, expected.getNumExceptions());
       assertTrue(expected.getRow(0) == p);
     }
-    long actualSleep = System.currentTimeMillis() - startTime;
+    long actualSleep = EnvironmentEdgeManager.currentTime() - startTime;
     long expectedSleep = 0L;
     for (int i = 0; i < retries; i++) {
       expectedSleep += ConnectionUtils.getPauseTime(specialPause, i);
@@ -1784,7 +1785,7 @@ public class TestAsyncProcess {
     mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
     Assert.assertNotNull(mutator.getAsyncProcess().createServerErrorTracker());
     mutator.mutate(p);
-    startTime = System.currentTimeMillis();
+    startTime = EnvironmentEdgeManager.currentTime();
     try {
       mutator.flush();
       Assert.fail();
@@ -1792,7 +1793,7 @@ public class TestAsyncProcess {
       assertEquals(1, expected.getNumExceptions());
       assertTrue(expected.getRow(0) == p);
     }
-    actualSleep = System.currentTimeMillis() - startTime;
+    actualSleep = EnvironmentEdgeManager.currentTime() - startTime;
     expectedSleep = 0L;
     for (int i = 0; i < retries; i++) {
       expectedSleep += ConnectionUtils.getPauseTime(normalPause, i);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index d79164c..6c98732 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -57,11 +57,11 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -72,6 +72,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
@@ -643,7 +644,7 @@ public class TestClientNoCluster extends Configured implements Tool {
     CellProtos.Cell.Builder cellBuilder = CellProtos.Cell.newBuilder();
     cellBuilder.setRow(row);
     cellBuilder.setFamily(CATALOG_FAMILY_BYTESTRING);
-    cellBuilder.setTimestamp(System.currentTimeMillis());
+    cellBuilder.setTimestamp(EnvironmentEdgeManager.currentTime());
     return cellBuilder;
   }

@@ -767,7 +768,7 @@
   static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException {
     long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     final int printInterval = 100000;
     Random rd = new Random(id);
     boolean get = c.getBoolean("hbase.test.do.gets", false);
@@ -786,7 +787,7 @@
         }
       }
       LOG.info("Finished a cycle putting " + namespaceSpan + " in " +
-          (System.currentTimeMillis() - startTime) + "ms");
+          (EnvironmentEdgeManager.currentTime() - startTime) + "ms");
       }
     } else {
       try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) {
@@ -803,7 +804,7 @@
         }
       }
       LOG.info("Finished a cycle putting " + namespaceSpan + " in " +
-          (System.currentTimeMillis() - startTime) + "ms");
+          (EnvironmentEdgeManager.currentTime() - startTime) + "ms");
       }
     }
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index 0d2b7cc..d7b375b 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -198,7 +199,7 @@ public class TestRegionInfoBuilder {
   public void testParseName() throws IOException {
     final TableName tableName = name.getTableName();
     byte[] startKey = Bytes.toBytes("startKey");
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;

     // test without replicaId
@@ -228,7 +229,7 @@ public class TestRegionInfoBuilder {
     byte[] startKey = Bytes.toBytes("startKey");
     byte[] endKey = Bytes.toBytes("endKey");
     boolean split = false;
-    long regionId = System.currentTimeMillis();
+    long regionId = EnvironmentEdgeManager.currentTime();
     int replicaId = 42;

     RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index 0800acd..f72fb66 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -57,7 +58,7 @@ public class TestRegionInfoDisplay {
     .setStartKey(startKey)
     .setEndKey(endKey)
     .setSplit(false)
-    .setRegionId(System.currentTimeMillis())
+    .setRegionId(EnvironmentEdgeManager.currentTime())
     .setReplicaId(1).build();
     checkEquality(ri, conf);
     Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY,
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
index dba1a36..ec2d29d 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -122,9 +123,9 @@ public class TestSnapshotFromAdmin {
     String snapshot = "snapshot";
     final TableName table = TableName.valueOf(name.getMethodName());
     // get start time
-    long start = System.currentTimeMillis();
+    long start = EnvironmentEdgeManager.currentTime();
     admin.snapshot(snapshot, table);
-    long finish = System.currentTimeMillis();
+    long finish = EnvironmentEdgeManager.currentTime();
     long elapsed = (finish - start);
     assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
     admin.close();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
index 6155bbd..aaedc78 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase;
 import com.google.errorprone.annotations.RestrictedApi;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -172,7 +174,7 @@ public abstract class ScheduledChore implements Runnable {
    */
   private synchronized void updateTimeTrackingBeforeRun() {
     timeOfLastRun = timeOfThisRun;
-    timeOfThisRun = System.currentTimeMillis();
+    timeOfThisRun = EnvironmentEdgeManager.currentTime();
   }

   /**
@@ -215,7 +217,7 @@ public abstract class ScheduledChore implements Runnable {
    * @return true if time is earlier or equal to current milli time
    */
   private synchronized boolean isValidTime(final long time) {
-    return time > 0 && time <= System.currentTimeMillis();
+    return time > 0 && time <= EnvironmentEdgeManager.currentTime();
   }

   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 68627c3..6f45d2d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -42,7 +42,7 @@ public class ThrottledInputStream extends InputStream {

   private final InputStream rawStream;
   private final long maxBytesPerSec;
-  private final long startTime = System.currentTimeMillis();
+  private final long startTime = EnvironmentEdgeManager.currentTime();

   private long bytesRead = 0;
   private long totalSleepTime = 0;
@@ -164,7 +164,7 @@
   * @return Read rate, in bytes/sec.
   */
  public long getBytesPerSec() {
-    long elapsed = (System.currentTimeMillis() - startTime) / 1000;
+    long elapsed = (EnvironmentEdgeManager.currentTime() - startTime) / 1000;
     if (elapsed == 0) {
       return bytesRead;
     } else {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index f1589ba..f36ff13 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -174,7 +174,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
     for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
       if (fs.isFile(path)) { // only process files, skip for directories
         File dst = new File(parentDirStr, "." + pathPrefix + "."
-          + path.getName() + "." + System.currentTimeMillis() + ".jar");
+          + path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
         fs.copyToLocalFile(path, new Path(dst.toString()));
         dst.deleteOnExit();

@@ -188,7 +188,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
           Matcher m = libJarPattern.matcher(entry.getName());
           if (m.matches()) {
             File file = new File(parentDirStr, "." + pathPrefix + "."
-              + path.getName() + "." + System.currentTimeMillis() + "." + m.group(1));
+              + path.getName() + "." + EnvironmentEdgeManager.currentTime() + "." + m.group(1));
             try (FileOutputStream outStream = new FileOutputStream(file)) {
               IOUtils.copyBytes(jarFile.getInputStream(entry), outStream, conf, true);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index 112af1e..fbf12ab 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -129,7 +129,7 @@ public class IdLock {
     Thread currentThread = Thread.currentThread();
     Entry entry = new Entry(id, currentThread);
     Entry existing;
-    long waitUtilTS = System.currentTimeMillis() + time;
+    long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
     long remaining = time;
     while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
       synchronized (existing) {
@@ -139,7 +139,7 @@ public class IdLock {
           while (existing.locked) {
             existing.wait(remaining);
             if (existing.locked) {
-              long currentTS = System.currentTimeMillis();
+              long currentTS = EnvironmentEdgeManager.currentTime();
               if (currentTS >= waitUtilTS) {
                 // time is up
                 return null;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
index f337b5f..fa26758 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Random64.java
@@ -108,7 +108,7 @@ public class Random64 {
     final int precision = 100000;
     final long totalTestCnt = defaultTotalTestCnt + precision;
     final int reportPeriod = 100 * precision;
-    final long startTime = System.currentTimeMillis();
+    final long startTime = EnvironmentEdgeManager.currentTime();

     System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);

@@ -130,7 +130,7 @@ public class Random64 {
         }

         if (cnt % reportPeriod == 0) {
-          long cost = System.currentTimeMillis() - startTime;
+          long cost = EnvironmentEdgeManager.currentTime() - startTime;
           long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
           System.out.println(
             String.format(
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
index 29f4ae3..1ffe7df 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java
@@ -118,7 +118,7 @@ public class ReflectionUtils {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
-        long now = System.currentTimeMillis();
+        long now = EnvironmentEdgeManager.currentTime();
         if (now - previousLogTime >= minInterval * 1000) {
           previousLogTime = now;
           dumpStack = true;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
index 54accef..d2f0371 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
@@ -71,7 +71,7 @@ public class Sleeper {
     if (this.stopper.isStopped()) {
       return;
     }
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     long currentSleepTime = sleepTime;
     while (currentSleepTime > 0) {
       long woke = -1;
@@ -83,7 +83,7 @@ public class Sleeper {
           sleepLock.wait(currentSleepTime);
         }
-        woke = System.currentTimeMillis();
+        woke = EnvironmentEdgeManager.currentTime();
         long slept = woke - now;
         if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
           LOG.warn("We slept {}ms instead of {}ms, this is likely due to a long " +
@@ -98,7 +98,7 @@ public class Sleeper {
         }
       }
       // Recalculate waitTime.
-      woke = (woke == -1)? System.currentTimeMillis(): woke;
+      woke = (woke == -1)? EnvironmentEdgeManager.currentTime() : woke;
       currentSleepTime = this.period - (woke - now);
     }
     synchronized(sleepLock) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
index 8b18595..55fc66b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java
@@ -149,7 +149,7 @@ public class Threads {
    * @param msToWait the amount of time to sleep in milliseconds
    */
   public static void sleepWithoutInterrupt(final long msToWait) {
-    long timeMillis = System.currentTimeMillis();
+    long timeMillis = EnvironmentEdgeManager.currentTime();
     long endTime = timeMillis + msToWait;
     boolean interrupted = false;
     while (timeMillis < endTime) {
@@ -158,7 +158,7 @@ public class Threads {
       } catch (InterruptedException ex) {
         interrupted = true;
       }
-      timeMillis = System.currentTimeMillis();
+      timeMillis = EnvironmentEdgeManager.currentTime();
     }

     if (interrupted) {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 7762330..8729a44 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -139,7 +140,7 @@ public class TestCellComparator {
    */
   @Test
   public void testMetaComparisons() throws Exception {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();

     // Meta compares
     Cell aaa = createByteBufferKeyValueFromKeyValue(new KeyValue(
@@ -176,7 +177,7 @@ public class TestCellComparator {
    */
   @Test
   public void testMetaComparisons2() {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     CellComparator c = MetaCellComparator.META_COMPARATOR;
     assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
       Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)),
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index ab574cf..337cc61 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -39,6 +39,7 @@ import java.util.TreeSet;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -144,7 +145,7 @@ public class TestKeyValue {

   @Test
   public void testMoreComparisons() {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();

     // Meta compares
     KeyValue aaa = new KeyValue(
@@ -174,7 +175,7 @@ public class TestKeyValue {
   @Test
   public void testMetaComparatorTableKeysWithCommaOk() {
     CellComparator c =
MetaCellComparator.META_COMPARATOR; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // meta keys values are not quite right. A users can enter illegal values // from shell when scanning meta. KeyValue a = new KeyValue(Bytes.toBytes("table,key,with,commas1,1234"), now); @@ -204,7 +205,7 @@ public class TestKeyValue { } private void metacomparisons(final CellComparatorImpl c) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); assertTrue(c.compare(new KeyValue( Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now), new KeyValue( @@ -221,7 +222,7 @@ public class TestKeyValue { } private void comparisons(final CellComparatorImpl c) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); assertTrue(c.compare(new KeyValue( Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now), new KeyValue( @@ -520,7 +521,7 @@ public class TestKeyValue { @Test public void testMetaKeyComparator() { CellComparator c = MetaCellComparator.META_COMPARATOR; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue a = new KeyValue(Bytes.toBytes("table1"), now); KeyValue b = new KeyValue(Bytes.toBytes("table2"), now); @@ -589,12 +590,12 @@ public class TestKeyValue { new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"), Bytes.toBytes("2")), new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"), - System.currentTimeMillis(), Bytes.toBytes("2"), + EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"), new Tag[] { new ArrayBackedTag((byte) 120, "tagA"), new ArrayBackedTag((byte) 121, Bytes.toBytes("tagB")) }), new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"), - System.currentTimeMillis(), Bytes.toBytes("2"), + EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"), new Tag[] { new ArrayBackedTag((byte) 0, "tagA") }), new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("1")) }; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java index 5302093..fd5b0b1 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/Waiter.java @@ -23,6 +23,7 @@ import static org.junit.Assert.fail; import java.text.MessageFormat; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -171,7 +172,7 @@ public final class Waiter { */ public static long waitFor(Configuration conf, long timeout, long interval, boolean failIfTimeout, Predicate predicate) { - long started = System.currentTimeMillis(); + long started = EnvironmentEdgeManager.currentTime(); long adjustedTimeout = (long) (getWaitForRatio(conf) * timeout); long mustEnd = started + adjustedTimeout; long remainderWait; @@ -183,7 +184,7 @@ public final class Waiter { LOG.info(MessageFormat.format("Waiting up to [{0}] milli-secs(wait.for.ratio=[{1}])", adjustedTimeout, getWaitForRatio(conf))); while (!(eval = predicate.evaluate()) - && (remainderWait = mustEnd - System.currentTimeMillis()) > 0) { + && (remainderWait = mustEnd - EnvironmentEdgeManager.currentTime()) > 0) { try { // handle tail case when remainder wait is less than one interval 
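
The IdLock and Waiter hunks above share one shape: compute an absolute deadline from the injectable clock once, then re-read the clock on every iteration and never sleep past the remainder. A condensed sketch of that pattern, using a hypothetical helper rather than any HBase API:

    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    final class DeadlineWait {
      /** Polls condition until it holds or timeoutMs elapses; true on success. */
      static boolean await(BooleanSupplier condition, long timeoutMs, long intervalMs)
          throws InterruptedException {
        long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
        while (!condition.getAsBoolean()) {
          long remaining = deadline - EnvironmentEdgeManager.currentTime();
          if (remaining <= 0) {
            return false;                              // time is up
          }
          Thread.sleep(Math.min(remaining, intervalMs)); // tail case: never oversleep
        }
        return true;
      }
    }

Because the deadline is an absolute timestamp, a test that injects a manual edge can step the clock straight past it and exercise the timeout branch without real waiting.
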
sleepInterval = Math.min(remainderWait, interval); @@ -197,7 +198,7 @@ public final class Waiter { if (!eval) { if (interrupted) { LOG.warn(MessageFormat.format("Waiting interrupted after [{0}] msec", - System.currentTimeMillis() - started)); + EnvironmentEdgeManager.currentTime() - started)); } else if (failIfTimeout) { String msg = getExplanation(predicate); fail(MessageFormat @@ -208,7 +209,7 @@ public final class Waiter { MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg); } } - return (eval || interrupted) ? (System.currentTimeMillis() - started) : -1; + return (eval || interrupted) ? (EnvironmentEdgeManager.currentTime() - started) : -1; } catch (Exception ex) { throw new RuntimeException(ex); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java index 6929403..8384b05 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestByteBufferArray.java @@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category; @Category({ MiscTests.class, SmallTests.class }) public class TestByteBufferArray { - private static final Random RANDOM = new Random(System.currentTimeMillis()); + private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime()); @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index dde0d27..113eae1 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -359,7 +359,7 @@ public class TestBytes extends TestCase { public void testToStringBytesBinaryReversible() { // let's run test with 1000 randomly generated byte arrays - Random rand = new Random(System.currentTimeMillis()); + Random rand = new Random(EnvironmentEdgeManager.currentTime()); byte[] randomBytes = new byte[1000]; for (int i = 0; i < 1000; i++) { rand.nextBytes(randomBytes); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java index 4f6da4e..98b14ac 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestThreads.java @@ -56,7 +56,7 @@ public class TestThreads { }); LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)"); sleeper.start(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); LOG.debug("Main thread: sleeping for 200 ms"); Threads.sleep(200); @@ -75,7 +75,7 @@ public class TestThreads { assertTrue("sleepWithoutInterrupt did not preserve the thread's " + "interrupted status", wasInterrupted.get()); - long timeElapsed = System.currentTimeMillis() - startTime; + long timeElapsed = EnvironmentEdgeManager.currentTime() - startTime; // We expect to wait at least SLEEP_TIME_MS, but we can wait more if there is a GC. 
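
One caveat the TestThreads hunk above hints at: code that really sleeps while polling the clock (Threads.sleepWithoutInterrupt, Sleeper) must not run under a frozen manual edge, or the loop never observes time passing. A sketch assuming the IncrementingEnvironmentEdge utility from org.apache.hadoop.hbase.util, which advances the clock on every currentTime() call:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

    public class AdvancingClockSketch {
      public static void main(String[] args) {
        // Every read of the clock moves it forward, so clock-polling
        // sleep loops still terminate under the injected edge.
        EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
        try {
          long t0 = EnvironmentEdgeManager.currentTime();
          long t1 = EnvironmentEdgeManager.currentTime();
          System.out.println(t1 - t0);  // 1: the edge self-advances
        } finally {
          EnvironmentEdgeManager.reset();
        }
      }
    }

Under the real clock, elapsed-time assertions like the one above still need a tolerance (TOLERANCE_MS) because GC pauses can stretch any measured interval.
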
assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " + " sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 529973d..08e127f 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.UserGroupInformation; @@ -103,7 +104,7 @@ public class TestSecureExport { private static final byte[] ROW3 = Bytes.toBytes("row3"); private static final byte[] QUAL = Bytes.toBytes("qual"); private static final String LOCALHOST = "localhost"; - private static final long NOW = System.currentTimeMillis(); + private static final long NOW = EnvironmentEdgeManager.currentTime(); // user granted with all global permission private static final String USER_ADMIN = "admin"; // user is table owner. will have all permissions on table diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java index ecc2559..9686e4b 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.Timer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -77,14 +78,14 @@ public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, Mast TableDescriptor desc, RegionInfo[] regions) throws IOException { // we rely on the fact that there is only 1 instance of our MasterObserver. We keep track of // when the operation starts before the operation is executing. - this.createTableStartTime = System.currentTimeMillis(); + this.createTableStartTime = EnvironmentEdgeManager.currentTime(); } @Override public void postCreateTable(ObserverContext ctx, TableDescriptor desc, RegionInfo[] regions) throws IOException { if (this.createTableStartTime > 0) { - long time = System.currentTimeMillis() - this.createTableStartTime; + long time = EnvironmentEdgeManager.currentTime() - this.createTableStartTime; LOG.info("Create table took: " + time); // Update the timer metric for the create table operation duration. 
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java index c7ebf0d..b277507 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.math.IntMath; @@ -225,7 +226,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs private long getUniqueTimestamp(byte[] row) { int slot = Bytes.hashCode(row) & mask; MutableLong lastTimestamp = lastTimestamps[slot]; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); synchronized (lastTimestamp) { long pt = lastTimestamp.longValue() >> 10; if (now > pt) { diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index 98514cb..5705c99 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; @@ -112,7 +113,7 @@ public class TestZooKeeperScanPolicyObserver { @Test public void test() throws IOException, KeeperException, InterruptedException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); put(0, 100, now - 10000); assertValueEquals(0, 100); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java index 0301664..2846c25 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl; import org.apache.hadoop.hbase.hbtop.terminal.impl.batch.BatchTerminal; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +99,7 @@ public class Screen implements Closeable { nextScreenView = currentScreenView.handleKeyPress(keyPress); } else { if (timerTimestamp != null) { - long now = 
System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (timerTimestamp <= now) { // Dispatch the timer to the current screen timerTimestamp = null; @@ -131,7 +132,7 @@ public class Screen implements Closeable { } public void setTimer(long delay) { - timerTimestamp = System.currentTimeMillis() + delay; + timerTimestamp = EnvironmentEdgeManager.currentTime() + delay; } public void cancelTimer() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java index bdd52a7..9cbcd18 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.hadoop.hbase.hbtop.field.FieldValue; import org.apache.hadoop.hbase.hbtop.mode.DrillDownInfo; import org.apache.hadoop.hbase.hbtop.mode.Mode; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -146,7 +147,7 @@ public class TopScreenModel { private void refreshSummary(ClusterMetrics clusterMetrics) { String currentTime = ISO_8601_EXTENDED_TIME_FORMAT - .format(System.currentTimeMillis()); + .format(EnvironmentEdgeManager.currentTime()); String version = clusterMetrics.getHBaseVersion(); String clusterId = clusterMetrics.getClusterId(); int liveServers = clusterMetrics.getLiveServerMetrics().size(); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java index 6fbd23f..e4e3cae 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.hbtop.screen.help.HelpScreenView; import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenView; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -92,7 +93,7 @@ public class TopScreenPresenter { public long refresh(boolean force) { if (!force) { - long delay = System.currentTimeMillis() - lastRefreshTimestamp; + long delay = EnvironmentEdgeManager.currentTime() - lastRefreshTimestamp; if (delay < refreshDelay.get()) { return refreshDelay.get() - delay; } @@ -114,7 +115,7 @@ public class TopScreenPresenter { topScreenView.refreshTerminal(); - lastRefreshTimestamp = System.currentTimeMillis(); + lastRefreshTimestamp = EnvironmentEdgeManager.currentTime(); iterations++; return refreshDelay.get(); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java index a1d4173..cd49f7e 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java @@ -28,7 +28,7 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hbase.HBaseInterfaceAudience; - +import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -43,7 +43,7 @@ public class NoCacheFilter implements Filter { throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.setHeader("Cache-Control", "no-cache"); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); httpRes.addDateHeader("Expires", now); httpRes.addDateHeader("Date", now); httpRes.addHeader("Pragma", "no-cache"); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java index 31fb9e3..f1fed9e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ChaosZKClient.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.AsyncCallback; @@ -118,9 +119,9 @@ public class ChaosZKClient { CreateMode.EPHEMERAL_SEQUENTIAL, submitTaskCallback, taskObject); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); - while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) { + while ((EnvironmentEdgeManager.currentTime() - start) < TASK_EXECUTION_TIMEOUT) { if(taskStatus != null) { return taskStatus; } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 67f03c0..49c26f1 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; @@ -251,9 +252,9 @@ public class DistributedHBaseCluster extends HBaseCluster { private void waitForServiceToStop(ServiceType service, ServerName serverName, long timeout) throws IOException { LOG.info("Waiting for service: {} to stop: {}", service, serverName.getServerName()); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); - while ((System.currentTimeMillis() - start) < timeout) { + while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { if (!clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) { return; } @@ -265,9 +266,9 @@ public class DistributedHBaseCluster extends HBaseCluster { private void waitForServiceToStart(ServiceType service, ServerName serverName, long timeout) throws IOException { LOG.info("Waiting for service: {} to start: ", service, serverName.getServerName()); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); - while ((System.currentTimeMillis() - start) < timeout) { + while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { if (clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) { return; } @@ 
-308,8 +309,8 @@ public class DistributedHBaseCluster extends HBaseCluster { @Override public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < timeout) { + long start = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() - start < timeout) { try { getMasterAdminService(); return true; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java index 1850e91..c5bdc51 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java @@ -25,6 +25,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.apache.hadoop.hbase.util.LoadTestTool; import org.apache.hadoop.hbase.util.Threads; @@ -162,15 +163,15 @@ public class IntegrationTestIngest extends IntegrationTestBase { LOG.info("Cluster size:" + util.getHBaseClusterInterface() .getClusterMetrics().getLiveServerMetrics().size()); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName()); long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime); long startKey = 0; long numKeys = getNumKeys(keysPerServerPerIter); - while (System.currentTimeMillis() - start < 0.9 * runtime) { + while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) { LOG.info("Intended run time: " + (runtime/60000) + " min, left:" + - ((runtime - (System.currentTimeMillis() - start))/60000) + " min"); + ((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min"); int ret = -1; ret = loadTool.run(getArgsForLoadTestTool("-write", diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java index b8dae55..2ab0b80 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java @@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm; import org.junit.After; @@ -115,11 +116,11 @@ public class IntegrationTestManyRegions { byte[][] splits = algo.split(REGION_COUNT); LOG.info(String.format("Creating table %s with %d splits.", TABLE_NAME, REGION_COUNT)); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { admin.createTable(desc, splits); LOG.info(String.format("Pre-split table created successfully in %dms.", - (System.currentTimeMillis() - startTime))); + (EnvironmentEdgeManager.currentTime() - startTime))); } catch (IOException e) { LOG.error("Failed to create table", e); } diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java index e4d961a..a307aed 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.ConstantDelayQueue; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LoadTestTool; import org.apache.hadoop.hbase.util.MultiThreadedUpdater; import org.apache.hadoop.hbase.util.MultiThreadedWriter; @@ -163,15 +164,15 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge getConf().getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000) + 1000); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName()); long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime); long startKey = 0; long numKeys = getNumKeys(keysPerServerPerIter); - while (System.currentTimeMillis() - start < 0.9 * runtime) { + while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) { LOG.info("Intended run time: " + (runtime/60000) + " min, left:" + - ((runtime - (System.currentTimeMillis() - start))/60000) + " min"); + ((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min"); int verifyPercent = 100; int updatePercent = 20; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java index bfb8566..2fe3e22 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.StripeStoreConfig; import org.apache.hadoop.hbase.regionserver.StripeStoreEngine; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; import org.apache.hadoop.hbase.util.MultiThreadedAction; import org.apache.hadoop.hbase.util.MultiThreadedReader; @@ -204,14 +205,14 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool { if (preloadKeys > 0) { MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME); - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); preloader.start(0, startKey, writeThreads); preloader.waitForFinish(); if (preloader.getNumWriteFailures() > 0) { throw new IOException("Preload failed"); } int waitTime = (int)Math.min(preloadKeys / 100, 30000); // arbitrary - status(description + " preload took " + (System.currentTimeMillis()-time)/1000 + status(description + " preload took " + (EnvironmentEdgeManager.currentTime()-time)/1000 + "sec; sleeping for " + waitTime/1000 + "sec for store to stabilize"); Thread.sleep(waitTime); } @@ -221,7 +222,7 @@ public class 
StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool { // reader.getMetrics().enable(); reader.linkToWriter(writer); - long testStartTime = System.currentTimeMillis(); + long testStartTime = EnvironmentEdgeManager.currentTime(); writer.start(startKey, endKey, writeThreads); reader.start(startKey, endKey, readThreads); writer.waitForFinish(); @@ -255,7 +256,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool { } } LOG.info("Performance data dump for " + description + " test: \n" + perfDump.toString());*/ - status(description + " test took " + (System.currentTimeMillis()-testStartTime)/1000 + "sec"); + status(description + " test took " + + (EnvironmentEdgeManager.currentTime() - testStartTime) / 1000 + "sec"); Assert.assertTrue(success); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java index df6a2c8..a1cab78 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,9 +68,8 @@ public class MoveRegionsOfTableAction extends Action { Collections.shuffle(regions); - long start = System.currentTimeMillis(); - for (RegionInfo regionInfo:regions) { - + long start = EnvironmentEdgeManager.currentTime(); + for (RegionInfo regionInfo : regions) { // Don't try the move if we're stopping if (context.isStopping()) { return; @@ -82,7 +82,7 @@ public class MoveRegionsOfTableAction extends Action { // put a limit on max num regions. 
Otherwise, this won't finish // with a sleep time of 10sec, 100 regions will finish in 16min - if (System.currentTimeMillis() - start > maxTime) { + if (EnvironmentEdgeManager.currentTime() - start > maxTime) { break; } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java index ea5729e..2f39233 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/SnapshotTableAction.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,7 @@ public class SnapshotTableAction extends Action { @Override public void perform() throws Exception { HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility(); - String snapshotName = tableName + "-it-" + System.currentTimeMillis(); + String snapshotName = tableName + "-it-" + EnvironmentEdgeManager.currentTime(); Admin admin = util.getAdmin(); // Don't try the snapshot if we're stopping diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java index 9ffef16..ae1c65e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicPolicy.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.chaos.policies; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; /** A policy which does stuff every time interval. 
*/ @@ -37,11 +38,11 @@ public abstract class PeriodicPolicy extends Policy { Threads.sleep(jitter); while (!isStopped()) { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); runOneIteration(); if (isStopped()) return; - long sleepTime = periodMs - (System.currentTimeMillis() - start); + long sleepTime = periodMs - (EnvironmentEdgeManager.currentTime() - start); if (sleepTime > 0) { LOG.info("Sleeping for {} ms", sleepTime); Threads.sleep(sleepTime); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java index 065cec9..39e0911 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.Before; @@ -127,7 +128,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase Configuration conf = getConf(); TableName tableName = TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME)); String snapshotName = conf.get(SNAPSHOT_NAME_KEY, tableName.getQualifierAsString() - + "_snapshot_" + System.currentTimeMillis()); + + "_snapshot_" + EnvironmentEdgeManager.currentTime()); int numRegions = conf.getInt(NUM_REGIONS_KEY, DEFAULT_NUM_REGIONS); String tableDirStr = conf.get(TABLE_DIR_KEY); Path tableDir; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index 466618e..02e6383 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Random64; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.wal.WALEdit; @@ -711,9 +712,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { while (numQueries < maxQueries) { numQueries++; byte[] prev = node.prev; - long t1 = System.currentTimeMillis(); + long t1 = EnvironmentEdgeManager.currentTime(); node = getNode(prev, table, node); - long t2 = System.currentTimeMillis(); + long t2 = EnvironmentEdgeManager.currentTime(); if (node == null) { LOG.error("ConcurrentWalker found UNDEFINED NODE: " + Bytes.toStringBinary(prev)); context.getCounter(Counts.UNDEFINED).increment(1l); @@ -1702,10 +1703,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { scan.setBatch(1); scan.addColumn(FAMILY_NAME, COLUMN_PREV); - long t1 = System.currentTimeMillis(); + long t1 = EnvironmentEdgeManager.currentTime(); ResultScanner scanner = table.getScanner(scan); 
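
The PeriodicPolicy hunk above keeps a fixed cadence by subtracting the iteration's own cost from the period before sleeping. A self-contained sketch of that compensation, as a hypothetical helper:

    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.Threads;

    final class FixedPeriodLoop {
      /** Runs task about once per periodMs, deducting the task's own runtime. */
      static void run(Runnable task, long periodMs, BooleanSupplier stopped) {
        while (!stopped.getAsBoolean()) {
          long start = EnvironmentEdgeManager.currentTime();
          task.run();
          long sleepTime = periodMs - (EnvironmentEdgeManager.currentTime() - start);
          if (sleepTime > 0) {
            Threads.sleep(sleepTime);   // overrunning iterations skip the sleep
          }
        }
      }
    }

An iteration that takes longer than the period simply starts the next one immediately, which is why the hunk above only sleeps when sleepTime is positive.
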
Result result = scanner.next(); - long t2 = System.currentTimeMillis(); + long t2 = EnvironmentEdgeManager.currentTime(); scanner.close(); if ( result != null) { @@ -1785,9 +1786,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { while (node != null && node.prev.length != NO_KEY.length && numQueries < maxQueries) { byte[] prev = node.prev; - long t1 = System.currentTimeMillis(); + long t1 = EnvironmentEdgeManager.currentTime(); node = getNode(prev, table, node); - long t2 = System.currentTimeMillis(); + long t2 = EnvironmentEdgeManager.currentTime(); if (logEvery > 0 && numQueries % logEvery == 0) { System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev)); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java index 64a9540..9c91796 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadCommonCrawl.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.test.util.warc.WARCInputFormat; import org.apache.hadoop.hbase.test.util.warc.WARCRecord; import org.apache.hadoop.hbase.test.util.warc.WARCWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.LongWritable; @@ -582,7 +583,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase { String contentType = warcHeader.getField("WARC-Identified-Payload-Type"); if (contentType != null) { LOG.debug("Processing record id=" + recordID + ", targetURI=\"" + targetURI + "\""); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Make row key diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java index b5e99d2..69ce3e4 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LoadTestTool; import org.apache.hadoop.hbase.util.MultiThreadedReader; import org.apache.hadoop.hbase.util.Threads; @@ -143,7 +144,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr LOG.info("Cluster size:"+ util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size()); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName()); long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime); long startKey = 0; @@ -197,7 +198,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr // set the intended run time for the reader. 
The reader will do read requests // to random keys for this amount of time. - long remainingTime = runtime - (System.currentTimeMillis() - start); + long remainingTime = runtime - (EnvironmentEdgeManager.currentTime() - start); if (remainingTime <= 0) { LOG.error("The amount of time left for the test to perform random reads is " + "non-positive. Increase the test execution time via " diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index b68ea50..046da3a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -88,7 +89,7 @@ public class TableRecordReaderImpl { } if (logScannerActivity) { LOG.info("Current scan=" + currentScan.toString()); - timestamp = System.currentTimeMillis(); + timestamp = EnvironmentEdgeManager.currentTime(); rowcount = 0; } } @@ -196,7 +197,7 @@ public class TableRecordReaderImpl { if (logScannerActivity) { rowcount ++; if (rowcount >= logPerRowCount) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took " + (now-timestamp) + "ms to process " + rowcount + " rows"); timestamp = now; @@ -235,7 +236,7 @@ public class TableRecordReaderImpl { return false; } catch (IOException ioe) { if (logScannerActivity) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took " + (now-timestamp) + "ms to process " + rowcount + " rows"); LOG.info(ioe.toString(), ioe); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 02b5768..17143e4 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -437,8 +437,10 @@ public class HFileOutputFormat2 private void close(final StoreFileWriter w) throws IOException { if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); - w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(BULKLOAD_TIME_KEY, + Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + w.appendFileInfo(BULKLOAD_TASK_KEY, + Bytes.toBytes(context.getTaskAttemptID().toString())); w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); w.appendTrackedTimestampsToMetadata(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index 58d9184..ba0be03 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; @@ -765,7 +766,7 @@ public class ImportTsv extends Configured implements Tool { } // If timestamp option is not specified, use current system time. - long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis()); + long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTime()); // Set it back to replace invalid timestamp (non-numeric) with current // system time diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java index 6722501..749d357 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; @@ -492,7 +493,7 @@ public class SyncTable extends Configured implements Tool { sourceCell.getFamilyOffset(), sourceCell.getFamilyLength()) .setQualifier(sourceCell.getQualifierArray(), sourceCell.getQualifierOffset(), sourceCell.getQualifierLength()) - .setTimestamp(System.currentTimeMillis()) + .setTimestamp(EnvironmentEdgeManager.currentTime()) .setValue(sourceCell.getValueArray(), sourceCell.getValueOffset(), sourceCell.getValueLength()).build(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 47a9ce4..9c58a4c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.TaskAttemptContext; @@ -90,7 +91,7 @@ public class TableRecordReaderImpl { this.scanner = this.htable.getScanner(currentScan); if (logScannerActivity) { LOG.info("Current scan=" + currentScan.toString()); - timestamp = System.currentTimeMillis(); + timestamp = EnvironmentEdgeManager.currentTime(); rowcount = 0; } } @@ -212,7 +213,7 @@ public class TableRecordReaderImpl { if (logScannerActivity) { rowcount ++; if (rowcount >= logPerRowCount) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", 
(now - timestamp), rowcount); timestamp = now; rowcount = 0; @@ -264,7 +265,7 @@ public class TableRecordReaderImpl { } catch (IOException ioe) { updateCounters(); if (logScannerActivity) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); LOG.info(ioe.toString(), ioe); String lastRow = lastSuccessfulRow == null ? diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 1d38f3d..a6fef42 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; @@ -342,8 +343,8 @@ public class WALPlayer extends Configured implements Tool { conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = - Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis())); + Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index a95a9f4..c5650c3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.Pair; @@ -416,7 +417,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { int reportBytes = 0; int bytesRead; - long stime = System.currentTimeMillis(); + long stime = EnvironmentEdgeManager.currentTime(); while ((bytesRead = in.read(buffer)) > 0) { out.write(buffer, 0, bytesRead); totalBytesWritten += bytesRead; @@ -431,7 +432,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool { reportBytes = 0; } } - long etime = System.currentTimeMillis(); + long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); context.setStatus(String.format(statusMessage, diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index e6df491..9cbae33 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.trace.SpanReceiverHost; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.ByteArrayHashKey; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.hadoop.hbase.util.Hash; import org.apache.hadoop.hbase.util.MurmurHash; @@ -1134,7 +1135,7 @@ public class PerformanceEvaluation extends Configured implements Tool { static abstract class TestBase { // Below is make it so when Tests are all running in the one // jvm, that they each have a differently seeded Random. - private static final Random randomSeed = new Random(System.currentTimeMillis()); + private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime()); private static long nextRandomSeed() { return randomSeed.nextLong(); @@ -2391,7 +2392,7 @@ public class PerformanceEvaluation extends Configured implements Tool { // write the serverName columns MetaTableAccessor.updateRegionLocation(connection, regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, - System.currentTimeMillis()); + EnvironmentEdgeManager.currentTime()); return true; } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java index 5392a74..09c5cf5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.wal.WAL; @@ -114,7 +115,7 @@ public class TestCellBasedImportExport2 { private static String FQ_OUTPUT_DIR; private static final String EXPORT_BATCH_SIZE = "100"; - private static final long now = System.currentTimeMillis(); + private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); @@ -588,7 +589,7 @@ public class TestCellBasedImportExport2 { @Test public void testExportScan() throws Exception { int version = 100; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long endTime = startTime + 1; String prefix = "row"; String label_0 = "label_0"; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 794a440..309ecc8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import 
org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; @@ -67,7 +68,7 @@ public class TestCellCounter { private static Path FQ_OUTPUT_DIR; private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator + "output"; - private static long now = System.currentTimeMillis(); + private static long now = EnvironmentEdgeManager.currentTime(); @Rule public TestName name = new TestName(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 5e9bb11..0271983 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.mob.MobTestUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; @@ -292,7 +293,7 @@ public class TestCopyTable { p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23")); t.put(p); - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells", "--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000), "--versions=1", sourceTable.getNameAsString() }; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index aaa49e6..ef053f6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -130,7 +131,7 @@ public class TestImportExport { private static String FQ_OUTPUT_DIR; private static final String EXPORT_BATCH_SIZE = "100"; - private static final long now = System.currentTimeMillis(); + private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); @@ -607,7 +608,7 @@ public class TestImportExport { @Test public void testExportScan() throws Exception { int version = 100; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long endTime = 
startTime + 1; String prefix = "row"; String label_0 = "label_0"; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index add1b58..5793dfa 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LauncherSecurityManager; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; @@ -249,12 +250,12 @@ public class TestRowCounter { // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); - ts = System.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); table.put(put1); Thread.sleep(100); - ts = System.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); put2.addColumn(family, col1, ts, Bytes.toBytes("val2")); put3.addColumn(family, col1, ts, Bytes.toBytes("val3")); table.put(put2); @@ -302,9 +303,9 @@ public class TestRowCounter { rowCounter.setConf(TEST_UTIL.getConfiguration()); args = Arrays.copyOf(args, args.length+1); args[args.length-1]="--expectedCount=" + expectedCount; - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); int result = rowCounter.run(args); - long duration = System.currentTimeMillis() - start; + long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); assertTrue(result==0); } @@ -318,9 +319,9 @@ public class TestRowCounter { */ private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) throws Exception { Job job = RowCounter.createSubmittableJob(TEST_UTIL.getConfiguration(), args); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); job.waitForCompletion(true); - long duration = System.currentTimeMillis() - start; + long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); assertTrue(job.isSuccessful()); Counter counter = job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS); @@ -486,12 +487,12 @@ public class TestRowCounter { // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM)); - ts = System.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); table.put(put1); Thread.sleep(100); - ts = System.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); put2.addColumn(family, col1, ts, Bytes.toBytes("val2")); put3.addColumn(family, col1, ts, Bytes.toBytes("val3")); table.put(put2); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index d84f80d..caacfc6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Counters; import org.junit.AfterClass; import org.junit.Assert; @@ -159,7 +160,7 @@ public class TestSyncTable { final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableIgnoreTimestampsTrue"); - long current = System.currentTimeMillis(); + long current = EnvironmentEdgeManager.currentTime(); writeTestData(sourceTableName, targetTableName, current - 1000, current); hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); Counters syncCounters = syncTables(sourceTableName, targetTableName, @@ -490,7 +491,7 @@ public class TestSyncTable { int sourceRegions = 10; int targetRegions = 6; if (ArrayUtils.isEmpty(timestamps)) { - long current = System.currentTimeMillis(); + long current = EnvironmentEdgeManager.currentTime(); timestamps = new long[]{current,current}; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 8d21c39..48e8518 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -44,7 +45,7 @@ public class TestWALInputFormat { public void testAddFile() { List lfss = new ArrayList<>(); LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Mockito.when(lfs.getPath()).thenReturn(new Path("/name." 
+ now)); WALInputFormat.addFile(lfss, lfs, now, now); assertEquals(1, lfss.size()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 0fa73f6..fb1b168 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -41,8 +41,8 @@ import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALFactory; @@ -131,7 +131,7 @@ public class TestWALRecordReader { WAL log = walfactory.getWAL(info); // This test depends on timestamp being millisecond based and the filename of the WAL also // being millisecond based. - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); WALEdit edit = new WALEdit(); edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); @@ -145,7 +145,7 @@ public class TestWALRecordReader { LOG.info("Past 1st WAL roll " + log.toString()); Thread.sleep(1); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value)); @@ -195,21 +195,24 @@ public class TestWALRecordReader { byte [] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - System.currentTimeMillis(), value)); - long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit); + EnvironmentEdgeManager.currentTime(), value)); + long txid = log.appendData(info, + getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(1); // make sure 2nd log gets a later timestamp - long secondTs = System.currentTimeMillis(); + long secondTs = EnvironmentEdgeManager.currentTime(); log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value)); - txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), + EnvironmentEdgeManager.currentTime(), value)); + txid = log.appendData(info, + getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); walfactory.shutdown(); - long thirdTs = System.currentTimeMillis(); + long thirdTs = EnvironmentEdgeManager.currentTime(); // should have 2 log files now WALInputFormat input = new WALInputFormat(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index 6869de9..04b766d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ 
-56,6 +56,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.mapreduce.Job; import org.junit.AfterClass; import org.junit.Before; @@ -283,20 +284,20 @@ public class TestVerifyReplication extends TestReplicationBase { // Take source and target tables snapshot Path rootDir = CommonFSUtils.getRootDir(CONF1); FileSystem fs = rootDir.getFileSystem(CONF1); - String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); - String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName, Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = UTIL1.getRandomDir().toString(); - String tmpPath2 = "/tmp" + System.currentTimeMillis(); + String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, @@ -320,11 +321,11 @@ public class TestVerifyReplication extends TestReplicationBase { Delete delete = new Delete(put.getRow()); htable2.delete(delete); - sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true); - peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName, Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); @@ -388,20 +389,20 @@ public class TestVerifyReplication extends TestReplicationBase { // Take source and target tables snapshot Path rootDir = CommonFSUtils.getRootDir(CONF1); FileSystem fs = rootDir.getFileSystem(CONF1); - String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); - String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = 
UTIL1.getRandomDir().toString(); - String tmpPath2 = "/tmp" + System.currentTimeMillis(); + String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), "--sourceSnapshotName=" + sourceSnapshotName, @@ -426,11 +427,11 @@ public class TestVerifyReplication extends TestReplicationBase { Delete delete = new Delete(put.getRow()); htable3.delete(delete); - sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); - peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index 191b76b..51a0748 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -105,7 +106,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { // normal Batch tests byte[] qualifierName = Bytes.toBytes("f1"); Put put = new Put(Bytes.toBytes("r1")); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002")); htable1.put(put); put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001")); @@ -169,7 +170,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { // normal Batch tests byte[] qualifierName = Bytes.toBytes("f1"); Put put = new Put(Bytes.toBytes("r1")); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1")); htable1.put(put); put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2")); @@ -286,20 +287,20 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { // Take source and target tables snapshot Path rootDir = CommonFSUtils.getRootDir(CONF1); FileSystem fs = rootDir.getFileSystem(CONF1); - String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); - String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + String peerSnapshotName = 
"peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName, Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String temPath1 = UTIL1.getRandomDir().toString(); - String temPath2 = "/tmp" + System.currentTimeMillis(); + String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, @@ -323,11 +324,11 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase { Delete delete = new Delete(put.getRow()); htable2.delete(delete); - sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true); - peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName, Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 639f686..1609865 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.mapreduce.Job; import org.junit.AfterClass; @@ -165,14 +166,14 @@ public class TestVerifyReplicationCrossDiffHdfs { public void testVerifyRepBySnapshot() throws Exception { Path rootDir = CommonFSUtils.getRootDir(conf1); FileSystem fs = rootDir.getFileSystem(conf1); - String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis(); + String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME, new String(FAMILY), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(conf2); FileSystem peerFs = peerRootDir.getFileSystem(conf2); - String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis(); + String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME, new String(FAMILY), peerSnapshotName, peerRootDir, peerFs, true); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 8363fb6..3560ca4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -328,13 +329,14 @@ public class TestExportSnapshot { private Path getHdfsDestinationDir() { Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis()); + Path path = new Path(new Path(rootDir, "export-test"), "export-" + + EnvironmentEdgeManager.currentTime()); LOG.info("HDFS export destination path: " + path); return path; } static Path getLocalDestinationDir(HBaseTestingUtility htu) { - Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis()); + Path path = htu.getDataTestDir("local-export-" + EnvironmentEdgeManager.currentTime()); try { FileSystem fs = FileSystem.getLocal(htu.getConfiguration()); LOG.info("Local export destination path: " + path); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index d476025..2ab9f11 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -115,7 +116,7 @@ public class TestExportSnapshotV1NoCluster { static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir) throws IOException { Path path = new Path(new Path(testDir, "export-test"), - "export-" + System.currentTimeMillis()).makeQualified(fs.getUri(), + "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(), fs.getWorkingDirectory()); LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(), fs.getWorkingDirectory(), testDir); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 0932cf4..b1dc1a4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -284,7 +284,7 @@ public class ProcedureExecutor { } long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL); long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL); - if (retainer.isExpired(System.currentTimeMillis(), evictTtl, evictAckTtl)) { + if (retainer.isExpired(EnvironmentEdgeManager.currentTime(), evictTtl, evictAckTtl)) { LOG.debug("Procedure {} has already been finished and expired, skip force updating", procId); 
return; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index e59d566..d2d661f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase; import org.apache.hadoop.hbase.procedure2.util.ByteSlot; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -829,12 +830,12 @@ public class WALProcedureStore extends ProcedureStoreBase { // Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing syncMaxSlot = runningProcCount; assert syncMaxSlot > 0 : "unexpected syncMaxSlot=" + syncMaxSlot; - final long syncWaitSt = System.currentTimeMillis(); + final long syncWaitSt = EnvironmentEdgeManager.currentTime(); if (slotIndex != syncMaxSlot) { slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS); } - final long currentTs = System.currentTimeMillis(); + final long currentTs = EnvironmentEdgeManager.currentTime(); final long syncWaitMs = currentTs - syncWaitSt; final float rollSec = getMillisFromLastRoll() / 1000.0f; final float syncedPerSec = totalSyncedToStore / rollSec; @@ -979,7 +980,7 @@ public class WALProcedureStore extends ProcedureStoreBase { } public long getMillisFromLastRoll() { - return (System.currentTimeMillis() - lastRollTs.get()); + return (EnvironmentEdgeManager.currentTime() - lastRollTs.get()); } void periodicRollForTesting() throws IOException { @@ -1103,7 +1104,7 @@ public class WALProcedureStore extends ProcedureStoreBase { stream = newStream; flushLogId = logId; totalSynced.set(0); - long rollTs = System.currentTimeMillis(); + long rollTs = EnvironmentEdgeManager.currentTime(); lastRollTs.set(rollTs); logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs)); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index b121c07..f56cdb3 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.Before; @@ -103,9 +104,9 @@ public class TestProcedureSchedulerConcurrency { } } if (wakeCount.get() != oldWakeCount) { - lastUpdate = System.currentTimeMillis(); + lastUpdate = EnvironmentEdgeManager.currentTime(); } else if (wakeCount.get() >= NRUNS && - (System.currentTimeMillis() - lastUpdate) > WAIT_THRESHOLD) { + (EnvironmentEdgeManager.currentTime() - lastUpdate) > 
WAIT_THRESHOLD) { break; } Threads.sleepWithoutInterrupt(25); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java index cb31f02..17e4376 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java @@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -159,7 +159,8 @@ public abstract class ProcedureStorePerformanceEvaluation future : futures) { - long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - System.currentTimeMillis(); + long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - + EnvironmentEdgeManager.currentTime(); failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE)); } } catch (Exception e) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java index 1d681e9..90d1b0a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.procedure2.store.wal; -import static java.lang.System.currentTimeMillis; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -36,7 +34,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -166,7 +164,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { List procStates = shuffleProcWriteSequence(); TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE; - long startTime = currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long lastTime = startTime; for (int i = 0; i < procStates.size(); ++i) { int procId = procStates.get(i); @@ -181,14 +179,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { store.update(procs[procId]); } if (i > 0 && i % numProcsPerWal == 0) { - long currentTime = currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); System.out.println("Forcing WAL roll. 
Time taken on last WAL: " + (currentTime - lastTime) / 1000.0f + " sec"); store.rollWriterForTesting(); lastTime = currentTime; } } - long timeTaken = currentTimeMillis() - startTime; + long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : " + StringUtils.humanTimeDiff(timeTaken) + "\n\n"); } @@ -199,9 +197,9 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { store.start(1); store.recoverLease(); - long startTime = currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); store.load(loader); - long timeTaken = System.currentTimeMillis() - startTime; + long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; System.out.println("******************************************"); System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec"); System.out.println("******************************************"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index dfcb8e9..ca7af7d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.rest.filter.GzipFilter; import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Strings; @@ -398,7 +399,7 @@ public class RESTServer implements Constants { // Put up info server. 
int port = conf.getInt("hbase.rest.info.port", 8085); if (port >= 0) { - conf.setLong("startcode", System.currentTimeMillis()); + conf.setLong("startcode", EnvironmentEdgeManager.currentTime()); String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0"); this.infoServer = new InfoServer("rest", a, port, false, conf); this.infoServer.setAttribute("hbase.conf", conf); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index df4e6ad..d31b4b1 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; import org.apache.hadoop.hbase.security.visibility.Authorizations; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -104,7 +105,7 @@ public class ScannerResultGenerator extends ResultGenerator { } scanner = table.getScanner(scan); cached = null; - id = Long.toString(System.currentTimeMillis()) + + id = Long.toString(EnvironmentEdgeManager.currentTime()) + Integer.toHexString(scanner.hashCode()); } finally { table.close(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 05a4cac..7707733 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -44,6 +44,7 @@ import javax.net.ssl.SSLContext; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; @@ -332,7 +333,7 @@ public class Client { method.addHeader(header); } } - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); if (resp != null) EntityUtils.consumeQuietly(resp.getEntity()); resp = httpClient.execute(method); if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { @@ -342,7 +343,7 @@ public class Client { resp = httpClient.execute(method); } - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index c3f9c7c..fe66d9b 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.rest.client.Cluster; import 
org.apache.hadoop.hbase.rest.client.RemoteAdmin; import org.apache.hadoop.hbase.util.ByteArrayHashKey; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Hash; import org.apache.hadoop.hbase.util.MurmurHash; import org.apache.hadoop.hbase.util.Pair; @@ -840,7 +841,7 @@ public class PerformanceEvaluation extends Configured implements Tool { // Below makes it so that when Tests are all running in the one // jvm, they each have a differently seeded Random. private static final Random randomSeed = - new Random(System.currentTimeMillis()); + new Random(EnvironmentEdgeManager.currentTime()); private static long nextRandomSeed() { return randomSeed.nextLong(); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java index 4d0359d..adb5ae6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -155,14 +156,14 @@ public class TestRemoteAdminRetries { } private void testTimedOutCall(CallExecutor callExecutor) throws Exception { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); try { callExecutor.run(); fail("should be timeout exception!"); } catch (IOException e) { assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString())); } - assertTrue((System.currentTimeMillis() - start) > MAX_TIME); + assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME); } private static interface CallExecutor { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java index 247897f..6338df8 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -182,14 +183,14 @@ public class TestRemoteHTableRetries { } private void testTimedOutCall(CallExecutor callExecutor) throws Exception { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); try { callExecutor.run(); fail("should be timeout exception!"); } catch (IOException e) { assertTrue(Pattern.matches(".*request timed out", e.toString())); } - assertTrue((System.currentTimeMillis() - start) > MAX_TIME); + assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME); } private interface CallExecutor { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 81680aa..6c90469 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.rest.RESTServlet; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.http.Header; import org.apache.http.message.BasicHeader; import org.junit.After; @@ -96,7 +97,7 @@ public class TestRemoteTable { private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2"); private static final long ONE_HOUR = 60 * 60 * 1000; - private static final long TS_2 = System.currentTimeMillis(); + private static final long TS_2 = EnvironmentEdgeManager.currentTime(); private static final long TS_1 = TS_2 - ONE_HOUR; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java index d15c7a8..b862fb0 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -84,8 +85,8 @@ public class TestEnableRSGroups { // wait RSGroupBasedLoadBalancer online RSGroupBasedLoadBalancer loadBalancer = (RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer(); - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start <= 60000 && !loadBalancer.isOnline()) { + long start = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() - start <= 60000 && !loadBalancer.isOnline()) { LOG.info("waiting for rsgroup load balancer onLine..."); sleep(200); } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java index 5e3fc5d..cbbb86c 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -501,8 +502,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { AtomicBoolean changed = new AtomicBoolean(false); Thread t1 = new Thread(() -> { LOG.debug("thread1 start running, will recover region state"); - long current = System.currentTimeMillis(); - while (System.currentTimeMillis() - current <= 50000) { 
+ long current = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() - current <= 50000) { List regions = master.getAssignmentManager().getRegionsOnServer(movedServer); LOG.debug("server region size is:{}", regions.size()); assert regions.size() >= 1; @@ -602,8 +603,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { AtomicBoolean changed = new AtomicBoolean(false); Thread t1 = new Thread(() -> { LOG.info("thread1 start running, will recover region state"); - long current = System.currentTimeMillis(); - while (System.currentTimeMillis() - current <= 50000) { + long current = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() - current <= 50000) { List regions = master.getAssignmentManager().getRegionsOnServer(ss); List tableRegions = new ArrayList<>(); for (RegionInfo regionInfo : regions) { @@ -662,9 +663,9 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase { } return getTableRegionMap().get(tableName).size() >= tableRegionCount; }); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); - long timeTaken = System.currentTimeMillis() - startTime; + long timeTaken = EnvironmentEdgeManager.currentTime() - startTime; String msg = "Should not take more than 15000 ms to move a table with 100 regions. Time taken =" + timeTaken + " ms"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java index c78f3b3..8db0ca2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HealthCheckChore.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -62,8 +63,9 @@ public class HealthCheckChore extends ScheduledChore { + " number of times consecutively."); } // Always log health report. - LOG.info("Health status at " + StringUtils.formatTime(System.currentTimeMillis()) + " : " - + report.getHealthReport()); + LOG.info("Health status at " + + StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " + + report.getHealthReport()); } } @@ -73,9 +75,9 @@ public class HealthCheckChore extends ScheduledChore { // First time we are seeing a failure. No need to stop, just // record the time. numTimesUnhealthy++; - startWindow = System.currentTimeMillis(); + startWindow = EnvironmentEdgeManager.currentTime(); } else { - if ((System.currentTimeMillis() - startWindow) < failureWindow) { + if ((EnvironmentEdgeManager.currentTime() - startWindow) < failureWindow) { numTimesUnhealthy++; if (numTimesUnhealthy == threshold) { stop = true; @@ -85,7 +87,7 @@ public class HealthCheckChore extends ScheduledChore { } else { // Outside of failure window, so we reset to 1. 
numTimesUnhealthy = 1; - startWindow = System.currentTimeMillis(); + startWindow = EnvironmentEdgeManager.currentTime(); stop = false; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index eb2557c..3547356 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.ShipperListener; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1033,10 +1034,10 @@ public class HFileBlock implements Cacheable { protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException { ensureBlockReady(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size()); out.write(onDiskChecksum); - HFile.updateWriteLatency(System.currentTimeMillis() - startTime); + HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime); } /** @@ -1707,7 +1708,7 @@ public class HFileBlock implements Cacheable { // checksums. Can change with circumstances. The below flag is whether the // file has support for checksums (version 2+). boolean checksumSupport = this.fileContext.isUseHBaseChecksum(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); if (onDiskSizeWithHeader <= 0) { // We were not passed the block size. Need to get it from the header. If header was // not cached (see getCachedHeader above), need to seek to pull it in. 
This is costly @@ -1754,7 +1755,7 @@ if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) { return null; } - long duration = System.currentTimeMillis() - startTime; + long duration = EnvironmentEdgeManager.currentTime() - startTime; if (updateMetrics) { HFile.updateReadLatency(duration, pread); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 3a5c561..b9503ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.Writable; import org.apache.yetus.audience.InterfaceAudience; @@ -212,9 +213,9 @@ public class HFileWriterImpl implements HFile.Writer { throws IOException { trailer.setFileInfoOffset(outputStream.getPos()); finishFileInfo(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); fileInfo.write(out); - HFile.updateWriteLatency(System.currentTimeMillis() - startTime); + HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime); } /** @@ -837,9 +838,9 @@ public class HFileWriterImpl implements HFile.Writer { trailer.setEntryCount(entryCount); trailer.setCompressionCodec(hFileContext.getCompression()); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); trailer.serialize(outputStream); - HFile.updateWriteLatency(System.currentTimeMillis() - startTime); + HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime); if (closeOutputStream) { outputStream.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java index 99a3a2b..494a588 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruAdaptiveBlockCache.java @@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -1074,7 +1075,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { long freedSumMb = 0; int heavyEvictionCount = 0; int freedDataOverheadPercent = 0; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); while (this.go) { synchronized (this) { try { @@ -1107,7 +1108,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache { // This should be almost the same time (+/- 10s) // because we get comparable volumes of freed bytes each time. 
// 10s because this is default period to run evict() (see above this.wait) - long stopTime = System.currentTimeMillis(); + long stopTime = EnvironmentEdgeManager.currentTime(); if ((stopTime - startTime) > 1000 * 10 - 1) { // Here we have to calc what situation we have got. // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit" diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java index 175fb83..62a86d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,7 +63,7 @@ public final class PrefetchExecutor { new ThreadFactory() { @Override public Thread newThread(Runnable r) { - String name = "hfile-prefetch-" + System.currentTimeMillis(); + String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime(); Thread t = new Thread(r, name); t.setDaemon(true); return t; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 48ee664..a3bc367 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -25,15 +25,16 @@ import org.apache.hadoop.hbase.CallDroppedException; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.trace.TraceUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.User; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; import org.apache.htrace.core.TraceScope; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** * The request processing logic, which is usually executed in thread pools provided by an @@ -101,7 +102,7 @@ public class CallRunner { } return; } - call.setStartTime(System.currentTimeMillis()); + call.setStartTime(EnvironmentEdgeManager.currentTime()); if (call.getStartTime() > call.getDeadline()) { RpcServer.LOG.warn("Dropping timed out call: " + call); return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index de958b8..a518f9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import 
org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.HBasePolicyProvider; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; @@ -192,7 +193,7 @@ public class NettyRpcServer extends RpcServer { MethodDescriptor md, Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) throws IOException { return call(service, md, param, cellScanner, receiveTime, status, - System.currentTimeMillis(), 0); + EnvironmentEdgeManager.currentTime(), 0); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java index 2f97f53..deed987 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java @@ -30,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.SingleByteBuff; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; @@ -116,7 +117,7 @@ class NettyServerRpcConnection extends ServerRpcConnection { long size, final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator, + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, reqCleanup); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index b0e8b7d..4ebb639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.UserGroupInformation; @@ -395,7 +396,7 @@ public abstract class RpcServer implements RpcServerInterface, Message result = call.getService().callBlockingMethod(md, controller, param); long receiveTime = call.getReceiveTime(); long startTime = call.getStartTime(); - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); int processingTime = (int) (endTime - startTime); int qTime = (int) (startTime - receiveTime); int totalTime = (int) (endTime - receiveTime); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java index b53c770..9ef23e7 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -422,7 +423,7 @@ public abstract class ServerCall implements RpcCa @Override public long disconnectSince() { if (!this.connection.isConnectionOpen()) { - return System.currentTimeMillis() - receiveTime; + return EnvironmentEdgeManager.currentTime() - receiveTime; } else { return -1L; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 7818572..825c016 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.security.HBasePolicyProvider; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.IOUtils; @@ -321,7 +322,7 @@ public class SimpleRpcServer extends RpcServer { if (c == null) { return; } - c.setLastContact(System.currentTimeMillis()); + c.setLastContact(EnvironmentEdgeManager.currentTime()); try { count = c.readAndProcess(); } catch (InterruptedException ieo) { @@ -337,7 +338,7 @@ public class SimpleRpcServer extends RpcServer { closeConnection(c); c = null; } else { - c.setLastContact(System.currentTimeMillis()); + c.setLastContact(EnvironmentEdgeManager.currentTime()); } } @@ -483,8 +484,8 @@ public class SimpleRpcServer extends RpcServer { public Pair call(BlockingService service, MethodDescriptor md, Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) throws IOException { - return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(), - 0); + return call(service, md, param, cellScanner, receiveTime, status, + EnvironmentEdgeManager.currentTime(), 0); } @Override @@ -609,7 +610,8 @@ public class SimpleRpcServer extends RpcServer { } SimpleServerRpcConnection register(SocketChannel channel) { - SimpleServerRpcConnection connection = getConnection(channel, System.currentTimeMillis()); + SimpleServerRpcConnection connection = getConnection(channel, + EnvironmentEdgeManager.currentTime()); add(connection); if (LOG.isTraceEnabled()) { LOG.trace("Connection from " + connection + @@ -640,7 +642,7 @@ public class SimpleRpcServer extends RpcServer { // synch'ed to avoid explicit invocation upon OOM from colliding with // timer task firing synchronized void closeIdle(boolean scanAll) { - long minLastContact = System.currentTimeMillis() - maxIdleTime; + long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime; // concurrent iterator might miss new connections added // during the iteration, but that's ok because they won't // be idle yet anyway and will be caught on next scan diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java index b68da56..d6d5dd0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServerResponder.java @@ -29,9 +29,10 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Sends responses of RPC back to clients. @@ -162,7 +163,7 @@ class SimpleRpcServerResponder extends Thread { * @return the time of the purge. */ private long purge(long lastPurgeTime) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now < lastPurgeTime + this.simpleRpcServer.purgeTimeout) { return lastPurgeTime; } @@ -247,7 +248,7 @@ class SimpleRpcServerResponder extends Thread { return true; } else { // set the serve time when the response has to be sent later - conn.lastSentTime = System.currentTimeMillis(); + conn.lastSentTime = EnvironmentEdgeManager.currentTime(); return false; // Socket can't take more, we will have to come back. } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java index 01127cc..0c7057a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.exceptions.RequestTooBigException; import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; import org.apache.hadoop.hbase.nio.ByteBuff; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService; import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -209,7 +210,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { // Notify the client about the offending request SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null, - null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0, + null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0, this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder); this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION); // Make sure the client recognizes the underlying exception @@ -327,7 +328,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection { RequestHeader header, Message param, CellScanner cellScanner, long size, InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) { return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, - remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator, + remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, reqCleanup, this.responder); } diff 
--git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8ae43a0..19ae56e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -193,6 +193,7 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.IdLock; @@ -787,7 +788,7 @@ public class HMaster extends HRegionServer implements MasterServices { */ status.setStatus("Initializing Master file system"); - this.masterActiveTime = System.currentTimeMillis(); + this.masterActiveTime = EnvironmentEdgeManager.currentTime(); // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. // always initialize the MemStoreLAB as we use a region to store data in master now, see @@ -877,7 +878,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Start the Zombie master detector after setting master as active, see HBASE-21535 Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), - "ActiveMasterInitializationMonitor-" + System.currentTimeMillis()); + "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime()); zombieDetector.setDaemon(true); zombieDetector.start(); @@ -1039,8 +1040,8 @@ public class HMaster extends HRegionServer implements MasterServices { status.markComplete("Initialization successful"); LOG.info(String.format("Master has completed initialization %.3fsec", - (System.currentTimeMillis() - masterActiveTime) / 1000.0f)); - this.masterFinishedInitializationTime = System.currentTimeMillis(); + (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f)); + this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime(); configurationManager.registerObserver(this.balancer); configurationManager.registerObserver(this.cleanerPool); configurationManager.registerObserver(this.hfileCleaner); @@ -1100,11 +1101,11 @@ public class HMaster extends HRegionServer implements MasterServices { * After master has started up, lets do balancer post startup initialization. Since this runs * in activeMasterManager thread, it should be fine. */ - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); this.balancer.postMasterStartupInitialize(); if (LOG.isDebugEnabled()) { LOG.debug("Balancer post startup initialization complete, took " + ( - (System.currentTimeMillis() - start) / 1000) + " seconds"); + (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds"); } } @@ -1612,7 +1613,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Sleep to next balance plan start time // But if there are zero regions in transition, it can skip sleep to speed up. 
- while (!interrupted && System.currentTimeMillis() < nextBalanceStartTime + while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime && this.assignmentManager.getRegionStates().hasRegionsInTransition()) { try { Thread.sleep(100); @@ -1625,7 +1626,7 @@ public class HMaster extends HRegionServer implements MasterServices { while (!interrupted && maxRegionsInTransition > 0 && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() - >= maxRegionsInTransition && System.currentTimeMillis() <= cutoffTime) { + >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) { try { // sleep if the number of regions in transition exceeds the limit Thread.sleep(100); @@ -1752,7 +1753,7 @@ public class HMaster extends HRegionServer implements MasterServices { public List executeRegionPlansWithThrottling(List plans) { List successRegionPlans = new ArrayList<>(); int maxRegionsInTransition = getMaxRegionsInTransition(); - long balanceStartTime = System.currentTimeMillis(); + long balanceStartTime = EnvironmentEdgeManager.currentTime(); long cutoffTime = balanceStartTime + this.maxBalancingTime; int rpCount = 0; // number of RegionPlans balanced so far if (plans != null && !plans.isEmpty()) { @@ -1782,7 +1783,7 @@ public class HMaster extends HRegionServer implements MasterServices { // if performing next balance exceeds cutoff time, exit the loop if (this.maxBalancingTime > 0 && rpCount < plans.size() - && System.currentTimeMillis() > cutoffTime) { + && EnvironmentEdgeManager.currentTime() > cutoffTime) { // TODO: After balance, there should not be a cutoff time (keeping it as // a security net for now) LOG.debug("No more balancing till next balance run; maxBalanceTime=" diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 15ade55..1c3137f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -2545,7 +2545,8 @@ public class MasterRpcServices extends RSRpcServices implements RegionState.State newState = RegionState.State.convert(s.getState()); LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info, prevState.getState(), newState); - Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis()); + Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, + EnvironmentEdgeManager.currentTime()); metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes(newState.name())); List putList = new ArrayList<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 773c02b..ba0c049 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -482,8 +482,7 @@ public class ServerManager { ZKWatcher zkw = master.getZooKeeper(); int onlineServersCt; while ((onlineServersCt = onlineServers.size()) > 0){ - - if (System.currentTimeMillis() > (previousLogTime + 1000)) { + if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) { Set remainingServers = onlineServers.keySet(); synchronized (onlineServers) { if (remainingServers.size() == 1 && remainingServers.contains(sn)) { @@ 
-500,7 +499,7 @@ public class ServerManager { sb.append(key); } LOG.info("Waiting on regionserver(s) " + sb.toString()); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTime(); } try { @@ -698,8 +697,8 @@ public class ServerManager { if (timeout < 0) { return; } - long expiration = timeout + System.currentTimeMillis(); - while (System.currentTimeMillis() < expiration) { + long expiration = timeout + EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() < expiration) { controller.reset(); try { RegionInfo rsRegion = @@ -797,7 +796,7 @@ public class ServerManager { maxToStart = Integer.MAX_VALUE; } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); final long startTime = now; long slept = 0; long lastLogTime = 0; @@ -830,7 +829,7 @@ public class ServerManager { // We sleep for some time final long sleepTime = 50; Thread.sleep(sleepTime); - now = System.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTime(); slept = now - startTime; oldCount = count; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index daa1457..fdc9bad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -2217,7 +2217,7 @@ public class AssignmentManager { private void acceptPlan(final HashMap regions, final Map> plan) throws HBaseIOException { final ProcedureEvent[] events = new ProcedureEvent[regions.size()]; - final long st = System.currentTimeMillis(); + final long st = EnvironmentEdgeManager.currentTime(); if (plan.isEmpty()) { throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); @@ -2243,7 +2243,7 @@ public class AssignmentManager { } ProcedureEvent.wakeEvents(getProcedureScheduler(), events); - final long et = System.currentTimeMillis(); + final long et = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { LOG.trace("ASSIGN ACCEPT " + events.length + " -> " + StringUtils.humanTimeDiff(et - st)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 8660d69..033cc11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -90,11 +90,11 @@ public class RegionStateStore { if (r != null && !r.isEmpty()) { long st = 0; if (LOG.isTraceEnabled()) { - st = System.currentTimeMillis(); + st = EnvironmentEdgeManager.currentTime(); } visitMetaEntry(visitor, r); if (LOG.isTraceEnabled()) { - long et = System.currentTimeMillis(); + long et = EnvironmentEdgeManager.currentTime(); LOG.trace("[T] LOAD META PERF " + StringUtils.humanTimeDiff(et - st)); } } else if (isDebugEnabled) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index c03f542..4e180aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -314,7 +315,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { loadOfOneTable.remove(masterServerName); } - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); // construct a Cluster object with clusterMap and rest of the // argument as defaults @@ -501,7 +502,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { balanceOverall(regionsToReturn, serverBalanceInfo, fetchFromTail, regionsToMove, max, min); } - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); if (!regionsToMove.isEmpty() || neededRegions != 0) { // Emit data so can diagnose how balancer went astray. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java index 87f15c7..2e2e207 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java @@ -21,6 +21,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -98,11 +99,11 @@ public class DirScanPool implements ConfigurationObserver { return; } reconfigNotification = false; - long stopTime = System.currentTimeMillis() + timeout; + long stopTime = EnvironmentEdgeManager.currentTime() + timeout; while (cleanerLatch != 0 && timeout > 0) { try { wait(timeout); - timeout = stopTime - System.currentTimeMillis(); + timeout = stopTime - EnvironmentEdgeManager.currentTime(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java index ff28857..45b82e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.StealJobQueue; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -237,7 +238,7 @@ public class HFileCleaner extends CleanerChore } }; large.setDaemon(true); - large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis()); + large.setName(n + "-HFileCleaner.large." 
+ i + "-" + EnvironmentEdgeManager.currentTime()); large.start(); LOG.debug("Starting for large file={}", large); threads.add(large); @@ -252,7 +253,7 @@ public class HFileCleaner extends CleanerChore } }; small.setDaemon(true); - small.setName(n + "-HFileCleaner.small." + i + "-" + System.currentTimeMillis()); + small.setName(n + "-HFileCleaner.small." + i + "-" + EnvironmentEdgeManager.currentTime()); small.start(); LOG.debug("Starting for small files={}", small); threads.add(small); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java index aaf5152..4f020ac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.procedure2.LockType; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.NonceKey; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -155,10 +156,12 @@ public final class LockManager { proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser()); master.getMasterProcedureExecutor().submitProcedure(proc); - long deadline = (timeoutMs > 0) ? System.currentTimeMillis() + timeoutMs : Long.MAX_VALUE; - while (deadline >= System.currentTimeMillis() && !proc.isLocked()) { + long deadline = (timeoutMs > 0) ? EnvironmentEdgeManager.currentTime() + timeoutMs : + Long.MAX_VALUE; + while (deadline >= EnvironmentEdgeManager.currentTime() && !proc.isLocked()) { try { - lockAcquireLatch.await(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS); + lockAcquireLatch.await(deadline - EnvironmentEdgeManager.currentTime(), + TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.info("InterruptedException when waiting for lock: " + proc.toString()); // kind of weird, releasing a lock which is not locked. This is to make the procedure diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java index dc1e5bc..58705cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -183,14 +184,14 @@ public final class LockProcedure extends Procedure } private boolean hasHeartbeatExpired() { - return System.currentTimeMillis() - lastHeartBeat.get() >= getTimeout(); + return EnvironmentEdgeManager.currentTime() - lastHeartBeat.get() >= getTimeout(); } /** * Updates timeout deadline for the lock. 
*/ public void updateHeartBeat() { - lastHeartBeat.set(System.currentTimeMillis()); + lastHeartBeat.set(EnvironmentEdgeManager.currentTime()); if (LOG.isDebugEnabled()) { LOG.debug("Heartbeat " + toString()); } @@ -312,7 +313,7 @@ public final class LockProcedure extends Procedure if (LOG.isDebugEnabled()) { LOG.debug("LOCKED " + toString()); } - lastHeartBeat.set(System.currentTimeMillis()); + lastHeartBeat.set(EnvironmentEdgeManager.currentTime()); return LockState.LOCK_ACQUIRED; } LOG.warn("Failed acquire LOCK " + toString() + "; YIELDING"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java index 7a475e2..e1acb6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java @@ -24,8 +24,8 @@ import java.util.Date; import java.util.LinkedList; import java.util.List; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - import org.apache.hbase.thirdparty.com.google.common.base.Charsets; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; @@ -54,7 +54,7 @@ public class MemoryBoundedLogMessageBuffer { * older messages until the desired memory limit is achieved. */ public synchronized void add(String messageText) { - LogMessage message = new LogMessage(messageText, System.currentTimeMillis()); + LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTime()); usage += message.estimateHeapUsage(); messages.add(message); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java index d194d10..96fffa4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java @@ -25,7 +25,7 @@ import java.util.Map; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Operation; import org.apache.hadoop.hbase.util.Bytes; - +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.com.google.protobuf.Message; /** @@ -196,7 +196,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl long queueTime) { this.methodName = methodName; this.params = params; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); this.rpcStartTime = now; setWarnTime(now); this.rpcQueueTime = queueTime; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java index af6a47a..011c198 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java @@ -24,6 +24,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; + +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; 
@@ -47,7 +49,7 @@ class MonitoredTaskImpl implements MonitoredTask { private static final Gson GSON = GsonUtil.createGson().create(); public MonitoredTaskImpl() { - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); statusTime = startTime; stateTime = startTime; warnTime = startTime; @@ -161,7 +163,7 @@ class MonitoredTaskImpl implements MonitoredTask { @Override public void setStatus(String status) { this.status = status; - statusTime = System.currentTimeMillis(); + statusTime = EnvironmentEdgeManager.currentTime(); if (journalEnabled) { journal.add(new StatusJournalEntryImpl(this.status, statusTime)); } @@ -169,7 +171,7 @@ class MonitoredTaskImpl implements MonitoredTask { protected void setState(State state) { this.state = state; - stateTime = System.currentTimeMillis(); + stateTime = EnvironmentEdgeManager.currentTime(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java index c0a8e1d..d6701cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/impl/SlowLogQueueService.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.namequeues.RpcLogDetails; import org.apache.hadoop.hbase.namequeues.SlowLogPersistentService; import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -128,7 +129,7 @@ public class SlowLogQueueService implements NamedQueueService { Message param = rpcLogDetails.getParam(); long receiveTime = rpcCall.getReceiveTime(); long startTime = rpcCall.getStartTime(); - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); int processingTime = (int) (endTime - startTime); int qTime = (int) (startTime - receiveTime); final SlowLogParams slowLogParams = ProtobufUtil.getSlowLogParams(param); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 954bcf4..467f3ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -832,7 +832,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /* * timestamp.slop provides a server-side constraint on the timestamp. This - * assumes that you base your TS around currentTimeMillis(). In this case, + * assumes that you base your TS around EnvironmentEdgeManager.currentTime(). In this case, * throw an error to the user if the user-specified TS is newer than now + * slop. 
LATEST_TIMESTAMP == don't use this functionality */ @@ -1938,7 +1938,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return true; } if (!writestate.flushing) return true; - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); long duration = 0; boolean interrupted = false; LOG.debug("waiting for cache flush to complete for region " + this); @@ -1954,7 +1954,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi interrupted = true; break; } finally { - duration = System.currentTimeMillis() - start; + duration = EnvironmentEdgeManager.currentTime() - start; } } } finally { @@ -6579,7 +6579,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (call.isPresent()) { long deadline = call.get().getDeadline(); if (deadline < Long.MAX_VALUE) { - int timeToDeadline = (int) (deadline - System.currentTimeMillis()); + int timeToDeadline = (int) (deadline - EnvironmentEdgeManager.currentTime()); if (timeToDeadline <= this.rowLockWaitDuration) { reachDeadlineFirst = true; timeout = timeToDeadline; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 6387747..6a78c9d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -586,7 +586,7 @@ public class HRegionServer extends Thread implements super("RegionServer"); // thread name TraceUtil.initTracer(conf); try { - this.startcode = System.currentTimeMillis(); + this.startcode = EnvironmentEdgeManager.currentTime(); this.conf = conf; this.dataFsOk = true; this.masterless = conf.getBoolean(MASTERLESS_CONFIG_NAME, false); @@ -1078,7 +1078,7 @@ public class HRegionServer extends Thread implements } // We registered with the Master. Go into run mode. - long lastMsg = System.currentTimeMillis(); + long lastMsg = EnvironmentEdgeManager.currentTime(); long oldRequestCount = -1; // The main run loop. while (!isStopped() && isHealthy()) { @@ -1109,10 +1109,10 @@ public class HRegionServer extends Thread implements LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString()); } } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if ((now - lastMsg) >= msgInterval) { tryRegionServerReport(lastMsg, now); - lastMsg = System.currentTimeMillis(); + lastMsg = EnvironmentEdgeManager.currentTime(); } if (!isStopped() && !isAborted()) { this.sleeper.sleep(); @@ -1501,8 +1501,8 @@ public class HRegionServer extends Thread implements // Only print a message if the count of regions has changed. 
if (count != lastCount) { // Log every second at most - if (System.currentTimeMillis() > (previousLogTime + 1000)) { - previousLogTime = System.currentTimeMillis(); + if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) { + previousLogTime = EnvironmentEdgeManager.currentTime(); lastCount = count; LOG.info("Waiting on " + count + " regions to close"); // Only print out regions still closing if a small number else will @@ -2779,9 +2779,9 @@ public class HRegionServer extends Thread implements LOG.debug("No master found and cluster is stopped; bailing out"); return null; } - if (System.currentTimeMillis() > (previousLogTime + 1000)) { + if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) { LOG.debug("No master found; retry"); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTime(); } refresh = true; // let's try pull it from ZK directly if (sleepInterrupted(200)) { @@ -2804,7 +2804,7 @@ public class HRegionServer extends Thread implements intLockStub = LockService.newBlockingStub(channel); break; } catch (IOException e) { - if (System.currentTimeMillis() > (previousLogTime + 1000)) { + if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) { e = e instanceof RemoteException ? ((RemoteException)e).unwrapRemoteException() : e; if (e instanceof ServerNotRunningYetException) { @@ -2812,7 +2812,7 @@ public class HRegionServer extends Thread implements } else { LOG.warn("Unable to connect to master. Retrying. Error was:", e); } - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTime(); } if (sleepInterrupted(200)) { interrupted = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 0ef8317..9f1b3b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -2044,10 +2044,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // We are assigning meta, wait a little for regionserver to finish initialization. int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout - long endTime = System.currentTimeMillis() + timeout; + long endTime = EnvironmentEdgeManager.currentTime() + timeout; synchronized (regionServer.online) { try { - while (System.currentTimeMillis() <= endTime + while (EnvironmentEdgeManager.currentTime() <= endTime && !regionServer.isStopped() && !regionServer.isOnline()) { regionServer.online.wait(regionServer.msgInterval); } @@ -3255,7 +3255,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta); // XXX: Can not use EnvironmentEdge here because TestIncrementTimeRange use a // ManualEnvironmentEdge. Consider using System.nanoTime instead. - return System.currentTimeMillis() + timeLimitDelta; + return EnvironmentEdgeManager.currentTime() + timeLimitDelta; } // Default value of timeLimit is negative to indicate no timeLimit should be // enforced. 
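For context on why this mechanical substitution matters (see also the XXX note above about TestIncrementTimeRange): EnvironmentEdgeManager delegates to a swappable EnvironmentEdge, so a unit test can inject a manual clock and advance it deterministically instead of sleeping. A minimal sketch of that test-side pattern, using the ManualEnvironmentEdge helper from hbase-common (the timestamps are illustrative only):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    public class ManualClockSketch {
      public static void main(String[] args) {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        clock.setValue(1000L);                      // start the injected clock at t=1000 ms
        EnvironmentEdgeManager.injectEdge(clock);
        try {
          long start = EnvironmentEdgeManager.currentTime(); // reads the injected edge: 1000
          clock.incValue(250L);                     // advance "time" by 250 ms without sleeping
          long duration = EnvironmentEdgeManager.currentTime() - start;
          assert duration == 250L;                  // deterministic, unlike wall-clock timing
        } finally {
          EnvironmentEdgeManager.reset();           // restore the default wall-clock edge
        }
      }
    }

Any call site that still reads System.currentTimeMillis() directly bypasses the injected edge, which is exactly what the hunks in this commit sweep out.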
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java index 4ba6913..2dced9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java @@ -21,9 +21,10 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; /** * ScannerContext instances encapsulate limit tracking AND progress towards those limits during @@ -55,7 +56,7 @@ public class ScannerContext { /** * A different set of progress fields. Only include batch, dataSize and heapSize. Compare to * LimitFields, ProgressFields doesn't contain time field. As we save a deadline in LimitFields, - * so use {@link System#currentTimeMillis()} directly when check time limit. + * so use {@link EnvironmentEdgeManager#currentTime()} directly when check time limit. */ ProgressFields progress; @@ -171,7 +172,7 @@ public class ScannerContext { } /** - * Update the time progress with {@link System#currentTimeMillis()} + * Update the time progress. * @deprecated will be removed in 3.0 */ @Deprecated @@ -195,7 +196,7 @@ public class ScannerContext { */ @Deprecated long getTimeProgress() { - return System.currentTimeMillis(); + return EnvironmentEdgeManager.currentTime(); } /** @@ -341,12 +342,12 @@ public class ScannerContext { /** * @param checkerScope The scope that the limit is being checked from.
The time limit is always - * checked against {@link System#currentTimeMillis()} + * checked against {@link EnvironmentEdgeManager#currentTime()} * @return true when the limit is enforceable from the checker's scope and it has been reached */ boolean checkTimeLimit(LimitScope checkerScope) { return hasTimeLimit(checkerScope) && - (returnImmediately || System.currentTimeMillis() >= limits.getTime()); + (returnImmediately || EnvironmentEdgeManager.currentTime() >= limits.getTime()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 8ff292c..a92147c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -147,7 +147,7 @@ public abstract class Compactor { private FileDetails getFileDetails( Collection filesToCompact, boolean allFiles, boolean major) throws IOException { FileDetails fd = new FileDetails(); - long oldestHFileTimestampToKeepMVCC = System.currentTimeMillis() - + long oldestHFileTimestampToKeepMVCC = EnvironmentEdgeManager.currentTime() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod); for (HStoreFile file : filesToCompact) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java index ff39531..ffdade1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status; import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Handles log splitting a wal @@ -68,7 +69,7 @@ public class WALSplitterHandler extends EventHandler { @Override public void process() throws IOException { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); Status status = null; try { status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), reporter); @@ -101,7 +102,7 @@ public class WALSplitterHandler extends EventHandler { } } finally { LOG.info("Worker " + serverName + " done with task " + splitTaskDetails.toString() + " in " - + (System.currentTimeMillis() - startTime) + "ms. Status = " + status); + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms. Status = " + status); this.inProgressTasks.decrementAndGet(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java index d364288..79352b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java @@ -581,7 +581,7 @@ public abstract class AbstractFSWAL implements WAL { * retrieve the next path to use for writing. Increments the internal filenum.
*/ private Path getNewPath() throws IOException { - this.filenum.set(System.currentTimeMillis()); + this.filenum.set(EnvironmentEdgeManager.currentTime()); Path newPath = getCurrentFileName(); while (fs.exists(newPath)) { this.filenum.incrementAndGet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java index 19f6b96..221f90c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKeyImpl; @@ -155,7 +156,7 @@ public class WALUtil { throws IOException { // TODO: Pass in current time to use? WALKeyImpl walKey = new WALKeyImpl(hri.getEncodedNameAsBytes(), hri.getTable(), - System.currentTimeMillis(), mvcc, replicationScope, extendedAttributes); + EnvironmentEdgeManager.currentTime(), mvcc, replicationScope, extendedAttributes); long trx = MultiVersionConcurrencyControl.NONE; try { trx = wal.appendMarker(hri, walKey, edit); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index a998a3c..15d6fbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -26,6 +26,7 @@ import java.util.UUID; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -193,13 +194,13 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint */ public synchronized void setRegionServers(List regionServers) { this.regionServers = regionServers; - lastRegionServerUpdate = System.currentTimeMillis(); + lastRegionServerUpdate = EnvironmentEdgeManager.currentTime(); } /** * Get the timestamp at which the last change occurred to the list of region servers to replicate * to. - * @return The System.currentTimeMillis at the last time the list of peer region servers changed. + * @return The last time the list of peer region servers changed. 
*/ public long getLastRegionServerUpdate() { return lastRegionServerUpdate; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 5201d6e..8e1969f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.ReplicationTracker; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; @@ -211,8 +212,9 @@ public class DumpReplicationQueues extends Configured implements Tool { ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); - ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), - new WarnOnlyAbortable(), true); + ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + + EnvironmentEdgeManager.currentTime(), + new WarnOnlyAbortable(), true); try { // Our zk watcher diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 6f3c85f..cb16ac5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; @@ -515,12 +516,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi int numSinks = replicationSinkMgr.getNumSinks(); if (numSinks == 0) { - if((System.currentTimeMillis() - lastSinkFetchTime) >= (maxRetriesMultiplier*1000)) { + if ((EnvironmentEdgeManager.currentTime() - lastSinkFetchTime) >= + (maxRetriesMultiplier*1000)) { LOG.warn( "No replication sinks found, returning without replicating. " + "The source should retry with the same set of edits. 
Not logging this again for " + "the next {} seconds.", maxRetriesMultiplier); - lastSinkFetchTime = System.currentTimeMillis(); + lastSinkFetchTime = EnvironmentEdgeManager.currentTime(); } sleepForRetries("No sinks available at peer", sleepMultiplier); return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 6817058..ce785bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -28,8 +29,8 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; @InterfaceAudience.Private public class MetricsSink { - private long lastTimestampForAge = System.currentTimeMillis(); - private long startTimestamp = System.currentTimeMillis(); + private long lastTimestampForAge = EnvironmentEdgeManager.currentTime(); + private long startTimestamp = EnvironmentEdgeManager.currentTime(); private final MetricsReplicationSinkSource mss; public MetricsSink() { @@ -47,7 +48,7 @@ public class MetricsSink { long age = 0; if (lastTimestampForAge != timestamp) { lastTimestampForAge = timestamp; - age = System.currentTimeMillis() - lastTimestampForAge; + age = EnvironmentEdgeManager.currentTime() - lastTimestampForAge; } mss.setLastAppliedOpAge(age); return age; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index a233196..01cfe88 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -27,13 +27,16 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; + /** * Maintains a collection of peers to replicate to, and randomly selects a * single peer to replicate to per set of data to replicate. 
Also handles @@ -163,7 +166,7 @@ public class ReplicationSinkManager { Collections.shuffle(slaveAddresses, random); int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); sinks = slaveAddresses.subList(0, numSinks); - lastUpdateToPeers = System.currentTimeMillis(); + lastUpdateToPeers = EnvironmentEdgeManager.currentTime(); badReportCounts.clear(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java index f188e7b..67f9358 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALEdit; @@ -350,10 +351,10 @@ public class ReplicationSourceShipper extends Thread { * have been triggered interruption/termination prior to calling this method. */ void clearWALEntryBatch() { - long timeout = System.currentTimeMillis() + this.shipEditsTimeout; + long timeout = EnvironmentEdgeManager.currentTime() + this.shipEditsTimeout; while(this.isAlive() || this.entryReader.isAlive()){ try { - if (System.currentTimeMillis() >= timeout) { + if (EnvironmentEdgeManager.currentTime() >= timeout) { LOG.warn("Shipper clearWALEntryBatch method timed out whilst waiting reader/shipper " + "thread to stop. Not cleaning buffer usage. 
Shipper alive: {}; Reader alive: {}", this.source.getPeerId(), this.isAlive(), this.entryReader.isAlive()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index a8fd640..b6386cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; @@ -74,8 +75,9 @@ public class ReplicationSyncUp extends Configured implements Tool { } }; Configuration conf = getConf(); - try (ZKWatcher zkw = - new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true)) { + try (ZKWatcher zkw = new ZKWatcher(conf, + "syncupReplication" + EnvironmentEdgeManager.currentTime(), + abortable, true)) { Path walRootDir = CommonFSUtils.getWALRootDir(conf); FileSystem fs = CommonFSUtils.getWALFileSystem(conf); Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -107,7 +109,7 @@ public class ReplicationSyncUp extends Configured implements Tool { DummyServer(ZKWatcher zkw) { // a unique name in case the first run fails - hostname = System.currentTimeMillis() + ".SyncUpTool.replication.org"; + hostname = EnvironmentEdgeManager.currentTime() + ".SyncUpTool.replication.org"; this.zkw = zkw; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java index f9f05db..ffe8dab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -154,11 +155,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { public boolean grantAcl(UserPermission userPermission, Set skipNamespaces, Set skipTables) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, skipTables); LOG.info("Set HDFS acl when grant {}, cost {} ms", userPermission, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when grant: {}", userPermission, e); @@ -176,11 +177,11 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { public boolean revokeAcl(UserPermission userPermission, Set skipNamespaces, Set skipTables) { try { - long 
start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces, skipTables); LOG.info("Set HDFS acl when revoke {}, cost {} ms", userPermission, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when revoke: {}", userPermission, e); @@ -195,7 +196,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ public boolean snapshotAcl(SnapshotDescription snapshot) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); TableName tableName = snapshot.getTableName(); // global user permission can be inherited from default acl automatically Set userSet = getUsersWithTableReadAction(tableName, true, false); @@ -205,7 +206,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); } LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(), - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when snapshot {}", snapshot, e); @@ -222,13 +223,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { public boolean removeNamespaceAccessAcl(TableName tableName, Set removeUsers, String operation) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); if (removeUsers.size() > 0) { handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers, HDFSAclOperation.OperationType.REMOVE); } LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e); @@ -244,13 +245,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ public boolean removeNamespaceDefaultAcl(String namespace, Set removeUsers) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); Path archiveNsDir = pathHelper.getArchiveNsDir(namespace); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveNsDir, removeUsers, HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete namespace {}, cost {} ms", namespace, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Remove HDFS acl error when delete namespace {}", namespace, e); @@ -266,13 +267,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ public boolean removeTableDefaultAcl(TableName tableName, Set removeUsers) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); Path archiveTableDir = pathHelper.getArchiveTableDir(tableName); HDFSAclOperation operation = new HDFSAclOperation(fs, archiveTableDir, removeUsers, HDFSAclOperation.OperationType.REMOVE, false, HDFSAclOperation.AclType.DEFAULT); operation.handleAcl(); LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Remove HDFS acl 
error when delete table {}", tableName, e); @@ -288,7 +289,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ public boolean addTableAcl(TableName tableName, Set users, String operation) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); if (users.size() > 0) { HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY; handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType); @@ -296,7 +297,7 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { operationType); } LOG.info("Set HDFS acl when {} table {}, cost {} ms", operation, tableName, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when {} table {}", operation, tableName, e); @@ -312,13 +313,13 @@ public class SnapshotScannerHDFSAclHelper implements Closeable { */ public boolean removeTableAcl(TableName tableName, Set users) { try { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); if (users.size() > 0) { handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0), HDFSAclOperation.OperationType.REMOVE); } LOG.info("Set HDFS acl when create or modify table {}, cost {} ms", tableName, - System.currentTimeMillis() - start); + EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when create or modify table {}", tableName, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java index c4d44ec..ab2e1ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/CanaryTool.java @@ -615,9 +615,9 @@ public class CanaryTool implements Tool, Canary { tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(), Bytes.toStringBinary(rowToCheck)); try { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); table.put(put); - long time = System.currentTimeMillis() - startTime; + long time = EnvironmentEdgeManager.currentTime() - startTime; this.readWriteLatency.add(time); sink.publishWriteTiming(serverName, region, column, time); } catch (Exception e) { @@ -1017,8 +1017,8 @@ public class CanaryTool implements Tool, Canary { // Do monitor !! 
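The SnapshotScannerHDFSAclHelper and CanaryTool hunks above all reduce to the same stopwatch shape: read the clock once before the work and once after, through the same injectable source, then log the difference. A minimal sketch of that shape, not part of the patch, assuming an SLF4J logger; timed, its label, and the Runnable argument are illustrative stand-ins for the real operations:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class StopwatchSketch {
      private static final Logger LOG = LoggerFactory.getLogger(StopwatchSketch.class);

      /** Times one unit of work with the injectable clock and logs the cost. */
      static void timed(String label, Runnable work) {
        long start = EnvironmentEdgeManager.currentTime();
        work.run();
        // Both readings must come from the same edge for the delta to be meaningful.
        LOG.info("{} cost {} ms", label, EnvironmentEdgeManager.currentTime() - start);
      }

      private StopwatchSketch() {
      }
    }
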
try { monitor = this.newMonitor(connection, monitorTargets); - monitorThread = new Thread(monitor, "CanaryMonitor-" + System.currentTimeMillis()); - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); + monitorThread = new Thread(monitor, "CanaryMonitor-" + startTime); monitorThread.start(); while (!monitor.isDone()) { // wait for 1 sec @@ -1032,7 +1032,7 @@ public class CanaryTool implements Tool, Canary { return INIT_ERROR_EXIT_CODE; } } - currentTimeLength = System.currentTimeMillis() - startTime; + currentTimeLength = EnvironmentEdgeManager.currentTime() - startTime; if (currentTimeLength > timeout) { LOG.error("The monitor is running too long (" + currentTimeLength + ") after timeout limit:" + timeout diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 91775b9..a0b68d3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -608,7 +608,7 @@ public final class FSUtils { throws IOException { // Rewrite the file as pb. Move aside the old one first, write new // then delete the moved-aside file. - Path movedAsideName = new Path(p + "." + System.currentTimeMillis()); + Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTime()); if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p); setClusterId(fs, rootdir, cid, 100); if (!fs.delete(movedAsideName, false)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index cc0f49a..1e2ac3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -282,14 +282,14 @@ public class JVMClusterUtil { } } boolean wasInterrupted = false; - final long maxTime = System.currentTimeMillis() + 30 * 1000; + final long maxTime = EnvironmentEdgeManager.currentTime() + 30 * 1000; if (regionservers != null) { // first try nicely. 
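Two hunks just above use the clock as a uniqueness source rather than for arithmetic: CanaryTool names its monitor thread after the start time, and FSUtils builds a move-aside path from a timestamp suffix. A sketch of the move-aside idiom, not part of the patch, assuming Hadoop's FileSystem API; moveAside is an illustrative name. One consequence worth noting: an injected edge whose value never advances will hand out the same suffix on every call, so uniqueness now depends on the clock actually moving.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public final class MoveAsideSketch {
      /** Renames a file to a timestamp-suffixed sibling and returns the new path. */
      static Path moveAside(FileSystem fs, Path p) throws IOException {
        // The suffix comes from the injectable clock, so tests can pin it down.
        Path movedAside = new Path(p + "." + EnvironmentEdgeManager.currentTime());
        if (!fs.rename(p, movedAside)) {
          throw new IOException("Failed rename of " + p);
        }
        return movedAside;
      }

      private MoveAsideSketch() {
      }
    }
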
for (RegionServerThread t : regionservers) { t.getRegionServer().stop("Shutdown requested"); } for (RegionServerThread t : regionservers) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (t.isAlive() && !wasInterrupted && now < maxTime) { try { t.join(maxTime - now); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index b4e5863..67a94f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -63,7 +63,7 @@ public abstract class ModifyRegionUtils { public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor, byte[][] splitKeys) { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo[] hRegionInfos = null; if (splitKeys == null || splitKeys.length == 0) { hRegionInfos = new RegionInfo[]{ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 791b927..760ded5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -489,7 +489,7 @@ public class RegionSplitter { daughterRegions.get(rsLocation).add(dr); } LOG.debug("Done with bucketing. Split time!"); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); // Open the split file and modify it as splits finish byte[] rawData = readFile(fs, splitFile); @@ -605,7 +605,7 @@ public class RegionSplitter { + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; if (splitCount % 10 == 0) { - long tDiff = (System.currentTimeMillis() - startTime) + long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) / splitCount; LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount + ". 
Avg Time / Split = " @@ -634,7 +634,7 @@ public class RegionSplitter { } LOG.debug("All regions have been successfully split!"); } finally { - long tDiff = System.currentTimeMillis() - startTime; + long tDiff = EnvironmentEdgeManager.currentTime() - startTime; LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); LOG.debug("Splits = " + splitCount); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java index 0eda459..70e58f6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactionTTLRequest.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +86,7 @@ public class MajorCompactionTTLRequest extends MajorCompactionRequest { if (colDesc.getTimeToLive() == HConstants.FOREVER) { return -1; } - return System.currentTimeMillis() - (colDesc.getTimeToLive() * 1000L); + return EnvironmentEdgeManager.currentTime() - (colDesc.getTimeToLive() * 1000L); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java index 370a3e8..d841ab4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/compaction/MajorCompactor.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.yetus.audience.InterfaceAudience; @@ -487,7 +488,8 @@ public class MajorCompactor extends Configured implements Tool { Configuration configuration = getConf(); int concurrency = Integer.parseInt(commandLine.getOptionValue("servers")); long minModTime = Long.parseLong( - commandLine.getOptionValue("minModTime", String.valueOf(System.currentTimeMillis()))); + commandLine.getOptionValue("minModTime", + String.valueOf(EnvironmentEdgeManager.currentTime()))); String quorum = commandLine.getOptionValue("zk", configuration.get(HConstants.ZOOKEEPER_QUORUM)); String rootDir = commandLine.getOptionValue("rootDir", configuration.get(HConstants.HBASE_DIR));
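The MajorCompactionTTLRequest hunk above converts a column family's time-to-live into an absolute timestamp cutoff, with HConstants.FOREVER mapped to a -1 sentinel. A sketch of that arithmetic, not part of the patch, assuming HConstants and EnvironmentEdgeManager from hbase-common; ttlCutoffMillis is an illustrative name:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public final class TtlCutoffSketch {
      /**
       * Returns the newest timestamp a cell may carry and still be expired
       * under the given TTL, or -1 when the TTL is FOREVER.
       */
      static long ttlCutoffMillis(int ttlSeconds) {
        if (ttlSeconds == HConstants.FOREVER) {
          return -1;
        }
        // Multiply with 1000L so a large TTL in seconds cannot overflow int.
        return EnvironmentEdgeManager.currentTime() - (ttlSeconds * 1000L);
      }

      private TtlCutoffSketch() {
      }
    }
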
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java index 3f20dfd..53dfa85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -152,7 +153,7 @@ public abstract class AbstractWALRoller extends Thread @Override public void run() { while (running) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); checkLowReplication(now); synchronized (this) { if (wals.values().stream().noneMatch(rc -> rc.needsRoll(now))) { @@ -223,7 +224,9 @@ public abstract class AbstractWALRoller extends Thread * @return true if all WAL roll finished */ public boolean walRollFinished() { - return wals.values().stream().noneMatch(rc -> rc.needsRoll(System.currentTimeMillis())) + // TODO add a status field of roll in RollController + return wals.values().stream() + .noneMatch(rc -> rc.needsRoll(EnvironmentEdgeManager.currentTime())) && isWaiting(); } @@ -254,7 +257,7 @@ public abstract class AbstractWALRoller extends Thread RollController(WAL wal) { this.wal = wal; this.rollRequest = new AtomicBoolean(false); - this.lastRollTime = System.currentTimeMillis(); + this.lastRollTime = EnvironmentEdgeManager.currentTime(); } public void requestRoll() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java index 2354b46..11baddb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitUtil.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.ConcurrentMapUtils.IOExceptionSupplier; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; @@ -333,7 +334,7 @@ public final class WALSplitUtil { public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) throws IOException { Path moveAsideName = - new Path(edits.getParent(), edits.getName() + "." + System.currentTimeMillis()); + new Path(edits.getParent(), edits.getName() + "."
+ EnvironmentEdgeManager.currentTime()); if (!fs.rename(edits, moveAsideName)) { LOG.warn("Rename failed from {} to {}", edits, moveAsideName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index 23ab048..939481c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -23,6 +23,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -151,8 +152,8 @@ public abstract class HBaseCluster implements Closeable, Configurable { */ public void waitForRegionServerToStart(String hostname, int port, long timeout) throws IOException { - long start = System.currentTimeMillis(); - while ((System.currentTimeMillis() - start) < timeout) { + long start = EnvironmentEdgeManager.currentTime(); + while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { for (ServerName server : getClusterMetrics().getLiveServerMetrics().keySet()) { if (server.getHostname().equals(hostname) && server.getPort() == port) { return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index f130f31..1c4636e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -124,6 +124,7 @@ import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; @@ -3099,9 +3100,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { //ensure that we have connection to the server before closing down, otherwise //the close session event will be eaten out before we start CONNECTING state - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (newZK.getState() != States.CONNECTED - && System.currentTimeMillis() - start < 1000) { + && EnvironmentEdgeManager.currentTime() - start < 1000) { Thread.sleep(1); } newZK.close(); @@ -4148,11 +4149,11 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public void assertRegionOnServer( final RegionInfo hri, final ServerName server, final long timeout) throws IOException, InterruptedException { - long timeoutTime = System.currentTimeMillis() + timeout; + long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > timeoutTime) break; Thread.sleep(10); } @@ -4167,7 +4168,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { public void assertRegionOnlyOnServer( final 
RegionInfo hri, final ServerName server, final long timeout) throws IOException, InterruptedException { - long timeoutTime = System.currentTimeMillis() + timeout; + long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout; while (true) { List regions = getAdmin().getRegions(server); if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) { @@ -4186,7 +4187,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { } return; // good, we are happy } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > timeoutTime) break; Thread.sleep(10); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index 9268b1e..82b068d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * This class runs performance benchmarks for {@link HFile}. @@ -317,7 +318,7 @@ public class HFilePerformanceEvaluation { long run() throws Exception { long elapsedTime; setUp(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { for (int i = 0; i < totalRows; i++) { if (i > 0 && i % getReportingPeriod() == 0) { @@ -325,7 +326,7 @@ public class HFilePerformanceEvaluation { } doRow(i); } - elapsedTime = System.currentTimeMillis() - startTime; + elapsedTime = EnvironmentEdgeManager.currentTime() - startTime; } finally { tearDown(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index f795eef..4b2886e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -448,9 +449,9 @@ public class MiniHBaseCluster extends HBaseCluster { JVMClusterUtil.RegionServerThread t = startRegionServer(); ServerName rsServerName = t.getRegionServer().getServerName(); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); ClusterStatus clusterStatus = getClusterStatus(); - while ((System.currentTimeMillis() - start) < timeout) { + while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { if (clusterStatus != null && clusterStatus.getServers().contains(rsServerName)) { return t; } @@ -661,9 +662,9 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public boolean waitForActiveAndReadyMaster(long timeout) throws IOException { List mts; - long start = System.currentTimeMillis(); + long start = 
EnvironmentEdgeManager.currentTime(); while (!(mts = getMasterThreads()).isEmpty() - && (System.currentTimeMillis() - start) < timeout) { + && (EnvironmentEdgeManager.currentTime() - start) < timeout) { for (JVMClusterUtil.MasterThread mt : mts) { if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) { return true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java index 99aef64..5268d3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MultithreadedTestUtil.java @@ -26,6 +26,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,9 +65,9 @@ public abstract class MultithreadedTestUtil { } public void waitFor(long millis) throws Exception { - long endTime = System.currentTimeMillis() + millis; + long endTime = EnvironmentEdgeManager.currentTime() + millis; while (!stopped) { - long left = endTime - System.currentTimeMillis(); + long left = endTime - EnvironmentEdgeManager.currentTime(); if (left <= 0) break; synchronized (this) { checkException(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java index f919db7..97d326a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluationCommons.java @@ -22,6 +22,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,7 +67,7 @@ public class PerformanceEvaluationCommons { public static void concurrentReads(final Runnable r) { final int count = 1; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); List threads = new ArrayList<>(count); for (int i = 0; i < count; i++) { threads.add(new Thread(r, "concurrentRead-" + i)); @@ -81,6 +82,6 @@ public class PerformanceEvaluationCommons { e.printStackTrace(); } } - LOG.info("Test took " + (System.currentTimeMillis() - now)); + LOG.info("Test took " + (EnvironmentEdgeManager.currentTime() - now)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index 9aecf06..4990e8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Threads; import org.junit.ClassRule; @@ -117,10 +118,10 @@ public class TestGlobalMemStoreSize { flush(r, server); } LOG.info("Post flush on " + server.getServerName()); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); long timeout = now + 1000; while(server.getRegionServerAccounting().getGlobalMemStoreDataSize() != 0 && - timeout < System.currentTimeMillis()) { + timeout < EnvironmentEdgeManager.currentTime()) { Threads.sleep(10); } long size = server.getRegionServerAccounting().getGlobalMemStoreDataSize();
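These test-side conversions are where the rationale in the commit message pays off: once a wait loop like the one in TestGlobalMemStoreSize reads EnvironmentEdgeManager, a test can drive the clock explicitly instead of depending on wall time. A minimal JUnit 4 sketch, not part of the patch, assuming ManualEnvironmentEdge and the injectEdge/reset hooks from hbase-common; the test class and the values are illustrative:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
    import org.junit.After;
    import org.junit.Test;

    public class TestClockInjectionSketch {
      @Test
      public void testClockIsControllable() {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(1000L);
        EnvironmentEdgeManager.injectEdge(edge);
        long start = EnvironmentEdgeManager.currentTime();
        edge.incValue(5000L); // "five seconds pass" without sleeping
        assertEquals(5000L, EnvironmentEdgeManager.currentTime() - start);
      }

      @After
      public void tearDown() {
        EnvironmentEdgeManager.reset(); // restore the default system clock edge
      }
    }
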
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index cfff0c4..8adffd3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.wal.WAL; import org.junit.ClassRule; @@ -299,7 +300,7 @@ public class TestIOFencing { oldHri, compactionDescriptor, compactingRegion.getMVCC()); // Wait till flush has happened, otherwise there won't be multiple store files - long startWaitTime = System.currentTimeMillis(); + long startWaitTime = EnvironmentEdgeManager.currentTime(); while (compactingRegion.getEarliestFlushTimeForAllStores() <= lastFlushTime || compactingRegion.countStoreFiles() <= 1) { LOG.info("Waiting for the region to flush " + @@ -307,7 +308,7 @@ public class TestIOFencing { Thread.sleep(1000); admin.flush(table.getName()); assertTrue("Timed out waiting for the region to flush", - System.currentTimeMillis() - startWaitTime < 30000); + EnvironmentEdgeManager.currentTime() - startWaitTime < 30000); } assertTrue(compactingRegion.countStoreFiles() > 1); final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName(); @@ -321,7 +322,7 @@ public class TestIOFencing { LOG.info("Killing region server ZK lease"); TEST_UTIL.expireRegionServerSession(0); CompactionBlockerRegion newRegion = null; - startWaitTime = System.currentTimeMillis(); + startWaitTime = EnvironmentEdgeManager.currentTime(); LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME)); // wait for region to be assigned and to go out of log replay if applicable @@ -355,11 +356,11 @@ public class TestIOFencing { TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); admin.majorCompact(TABLE_NAME); - startWaitTime = System.currentTimeMillis(); + startWaitTime = EnvironmentEdgeManager.currentTime(); while (newRegion.compactCount.get() == 0) { Thread.sleep(1000); assertTrue("New region never compacted", - System.currentTimeMillis() - startWaitTime < 180000); + EnvironmentEdgeManager.currentTime() - startWaitTime < 180000); } int count; for (int i = 0;; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java index 3bcf906..9ee78dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java @@ -232,7 +232,7 @@ public class TestMetaTableAccessor { // it as a fail.
We can't put that in the @Test tag as we want to close // the threads nicely final long timeOut = 180000; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { // Make sure reader and writer are working. @@ -247,8 +247,7 @@ public class TestMetaTableAccessor { int index = -1; do { index = UTIL.getMiniHBaseCluster().getServerWithMeta(); - } while (index == -1 && - startTime + timeOut < System.currentTimeMillis()); + } while (index == -1 && startTime + timeOut < EnvironmentEdgeManager.currentTime()); if (index != -1){ UTIL.getMiniHBaseCluster().abortRegionServer(index); @@ -267,7 +266,7 @@ public class TestMetaTableAccessor { writer.join(); t.close(); } - long exeTime = System.currentTimeMillis() - startTime; + long exeTime = EnvironmentEdgeManager.currentTime() - startTime; assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut); } @@ -454,7 +453,7 @@ public class TestMetaTableAccessor { ServerName serverName1 = ServerName.valueOf("bar", 60010, random.nextLong()); ServerName serverName100 = ServerName.valueOf("baz", 60010, random.nextLong()); - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(HConstants.EMPTY_START_ROW) .setEndKey(HConstants.EMPTY_END_ROW) @@ -541,7 +540,7 @@ public class TestMetaTableAccessor { @Test public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(HConstants.EMPTY_START_ROW) .setEndKey(HConstants.EMPTY_END_ROW) @@ -564,7 +563,7 @@ public class TestMetaTableAccessor { @Test public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(HConstants.EMPTY_START_ROW) @@ -604,7 +603,7 @@ public class TestMetaTableAccessor { @Test public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) @@ -697,7 +696,7 @@ public class TestMetaTableAccessor { */ @Test public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(HConstants.EMPTY_START_ROW) .setEndKey(HConstants.EMPTY_END_ROW) @@ -736,7 +735,7 @@ public class TestMetaTableAccessor { @Test public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey(HConstants.EMPTY_START_ROW) @@ -891,7 +890,7 @@ public class 
TestMetaTableAccessor { @Test public void testEmptyMetaDaughterLocationDuringSplit() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo")) .setStartKey(HConstants.EMPTY_START_ROW) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index 75a86e2..d55cc7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -34,19 +34,17 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.HBaseRpcController; -import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.internal.matchers.Any; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -147,8 +145,8 @@ public class TestMetaTableAccessorNoCluster { ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), ABORTABLE, true); // This is a servername we use in a few places below. - ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis()); - + ServerName sn = ServerName.valueOf("example.com", 1234, + EnvironmentEdgeManager.currentTime()); ClusterConnection connection = null; try { // Mock an ClientProtocol. 
Our mock implementation will fail a few diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java index 9274fa0..db72ae7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java @@ -25,6 +25,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -60,7 +61,7 @@ public class TestMetaTableLocator { private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableLocator.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final ServerName SN = - ServerName.valueOf("example.org", 1234, System.currentTimeMillis()); + ServerName.valueOf("example.org", 1234, EnvironmentEdgeManager.currentTime()); private ZKWatcher watcher; private Abortable abortable; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 9f0cf6b..029fe8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.DataInputBuffer; import org.junit.ClassRule; import org.junit.Test; @@ -59,12 +60,13 @@ public class TestSerialization { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSerialization.class); - @Test public void testKeyValue() throws Exception { + @Test + public void testKeyValue() throws Exception { final String name = "testKeyValue2"; byte[] row = name.getBytes(); byte[] fam = "fam".getBytes(); byte[] qf = "qf".getBytes(); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); byte[] val = "val".getBytes(); KeyValue kv = new KeyValue(row, fam, qf, ts, val); ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -182,119 +184,12 @@ public class TestSerialization { HConstants.EMPTY_END_ROW); } - /* - * TODO - @Test public void testPut() throws Exception{ - byte[] row = "row".getBytes(); - byte[] fam = "fam".getBytes(); - byte[] qf1 = "qf1".getBytes(); - byte[] qf2 = "qf2".getBytes(); - byte[] qf3 = "qf3".getBytes(); - byte[] qf4 = "qf4".getBytes(); - byte[] qf5 = "qf5".getBytes(); - byte[] qf6 = "qf6".getBytes(); - byte[] qf7 = "qf7".getBytes(); - byte[] qf8 = "qf8".getBytes(); - - long ts = System.currentTimeMillis(); - byte[] val = "val".getBytes(); - - Put put = new Put(row); - put.setWriteToWAL(false); - put.add(fam, qf1, ts, val); - put.add(fam, qf2, ts, val); - put.add(fam, qf3, ts, val); - put.add(fam, qf4, ts, val); - put.add(fam, qf5, ts, val); - put.add(fam, qf6, ts, val); - put.add(fam, qf7, ts, val); - put.add(fam, qf8, ts, val); - - byte[] sb = 
Writables.getBytes(put); - Put desPut = (Put)Writables.getWritable(sb, new Put()); - - //Timing test -// long start = System.nanoTime(); -// desPut = (Put)Writables.getWritable(sb, new Put()); -// long stop = System.nanoTime(); -// System.out.println("timer " +(stop-start)); - - assertTrue(Bytes.equals(put.getRow(), desPut.getRow())); - List list = null; - List desList = null; - for(Map.Entry> entry : put.getFamilyMap().entrySet()){ - assertTrue(desPut.getFamilyMap().containsKey(entry.getKey())); - list = entry.getValue(); - desList = desPut.getFamilyMap().get(entry.getKey()); - for(int i=0; i list = null; - List desList = null; - for(Map.Entry> entry : put.getFamilyMap().entrySet()){ - assertTrue(desPut.getFamilyMap().containsKey(entry.getKey())); - list = entry.getValue(); - desList = desPut.getFamilyMap().get(entry.getKey()); - for(int i=0; i list = null; - List desList = null; - for(Map.Entry> entry : - delete.getFamilyMap().entrySet()){ - assertTrue(desDelete.getFamilyMap().containsKey(entry.getKey())); - list = entry.getValue(); - desList = desDelete.getFamilyMap().get(entry.getKey()); - for(int i=0; i storeFiles, long timeout) throws IOException { - long end = System.currentTimeMillis() + timeout; + long end = EnvironmentEdgeManager.currentTime() + timeout; Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()); List archivedFiles = new ArrayList<>(); // We have to ensure that the DeleteTableHandler is finished. HBaseAdmin.deleteXXX() can return before all files // are archived. We should fix HBASE-5487 and fix synchronous operations from admin. - while (System.currentTimeMillis() < end) { + while (EnvironmentEdgeManager.currentTime() < end) { archivedFiles = getAllFileNames(fs, archiveDir); if (archivedFiles.size() >= storeFiles.size()) { break; @@ -586,8 +587,8 @@ public class TestHFileArchiving { choreService.scheduleChore(cleaner); // Keep creating/archiving new files while the cleaner is running in the other thread - long startTime = System.currentTimeMillis(); - for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) { + long startTime = EnvironmentEdgeManager.currentTime(); + for (long fid = 0; (EnvironmentEdgeManager.currentTime() - startTime) < TEST_TIME; ++fid) { Path file = new Path(familyDir, String.valueOf(fid)); Path sourceFile = new Path(rootDir, file); Path archiveFile = new Path(archiveDir, file); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java index e8c0167..0340bdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientAfterSplittingRegionTestBase.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class CloneSnapshotFromClientAfterSplittingRegionTestBase @@ -53,7 +54,7 @@ public class CloneSnapshotFromClientAfterSplittingRegionTestBase // Clone the snapshot to another table TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + 
TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName); SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); @@ -93,7 +94,7 @@ public class CloneSnapshotFromClientAfterSplittingRegionTestBase // Clone the snapshot to another table TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName); SnapshotTestingUtils.waitForTableToBeOnline(TEST_UTIL, clonedTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java index 254aeac..1d7e67c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientCloneLinksAfterDeleteTestBase.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase @@ -31,7 +32,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase public void testCloneLinksAfterDelete() throws IOException, InterruptedException { // Clone a table from the first snapshot final TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "1-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "1-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); @@ -41,7 +42,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase // Clone the snapshot of the cloned table final TableName clonedTableName2 = - TableName.valueOf(getValidMethodName() + "2-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "2-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName2); verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows); admin.disableTable(clonedTableName2); @@ -69,7 +70,7 @@ public class CloneSnapshotFromClientCloneLinksAfterDeleteTestBase // Clone a new table from cloned final TableName clonedTableName3 = - TableName.valueOf(getValidMethodName() + "3-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "3-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName2, clonedTableName3); verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java index 04df8e4..d660fff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientErrorTestBase.java @@ -21,15 +21,16 @@ import java.io.IOException; import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class CloneSnapshotFromClientErrorTestBase extends CloneSnapshotFromClientTestBase { @Test(expected = SnapshotDoesNotExistException.class) public void testCloneNonExistentSnapshot() throws IOException, InterruptedException { - String snapshotName = "random-snapshot-" + System.currentTimeMillis(); + String snapshotName = "random-snapshot-" + EnvironmentEdgeManager.currentTime(); final TableName tableName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName, tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java index 3e26076..852ee91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientNormalTestBase.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClientTestBase { @@ -28,7 +29,7 @@ public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClie @Test public void testCloneSnapshot() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); testCloneSnapshot(clonedTableName, emptySnapshot, 0); @@ -50,10 +51,10 @@ public class CloneSnapshotFromClientNormalTestBase extends CloneSnapshotFromClie @Test public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException { - String nsName = getValidMethodName() + "_ns_" + System.currentTimeMillis(); + String nsName = getValidMethodName() + "_ns_" + EnvironmentEdgeManager.currentTime(); admin.createNamespace(NamespaceDescriptor.create(nsName).build()); final TableName clonedTableName = - TableName.valueOf(nsName, getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(nsName, getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); testCloneSnapshot(clonedTableName, emptySnapshot, 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java index c74ec1d..1922219 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/CloneSnapshotFromClientTestBase.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -84,8 +85,7 @@ public class CloneSnapshotFromClientTestBase { @Before public void setup() throws Exception { this.admin = TEST_UTIL.getAdmin(); - - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); tableName = TableName.valueOf(getValidMethodName() + tid); emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid); snapshotName0 = Bytes.toBytes("snaptb0-" + tid); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java index 07625f9..101ba9c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientCloneTestBase.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class RestoreSnapshotFromClientCloneTestBase extends RestoreSnapshotFromClientTestBase { @@ -27,7 +28,7 @@ public class RestoreSnapshotFromClientCloneTestBase extends RestoreSnapshotFromC @Test public void testCloneSnapshotOfCloned() throws IOException, InterruptedException { TableName clonedTableName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); admin.cloneSnapshot(snapshotName0, clonedTableName); verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java index f4f2698..89da008 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Test; public class RestoreSnapshotFromClientSimpleTestBase extends RestoreSnapshotFromClientTestBase { @@ -65,7 +66,7 @@ public class RestoreSnapshotFromClientSimpleTestBase extends RestoreSnapshotFrom public void testCorruptedSnapshot() throws IOException, InterruptedException { SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, Bytes.toString(snapshotName0)); TableName cloneName = - TableName.valueOf(getValidMethodName() + "-" + System.currentTimeMillis()); + TableName.valueOf(getValidMethodName() + "-" + EnvironmentEdgeManager.currentTime()); try { admin.cloneSnapshot(snapshotName0, cloneName); fail("Expected CorruptedSnapshotException, got 
succeeded cloneSnapshot()"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java index 047640f..edd636a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientTestBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -81,8 +82,7 @@ public class RestoreSnapshotFromClientTestBase { @Before public void setup() throws Exception { this.admin = TEST_UTIL.getAdmin(); - - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); tableName = TableName.valueOf(getValidMethodName() + "-" + tid); emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid); snapshotName0 = Bytes.toBytes("snaptb0-" + tid); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 90eb594..b73aeef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.junit.Assert; @@ -291,8 +292,8 @@ public class TestAdmin2 extends TestAdminBase { } boolean isInList = ProtobufUtil.getOnlineRegions( rs.getRSRpcServices()).contains(info); - long timeout = System.currentTimeMillis() + 10000; - while ((System.currentTimeMillis() < timeout) && (isInList)) { + long timeout = EnvironmentEdgeManager.currentTime() + 10000; + while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) { Thread.sleep(100); isInList = ProtobufUtil.getOnlineRegions( rs.getRSRpcServices()).contains(info); @@ -347,8 +348,8 @@ public class TestAdmin2 extends TestAdminBase { boolean isInList = ProtobufUtil.getOnlineRegions( rs.getRSRpcServices()).contains(info); - long timeout = System.currentTimeMillis() + 10000; - while ((System.currentTimeMillis() < timeout) && (isInList)) { + long timeout = EnvironmentEdgeManager.currentTime() + 10000; + while ((EnvironmentEdgeManager.currentTime() < timeout) && (isInList)) { Thread.sleep(100); isInList = ProtobufUtil.getOnlineRegions( rs.getRSRpcServices()).contains(info); @@ -531,14 +532,14 @@ public class TestAdmin2 extends TestAdminBase { conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 9999)+10); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); try { HBaseAdmin.available(conf); assertTrue(false); } catch (ZooKeeperConnectionException ignored) { } catch (IOException ignored) { } - long end = System.currentTimeMillis(); + long end = EnvironmentEdgeManager.currentTime(); 
LOG.info("It took "+(end-start)+" ms to find out that" + " HBase was not available"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java index 441d401..e7be08e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin3.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -359,7 +360,7 @@ public class TestAdmin3 extends TestAdminBase { // Now make it so at least the table exists and then do tests against a // nonexistent column family -- see if we get right exceptions. final TableName tableName = - TableName.valueOf(name.getMethodName() + System.currentTimeMillis()); + TableName.valueOf(name.getMethodName() + EnvironmentEdgeManager.currentTime()); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build(); ADMIN.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index a213cf6..66d9b15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -112,14 +112,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { // wait till the table is assigned HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - long timeoutTime = System.currentTimeMillis() + 3000; + long timeoutTime = EnvironmentEdgeManager.currentTime() + 3000; while (true) { List regions = master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName); if (regions.size() > 3) { return regions.get(2); } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > timeoutTime) { fail("Could not find an online region"); } @@ -163,13 +163,13 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { assertTrue(destServerName != null && !destServerName.equals(serverName)); admin.move(hri.getRegionName(), destServerName).get(); - long timeoutTime = System.currentTimeMillis() + 30000; + long timeoutTime = EnvironmentEdgeManager.currentTime() + 30000; while (true) { ServerName sn = rawAdmin.getRegionLocation(hri.getRegionName()).get().getServerName(); if (sn != null && sn.equals(destServerName)) { break; } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > timeoutTime) { fail("Failed to move the region in time: " + hri); } @@ -426,14 +426,14 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { } } - long curt = System.currentTimeMillis(); + long curt = EnvironmentEdgeManager.currentTime(); long waitTime = 10000; long endt = curt + waitTime; CompactionState state = admin.getCompactionState(tableName).get(); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(1); state = admin.getCompactionState(tableName).get(); - curt = System.currentTimeMillis(); + curt 
= EnvironmentEdgeManager.currentTime(); } // Now, should have the right compaction state, // otherwise, the compaction should have already been done diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java index e56d754..8665be7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTable.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; import org.junit.Before; @@ -391,7 +392,7 @@ public class TestAsyncTable { @Deprecated public void testCheckAndMutateWithTimeRangeForOldApi() throws Exception { AsyncTable table = getTable.get(); - final long ts = System.currentTimeMillis() / 2; + final long ts = EnvironmentEdgeManager.currentTime() / 2; Put put = new Put(row); put.addColumn(FAMILY, QUALIFIER, ts, VALUE); @@ -750,7 +751,7 @@ public class TestAsyncTable { @Test public void testCheckAndMutateWithTimeRange() throws Exception { AsyncTable table = getTable.get(); - final long ts = System.currentTimeMillis() / 2; + final long ts = EnvironmentEdgeManager.currentTime() / 2; Put put = new Put(row); put.addColumn(FAMILY, QUALIFIER, ts, VALUE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java index c7e466a..e798910 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.junit.Assert; import org.junit.ClassRule; @@ -253,7 +254,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase { admin.flush(tableName).join(); } admin.majorCompact(tableName).join(); - long curt = System.currentTimeMillis(); + long curt = EnvironmentEdgeManager.currentTime(); long waitTime = 10000; long endt = curt + waitTime; CompactionState state = admin.getCompactionState(tableName).get(); @@ -261,7 +262,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase { while (state == CompactionState.NONE && curt < endt) { Thread.sleep(100); state = admin.getCompactionState(tableName).get(); - curt = System.currentTimeMillis(); + curt = EnvironmentEdgeManager.currentTime(); LOG.info("Current compaction state 2 is " + state); } // Now, should have the right compaction state, let's wait until the compaction is done diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java index f4f5e36..d81d704 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -1462,12 +1463,13 @@ public class TestBlockEvictionFromClient { private void waitForStoreFileCount(HStore store, int count, int timeout) throws InterruptedException { - long start = System.currentTimeMillis(); - while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) { + long start = EnvironmentEdgeManager.currentTime(); + while (start + timeout > EnvironmentEdgeManager.currentTime() && + store.getStorefilesCount() != count) { Thread.sleep(100); } - System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" + - store.getStorefilesCount()); + System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + + ", cur=" + store.getStorefilesCount()); assertEquals(count, store.getStorefilesCount()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java index 888b3d2..a47ebf3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -109,9 +110,9 @@ public class TestClientScannerRPCTimeout { result = scanner.next(); assertTrue("Expected row: row-1", Bytes.equals(r1, result.getRow())); LOG.info("Got expected first row"); - long t1 = System.currentTimeMillis(); + long t1 = EnvironmentEdgeManager.currentTime(); result = scanner.next(); - assertTrue((System.currentTimeMillis() - t1) > rpcTimeout); + assertTrue((EnvironmentEdgeManager.currentTime() - t1) > rpcTimeout); assertTrue("Expected row: row-2", Bytes.equals(r2, result.getRow())); RSRpcServicesWithScanTimeout.seqNoToSleepOn = -1;// No need of sleep result = scanner.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index dbc0da9..9d8ea55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -161,7 +162,7 @@ public class TestClientTimeouts { */ static class 
RandomTimeoutBlockingRpcChannel extends AbstractRpcClient.BlockingRpcChannelImplementation { - private static final Random RANDOM = new Random(System.currentTimeMillis()); + private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime()); public static final double CHANCE_OF_TIMEOUT = 0.3; private static AtomicInteger invokations = new AtomicInteger(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java index 5984477..1472fe7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java @@ -67,7 +67,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -82,6 +81,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector; import org.apache.hbase.thirdparty.io.netty.util.ResourceLeakDetector.Level; @@ -401,7 +401,7 @@ public class TestConnectionImplementation { table.put(put); ManualEnvironmentEdge mee = new ManualEnvironmentEdge(); - mee.setValue(System.currentTimeMillis()); + mee.setValue(EnvironmentEdgeManager.currentTime()); EnvironmentEdgeManager.injectEdge(mee); LOG.info("first get"); table.get(new Get(ROW)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java index 5d088b3..1a72343 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.LoadTestKVGenerator; import org.junit.After; import org.junit.AfterClass; @@ -197,7 +198,7 @@ public class TestFastFail { doneHalfway.countDown(); continueOtherHalf.await(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); g = new Get(row); g.addColumn(FAMILY, QUALIFIER); try { @@ -213,7 +214,7 @@ public class TestFastFail { numFailedThreads.addAndGet(1); return false; } finally { - long enTime = System.currentTimeMillis(); + long enTime = EnvironmentEdgeManager.currentTime(); totalTimeTaken.addAndGet(enTime - startTime); if ((enTime - startTime) >= SLEEPTIME) { // Considering the slow workers as the blockedWorkers. 
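The substitution repeated throughout these hunks is mechanical, but the payoff is testability: once code reads the clock through EnvironmentEdgeManager.currentTime(), a test can pin or jump time instead of sleeping. Below is a minimal sketch of that pattern, using only the calls that appear in this diff (ManualEnvironmentEdge.setValue and EnvironmentEdgeManager.injectEdge, as in the TestConnectionImplementation hunk above); the deadline arithmetic is illustrative and not taken from any one test, and the reset() cleanup is the assumed counterpart to injectEdge.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class InjectableClockSketch {
  public static void main(String[] args) {
    // Pin the clock to a known value before exercising time-dependent code.
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(0L);
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      // Deadline computed the same way as the wait loops in this diff.
      long deadline = EnvironmentEdgeManager.currentTime() + 10000;
      // Advance virtual time past the deadline without sleeping ten real seconds.
      edge.setValue(20000L);
      boolean timedOut = EnvironmentEdgeManager.currentTime() >= deadline;
      System.out.println("timedOut=" + timedOut); // prints timedOut=true
    } finally {
      // Restore the default edge so later tests see the real clock again.
      EnvironmentEdgeManager.reset();
    }
  }
}

With System.currentTimeMillis() such a test would have to sleep through the timeout; with the injected edge it runs in microseconds, which is why the loops in TestAdmin2, TestBlockEvictionFromClient, and the rest are being converted.
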
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 0c9654d..9113117 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.ClassRule; import org.junit.Rule; @@ -205,7 +206,7 @@ public class TestFromClientSide extends FromClientSideBase { desc.addFamily(hcd); TEST_UTIL.getAdmin().createTable(desc); try (Table h = TEST_UTIL.getConnection().getTable(tableName)) { - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.addColumn(FAMILY, C0, T1); h.put(p); @@ -264,7 +265,7 @@ public class TestFromClientSide extends FromClientSideBase { try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { // future timestamp - long ts = System.currentTimeMillis() * 2; + long ts = EnvironmentEdgeManager.currentTime() * 2; Put put = new Put(ROW, ts); put.addColumn(FAMILY, COLUMN, VALUE); table.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index ed7d72d..9defaf5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.AfterClass; @@ -234,7 +235,7 @@ public class TestFromClientSide3 { byte[] row = Bytes.toBytes("SpecifiedRow"); byte[] qual0 = Bytes.toBytes("qual0"); byte[] qual1 = Bytes.toBytes("qual1"); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Delete d = new Delete(row, now); table.delete(d); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java index 809fd2a..a1c7a6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java @@ -951,7 +951,7 @@ public class TestFromClientSide5 extends FromClientSideBase { @Test public void testCheckAndMutateWithTimeRange() throws IOException { try (Table table = TEST_UTIL.createTable(name.getTableName(), FAMILY)) { - final long ts = System.currentTimeMillis() / 2; + final long ts = EnvironmentEdgeManager.currentTime() / 2; Put put = new Put(ROW); put.addColumn(FAMILY, QUALIFIER, ts, VALUE); @@ -1485,12 +1485,13 @@ public class TestFromClientSide5 extends FromClientSideBase { private void waitForStoreFileCount(HStore store, int count, int timeout) throws 
InterruptedException { - long start = System.currentTimeMillis(); - while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) { + long start = EnvironmentEdgeManager.currentTime(); + while (start + timeout > EnvironmentEdgeManager.currentTime() && + store.getStorefilesCount() != count) { Thread.sleep(100); } - System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" + - store.getStorefilesCount()); + System.out.println("start=" + start + ", now=" + EnvironmentEdgeManager.currentTime() + + ", cur=" + store.getStorefilesCount()); assertEquals(count, store.getStorefilesCount()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java index 1cae7a0..699c086 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -111,10 +112,9 @@ public class TestMobCloneSnapshotFromClientCloneLinksAfterDelete // delay the flush to make sure delayFlush = true; SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 20, FAMILY); - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); byte[] snapshotName3 = Bytes.toBytes("snaptb3-" + tid); - TableName clonedTableName3 = - TableName.valueOf(name.getMethodName() + System.currentTimeMillis()); + TableName clonedTableName3 = TableName.valueOf(name.getMethodName() + tid); admin.snapshot(snapshotName3, tableName); delayFlush = false; int snapshot3Rows = -1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java index b558358..9f2cc01 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -46,7 +47,7 @@ public class TestPutDeleteEtcCellIteration { HBaseClassTestRule.forClass(TestPutDeleteEtcCellIteration.class); private static final byte [] ROW = new byte [] {'r'}; - private static final long TIMESTAMP = System.currentTimeMillis(); + private static final long TIMESTAMP = EnvironmentEdgeManager.currentTime(); private static final int COUNT = 10; @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index e10f342..88b4fb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -912,7 +912,7 @@ public class TestScannersFromClientSide { @Test public void testReadExpiredDataForRawScan() throws IOException { TableName tableName = name.getTableName(); - long ts = System.currentTimeMillis() - 10000; + long ts = EnvironmentEdgeManager.currentTime() - 10000; byte[] value = Bytes.toBytes("expired"); try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) { table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value)); @@ -932,7 +932,7 @@ public class TestScannersFromClientSide { @Test public void testScanWithColumnsAndFilterAndVersion() throws IOException { TableName tableName = name.getTableName(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try (Table table = TEST_UTIL.createTable(tableName, FAMILY, 4)) { for (int i = 0; i < 4; i++) { Put put = new Put(ROW); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 90201c6..874a9ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.AfterClass; @@ -268,7 +269,7 @@ public class TestSnapshotCloneIndependence { countOriginalTable, clonedTableRowCount); // Attempt to add data to the test - Put p = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis())); + Put p = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime())); p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); originalTable.put(p); @@ -279,7 +280,7 @@ public class TestSnapshotCloneIndependence { "The row count of the cloned table changed as a result of addition to the original", clonedTableRowCount, countRows(clonedTable)); - Put p2 = new Put(Bytes.toBytes("new-row-" + System.currentTimeMillis())); + Put p2 = new Put(Bytes.toBytes("new-row-" + EnvironmentEdgeManager.currentTime())); p2.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); clonedTable.put(p2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index ef52057..10b9622 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -151,7 +152,7 @@ public class TestSnapshotMetadata { * Create a table that has non-default properties so we can see if they hold */ private void createTableWithNonDefaultProperties() throws Exception { - final long startTime = System.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); final String sourceTableNameAsString = STRING_TABLE_NAME + startTime; originalTableName = TableName.valueOf(sourceTableNameAsString); @@ -193,8 +194,8 @@ public class TestSnapshotMetadata { // Clone the original table final String clonedTableNameAsString = "clone" + originalTableName; final TableName clonedTableName = TableName.valueOf(clonedTableNameAsString); - final String snapshotNameAsString = "snapshot" + originalTableName - + System.currentTimeMillis(); + final String snapshotNameAsString = "snapshot" + originalTableName + + EnvironmentEdgeManager.currentTime(); final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); // restore the snapshot into a cloned table and examine the output @@ -285,8 +286,8 @@ public class TestSnapshotMetadata { } // take a "disabled" snapshot - final String snapshotNameAsString = "snapshot" + originalTableName - + System.currentTimeMillis(); + final String snapshotNameAsString = "snapshot" + originalTableName + + EnvironmentEdgeManager.currentTime(); final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, @@ -296,7 +297,7 @@ public class TestSnapshotMetadata { admin.enableTable(originalTableName); if (changeMetadata) { - final String newFamilyNameAsString = "newFamily" + System.currentTimeMillis(); + final String newFamilyNameAsString = "newFamily" + EnvironmentEdgeManager.currentTime(); final byte[] newFamilyName = Bytes.toBytes(newFamilyNameAsString); admin.disableTable(originalTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java index f7681d7..254e1e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotTemporaryDirectory.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -156,7 +157,7 @@ public class TestSnapshotTemporaryDirectory { @Test public void testRestoreDisabledSnapshot() throws IOException, InterruptedException { - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); TableName tableName = TableName.valueOf("testtb-" + tid); byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid); byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid); @@ -223,7 +224,7 @@ public class TestSnapshotTemporaryDirectory { @Test public void testRestoreEnabledSnapshot() throws IOException, InterruptedException { - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); TableName tableName = TableName.valueOf("testtb-" + tid); byte[] emptySnapshot = 
Bytes.toBytes("emptySnaptb-" + tid); byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 33e525d..2ed35d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -449,7 +450,7 @@ public class TestTableSnapshotScanner { } }); // set file modify time and then run cleaner - long time = System.currentTimeMillis() - TimeToLiveHFileCleaner.DEFAULT_TTL * 1000; + long time = EnvironmentEdgeManager.currentTime() - TimeToLiveHFileCleaner.DEFAULT_TTL * 1000; traverseAndSetFileTime(HFileArchiveUtil.getArchivePath(conf), time); UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().runCleaner(); // scan snapshot diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java index 5a2a893..2b8c3a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.Before; import org.junit.ClassRule; @@ -100,14 +101,14 @@ public class TestEntityLocks { } private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) { - long startMillis = System.currentTimeMillis(); + long startMillis = EnvironmentEdgeManager.currentTime(); while (lock.isLocked()) { LOG.info("Sleeping..."); Threads.sleepWithoutInterrupt(100); if (!lock.isLocked()) { return true; } - if (System.currentTimeMillis() - startMillis > maxWaitTimeMillis) { + if (EnvironmentEdgeManager.currentTime() - startMillis > maxWaitTimeMillis) { LOG.info("Timedout..."); return false; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java index bba27fe..73f5ca0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/codec/CodecPerformance.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.codec.KeyValueCodec; import org.apache.hadoop.hbase.codec.MessageCodec; import org.apache.hadoop.hbase.io.CellOutputStream; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Do basic codec performance eval. 
@@ -68,13 +69,13 @@ public class CodecPerformance { static byte [] runEncoderTest(final int index, final int initialBufferSize, final ByteArrayOutputStream baos, final CellOutputStream encoder, final Cell [] cells) throws IOException { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < cells.length; i++) { encoder.write(cells[i]); } encoder.flush(); LOG.info("" + index + " encoded count=" + cells.length + " in " + - (System.currentTimeMillis() - startTime) + "ms for encoder " + encoder); + (EnvironmentEdgeManager.currentTime() - startTime) + "ms for encoder " + encoder); // Ensure we did not have to grow the backing buffer. assertTrue(baos.size() < initialBufferSize); return baos.toByteArray(); @@ -83,12 +84,12 @@ public class CodecPerformance { static Cell [] runDecoderTest(final int index, final int count, final CellScanner decoder) throws IOException { Cell [] cells = new Cell[count]; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); for (int i = 0; decoder.advance(); i++) { cells[i] = decoder.current(); } LOG.info("" + index + " decoded count=" + cells.length + " in " + - (System.currentTimeMillis() - startTime) + "ms for decoder " + decoder); + (EnvironmentEdgeManager.currentTime() - startTime) + "ms for decoder " + decoder); // Ensure we did not have to grow the backing buffer. assertTrue(cells.length == count); return cells; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java index 24f74d8..9b35c2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorMetrics.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.junit.AfterClass; @@ -109,14 +110,14 @@ public class TestCoprocessorMetrics { public void preCreateTable(ObserverContext ctx, TableDescriptor desc, RegionInfo[] regions) throws IOException { // we rely on the fact that there is only 1 instance of our MasterObserver - this.start = System.currentTimeMillis(); + this.start = EnvironmentEdgeManager.currentTime(); } @Override public void postCreateTable(ObserverContext ctx, TableDescriptor desc, RegionInfo[] regions) throws IOException { if (this.start > 0) { - long time = System.currentTimeMillis() - start; + long time = EnvironmentEdgeManager.currentTime() - start; LOG.info("Create table took: " + time); createTableTimer.updateMillis(time); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java index 2fc4dea..4517b25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorStop.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MiniHBaseCluster; import 
org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -50,10 +51,9 @@ public class TestCoprocessorStop { private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorStop.class); private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static final String MASTER_FILE = - "master" + System.currentTimeMillis(); - private static final String REGIONSERVER_FILE = - "regionserver" + System.currentTimeMillis(); + private static final String MASTER_FILE = "master" + EnvironmentEdgeManager.currentTime(); + private static final String REGIONSERVER_FILE = "regionserver" + + EnvironmentEdgeManager.currentTime(); public static class FooCoprocessor implements MasterCoprocessor, RegionServerCoprocessor { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 0b861ce..6a6d128 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -673,7 +673,7 @@ public class TestRegionObserverInterface { EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp; // force a compaction - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); admin.flush(compactTable); // wait for flush for (int i = 0; i < 10; i++) { @@ -956,7 +956,7 @@ public class TestRegionObserverInterface { HFileContext context = new HFileContextBuilder().build(); HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path) .withFileContext(context).create(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { for (int i = 1; i <= 9; i++) { KeyValue kv = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index f38ca2e..991ffe8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WALEdit; import org.junit.ClassRule; import org.junit.experimental.categories.Category; @@ -68,7 +69,7 @@ public class TestRegionObserverStacking extends TestCase { final Put put, final WALEdit edit, final Durability durability) throws IOException { - id = System.currentTimeMillis(); + id = EnvironmentEdgeManager.currentTime(); try { Thread.sleep(10); } catch (InterruptedException ex) { @@ -89,7 +90,7 @@ public class TestRegionObserverStacking extends TestCase { final Put put, final WALEdit edit, final Durability durability) throws IOException { - id = System.currentTimeMillis(); + id = EnvironmentEdgeManager.currentTime(); try { Thread.sleep(10); } 
catch (InterruptedException ex) { @@ -110,7 +111,7 @@ public class TestRegionObserverStacking extends TestCase { final Put put, final WALEdit edit, final Durability durability) throws IOException { - id = System.currentTimeMillis(); + id = EnvironmentEdgeManager.currentTime(); try { Thread.sleep(10); } catch (InterruptedException ex) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 6fbd3b5..4ab00ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -142,10 +142,9 @@ public class TestWALObserver { this.fs = TEST_UTIL.getDFSCluster().getFileSystem(); this.hbaseRootDir = CommonFSUtils.getRootDir(conf); this.hbaseWALRootDir = CommonFSUtils.getWALRootDir(conf); - this.oldLogDir = new Path(this.hbaseWALRootDir, - HConstants.HREGION_OLDLOGDIR_NAME); + this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME); String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010, - System.currentTimeMillis()).toString(); + EnvironmentEdgeManager.currentTime()).toString(); this.logDir = new Path(this.hbaseWALRootDir, AbstractFSWALProvider.getWALDirectoryName(serverName)); @@ -352,7 +351,8 @@ public class TestWALObserver { LOG.info("WALSplit path == " + p); // Make a new wal for new region open. final WALFactory wals2 = new WALFactory(conf, - ServerName.valueOf(currentTest.getMethodName() + "2", 16010, System.currentTimeMillis()) + ServerName.valueOf(currentTest.getMethodName() + "2", 16010, + EnvironmentEdgeManager.currentTime()) .toString()); WAL wal2 = wals2.getWAL(null); HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java index 9b8e14e..a23f7be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/favored/TestFavoredNodeAssignmentHelper.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Triple; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -98,7 +99,8 @@ public class TestFavoredNodeAssignmentHelper { } }); for (int i = 0; i < 40; i++) { - ServerName server = ServerName.valueOf("foo" + i, 1234, System.currentTimeMillis()); + ServerName server = ServerName.valueOf("foo" + i, 1234, + EnvironmentEdgeManager.currentTime()); String rack = getRack(i); if (!rack.equals(RackManager.UNKNOWN_RACK)) { rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index dc8a423..c154ee7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -65,7 +66,7 @@ public class TestDependentColumnFilter { private static final byte[][] FAMILIES = { Bytes.toBytes("familyOne"),Bytes.toBytes("familyTwo") }; - private static final long STAMP_BASE = System.currentTimeMillis(); + private static final long STAMP_BASE = EnvironmentEdgeManager.currentTime(); private static final long[] STAMPS = { STAMP_BASE-100, STAMP_BASE-200, STAMP_BASE-300 }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java index b62bb8e..ad1bb8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterSerialization.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.ClassRule; import org.junit.Test; @@ -304,8 +305,8 @@ public class TestFilterSerialization { // Non-empty timestamp list LinkedList list = new LinkedList<>(); - list.add(System.currentTimeMillis()); - list.add(System.currentTimeMillis()); + list.add(EnvironmentEdgeManager.currentTime()); + list.add(EnvironmentEdgeManager.currentTime()); timestampsFilter = new TimestampsFilter(list); assertTrue(timestampsFilter.areSerializedFieldsEqual( ProtobufUtil.toFilter(ProtobufUtil.toFilter(timestampsFilter)))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java index a612290..6d7db5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowAndColumnRangeFilter.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.AfterClass; @@ -174,7 +175,7 @@ public class TestFuzzyRowAndColumnRangeFilter { ResultScanner scanner = hTable.getScanner(scan); List results = new ArrayList<>(); Result result; - long timeBeforeScan = System.currentTimeMillis(); + long timeBeforeScan = EnvironmentEdgeManager.currentTime(); while ((result = scanner.next()) != null) { for (Cell kv : result.listCells()) { LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: " @@ -182,7 +183,7 @@ public class TestFuzzyRowAndColumnRangeFilter { results.add(kv); } } - long scanTime = 
System.currentTimeMillis() - timeBeforeScan; + long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan; scanner.close(); LOG.info("scan time = " + scanTime + "ms"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java index cf32631..aa44b65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFuzzyRowFilterEndToEnd.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.AfterClass; @@ -341,14 +342,14 @@ public class TestFuzzyRowFilterEndToEnd { RegionScanner scanner = first.getScanner(scan); List results = new ArrayList<>(); // Result result; - long timeBeforeScan = System.currentTimeMillis(); + long timeBeforeScan = EnvironmentEdgeManager.currentTime(); int found = 0; while (scanner.next(results)) { found += results.size(); results.clear(); } found += results.size(); - long scanTime = System.currentTimeMillis() - timeBeforeScan; + long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan; scanner.close(); LOG.info("\nscan time = " + scanTime + "ms"); @@ -442,7 +443,7 @@ public class TestFuzzyRowFilterEndToEnd { ResultScanner scanner = hTable.getScanner(scan); List results = new ArrayList<>(); Result result; - long timeBeforeScan = System.currentTimeMillis(); + long timeBeforeScan = EnvironmentEdgeManager.currentTime(); while ((result = scanner.next()) != null) { for (Cell kv : result.listCells()) { LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: " @@ -450,7 +451,7 @@ public class TestFuzzyRowFilterEndToEnd { results.add(kv); } } - long scanTime = System.currentTimeMillis() - timeBeforeScan; + long scanTime = EnvironmentEdgeManager.currentTime() - timeBeforeScan; scanner.close(); LOG.info("scan time = " + scanTime + "ms"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java index a75c0d0..58c9011 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -112,10 +113,10 @@ public class TestBlockReorder { fop.close(); // Let's check we can read it when everybody's there - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); FSDataInputStream fin = dfs.open(p); Assert.assertTrue(toWrite == fin.readDouble()); - long end = System.currentTimeMillis(); + long end = EnvironmentEdgeManager.currentTime(); LOG.info("readtime= " + (end - start)); 
fin.close(); Assert.assertTrue((end - start) < 30 * 1000); @@ -194,12 +195,11 @@ public class TestBlockReorder { // Now it will fail with a timeout, unfortunately it does not always connect to the same box, // so we try retries times; with the reorder it will never last more than a few milli seconds for (int i = 0; i < retries; i++) { - start = System.currentTimeMillis(); - + start = EnvironmentEdgeManager.currentTime(); fin = dfs.open(p); Assert.assertTrue(toWrite == fin.readDouble()); fin.close(); - end = System.currentTimeMillis(); + end = EnvironmentEdgeManager.currentTime(); LOG.info("HFileSystem readtime= " + (end - start)); Assert.assertFalse("We took too much time to read", (end - start) > 60000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java index ce2877c..ead8dcb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderBlockLocation.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -121,13 +122,13 @@ public class TestBlockReorderBlockLocation { for (int i=0; i<10; i++){ // The interceptor is not set in this test, so we get the raw list at this point LocatedBlocks l; - final long max = System.currentTimeMillis() + 10000; + final long max = EnvironmentEdgeManager.currentTime() + 10000; do { l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1); Assert.assertNotNull(l.getLocatedBlocks()); Assert.assertEquals(1, l.getLocatedBlocks().size()); Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length, - System.currentTimeMillis() < max); + EnvironmentEdgeManager.currentTime() < max); } while (l.get(0).getLocations().length != repCount); // Should be filtered, the name is different => The order won't change diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java index 0a4a7f5..16d1ef7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorderMultiBlocks.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -249,10 +250,10 @@ public class TestBlockReorderMultiBlocks { for (int i = 0; i < 10; i++) { LocatedBlocks l; // The NN gets the block list asynchronously, so we may need multiple tries to get the list - final long max = System.currentTimeMillis() + 10000; + final long max = EnvironmentEdgeManager.currentTime() + 10000; boolean done; do { - 
Assert.assertTrue("Can't get enouth replica.", System.currentTimeMillis() < max); + Assert.assertTrue("Can't get enouth replica", EnvironmentEdgeManager.currentTime() < max); l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1); Assert.assertNotNull("Can't get block locations for " + src, l); Assert.assertNotNull(l.getLocatedBlocks()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 3f326a3..398817f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -612,9 +613,9 @@ public class TestHeapSize { // do estimate in advance to ensure class is loaded ClassSize.estimateBase(cl, false); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); ClassSize.estimateBase(cl, false); - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); assertTrue(endTime - startTime < 5); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index 514f966..9596ad8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -235,12 +236,12 @@ public class TestChangingEncoding { admin.majorCompact(tableName); // Waiting for the compaction to start, at least .5s. 
- final long maxWaitime = System.currentTimeMillis() + 500; + final long maxWaitime = EnvironmentEdgeManager.currentTime() + 500; boolean cont; do { cont = rs.getCompactSplitThread().getCompactionQueueSize() == 0; Threads.sleep(1); - } while (cont && System.currentTimeMillis() < maxWaitime); + } while (cont && EnvironmentEdgeManager.currentTime() < maxWaitime); while (rs.getCompactSplitThread().getCompactionQueueSize() > 0) { Threads.sleep(1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 2da64e2..d7294bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.compress.Compressor; import org.junit.After; @@ -760,11 +761,11 @@ public class TestHFileBlock { @Override public Boolean call() throws Exception { Random rand = new Random(clientId.hashCode()); - long endTime = System.currentTimeMillis() + 10000; + long endTime = EnvironmentEdgeManager.currentTime() + 10000; int numBlocksRead = 0; int numPositionalRead = 0; int numWithOnDiskSize = 0; - while (System.currentTimeMillis() < endTime) { + while (EnvironmentEdgeManager.currentTime() < endTime) { int blockId = rand.nextInt(NUM_TEST_BLOCKS); long offset = offsets.get(blockId); // now we only support concurrent read with pread = true diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java index de6bc24..cd33aac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileScannerImplReferenceCount.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -139,7 +140,7 @@ public class TestHFileScannerImplReferenceCount { this.allocator = ByteBuffAllocator.create(UTIL.getConfiguration(), true); this.conf = new Configuration(UTIL.getConfiguration()); this.fs = this.workDir.getFileSystem(conf); - this.hfilePath = new Path(this.workDir, caseName + System.currentTimeMillis()); + this.hfilePath = new Path(this.workDir, caseName + EnvironmentEdgeManager.currentTime()); LOG.info("Start to write {} cells into hfile: {}, case:{}", CELL_COUNT, hfilePath, caseName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java index 0429541..1574df7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java @@ -107,7 +107,7 @@ public class TestScannerFromBucketCache { byte[] qf2 = Bytes.toBytes("qualifier2"); byte[] fam1 = Bytes.toBytes("lrucache"); - long ts1 = 1; // System.currentTimeMillis(); + long ts1 = 1; long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -145,7 +145,7 @@ public class TestScannerFromBucketCache { byte[] qf2 = Bytes.toBytes("qualifier2"); byte[] fam1 = Bytes.toBytes("famoffheap"); - long ts1 = 1; // System.currentTimeMillis(); + long ts1 = 1; long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -186,7 +186,7 @@ public class TestScannerFromBucketCache { byte[] qf2 = Bytes.toBytes("qualifier2"); byte[] fam1 = Bytes.toBytes("famoffheap"); - long ts1 = 1; // System.currentTimeMillis(); + long ts1 = 1; long ts2 = ts1 + 1; long ts3 = ts1 + 2; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java index ab282e3..61d91e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtobufRpcServiceImpl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; @@ -58,13 +59,14 @@ public class TestProtobufRpcServiceImpl implements BlockingInterface { public static BlockingInterface newBlockingStub(RpcClient client, InetSocketAddress addr, User user) throws IOException { return TestProtobufRpcProto.newBlockingStub(client.createBlockingRpcChannel( - ServerName.valueOf(addr.getHostName(), addr.getPort(), System.currentTimeMillis()), user, 0)); + ServerName.valueOf(addr.getHostName(), addr.getPort(), + EnvironmentEdgeManager.currentTime()), user, 0)); } public static Interface newStub(RpcClient client, InetSocketAddress addr) throws IOException { return TestProtobufRpcProto.newStub(client.createRpcChannel( - ServerName.valueOf(addr.getHostName(), addr.getPort(), System.currentTimeMillis()), - User.getCurrent(), 0)); + ServerName.valueOf(addr.getHostName(), addr.getPort(), + EnvironmentEdgeManager.currentTime()), User.getCurrent(), 0)); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index dee9c65..f791421 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -480,7 +480,12 @@ public class TestSimpleRpcScheduler { for (String threadNamePrefix : threadNamePrefixs) { String threadName = Thread.currentThread().getName(); if (threadName.startsWith(threadNamePrefix)) { - return timeQ.poll().longValue() + offset; + if (timeQ != null) { + Long qTime = timeQ.poll(); + if (qTime != null) { + return qTime.longValue() + offset; + } + } } } return System.currentTimeMillis(); @@ -511,17 +516,16 @@ public class TestSimpleRpcScheduler { try { // Loading mocked call runner can take a good amount of time the first time through // (haven't looked why). Load it for first time here outside of the timed loop. 
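
[Editorial sketch, not part of the patch.] The null guard added to TestSimpleRpcScheduler above belongs to a custom test edge that replays recorded timestamps. Distilled into a standalone sketch, with illustrative names rather than the test's actual inner class:

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import org.apache.hadoop.hbase.util.EnvironmentEdge;

    public class ReplayingEdgeSketch implements EnvironmentEdge {
      final Queue<Long> timeQ = new ConcurrentLinkedQueue<>();
      volatile long offset;

      @Override
      public long currentTime() {
        Long queued = timeQ.poll(); // may be null once the recorded times drain
        // fall back to the wall clock instead of NPE-ing, as the fix above does
        return queued != null ? queued.longValue() + offset : System.currentTimeMillis();
      }
    }
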
- getMockedCallRunner(System.currentTimeMillis(), 2); + getMockedCallRunner(EnvironmentEdgeManager.currentTime(), 2); scheduler.start(); EnvironmentEdgeManager.injectEdge(envEdge); envEdge.offset = 5; // Calls faster than min delay // LOG.info("Start"); for (int i = 0; i < 100; i++) { - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); envEdge.timeQ.put(time); CallRunner cr = getMockedCallRunner(time, 2); - // LOG.info("" + i + " " + (System.currentTimeMillis() - now) + " cr=" + cr); scheduler.dispatch(cr); } // LOG.info("Loop done"); @@ -534,7 +538,7 @@ public class TestSimpleRpcScheduler { envEdge.offset = 151; // calls slower than min delay, but not individually slow enough to be dropped for (int i = 0; i < 20; i++) { - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); envEdge.timeQ.put(time); CallRunner cr = getMockedCallRunner(time, 2); scheduler.dispatch(cr); @@ -549,7 +553,7 @@ public class TestSimpleRpcScheduler { envEdge.offset = 2000; // now slow calls and the ones to be dropped for (int i = 0; i < 60; i++) { - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); envEdge.timeQ.put(time); CallRunner cr = getMockedCallRunner(time, 100); scheduler.dispatch(cr); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index 24fdbc4..1e11c6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; @@ -371,9 +372,10 @@ public abstract class AbstractTestDLS { row = Arrays.copyOfRange(row, 3, 8); // use last 5 bytes because // HBaseTestingUtility.createMultiRegions use 5 bytes key byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i)); - e.add(new KeyValue(row, COLUMN_FAMILY, qualifier, System.currentTimeMillis(), value)); + e.add(new KeyValue(row, COLUMN_FAMILY, qualifier, EnvironmentEdgeManager.currentTime(), + value)); log.appendData(curRegionInfo, new WALKeyImpl(curRegionInfo.getEncodedNameAsBytes(), - tableName, System.currentTimeMillis(), mvcc), e); + tableName, EnvironmentEdgeManager.currentTime(), mvcc), e); if (0 == i % syncEvery) { log.sync(); } @@ -428,12 +430,12 @@ public abstract class AbstractTestDLS { private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems) throws InterruptedException { - long curt = System.currentTimeMillis(); + long curt = EnvironmentEdgeManager.currentTime(); long endt = curt + timems; while (curt < endt) { if (ctr.sum() == oldval) { Thread.sleep(100); - curt = System.currentTimeMillis(); + curt = EnvironmentEdgeManager.currentTime(); } else { assertEquals(newval, ctr.sum()); return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 986d718..81d6beb 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZKListener; @@ -89,7 +90,8 @@ public class TestActiveMasterManager { } // Create the master node with a dummy address - ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis()); + ServerName master = ServerName.valueOf("localhost", 1, + EnvironmentEdgeManager.currentTime()); // Should not have a master yet DummyMaster dummyMaster = new DummyMaster(zk, master); ClusterStatusTracker clusterStatusTracker = @@ -138,9 +140,9 @@ public class TestActiveMasterManager { // Create the master node with a dummy address ServerName firstMasterAddress = - ServerName.valueOf("localhost", 1, System.currentTimeMillis()); + ServerName.valueOf("localhost", 1, EnvironmentEdgeManager.currentTime()); ServerName secondMasterAddress = - ServerName.valueOf("localhost", 2, System.currentTimeMillis()); + ServerName.valueOf("localhost", 2, EnvironmentEdgeManager.currentTime()); // Should not have a master yet DummyMaster ms1 = new DummyMaster(zk, firstMasterAddress); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index a0aae32..ee2c99a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -65,7 +66,7 @@ public class TestClockSkewDetection { RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); request.setPort(1234); request.setServerStartCode(-1); - request.setServerCurrentTime(System.currentTimeMillis()); + request.setServerCurrentTime(EnvironmentEdgeManager.currentTime()); sm.regionServerStartup(request.build(), 0, "0.0.0", ia1); final Configuration c = HBaseConfiguration.create(); @@ -80,7 +81,7 @@ public class TestClockSkewDetection { request = RegionServerStartupRequest.newBuilder(); request.setPort(1235); request.setServerStartCode(-1); - request.setServerCurrentTime(System.currentTimeMillis() - maxSkew * 2); + request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() - maxSkew * 2); sm.regionServerStartup(request.build(), 0, "0.0.0", ia2); fail("HMaster should have thrown a ClockOutOfSyncException but didn't."); } catch(ClockOutOfSyncException e) { @@ -96,7 +97,7 @@ public class TestClockSkewDetection { request = RegionServerStartupRequest.newBuilder(); request.setPort(1236); request.setServerStartCode(-1); 
- request.setServerCurrentTime(System.currentTimeMillis() + maxSkew * 2); + request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() + maxSkew * 2); sm.regionServerStartup(request.build(), 0, "0.0.0", ia3); fail("HMaster should have thrown a ClockOutOfSyncException but didn't."); } catch (ClockOutOfSyncException e) { @@ -110,7 +111,7 @@ public class TestClockSkewDetection { request = RegionServerStartupRequest.newBuilder(); request.setPort(1237); request.setServerStartCode(-1); - request.setServerCurrentTime(System.currentTimeMillis() - warningSkew * 2); + request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() - warningSkew * 2); sm.regionServerStartup(request.build(), 0, "0.0.0", ia4); // make sure values above warning threshold but below max threshold don't kill @@ -119,7 +120,7 @@ public class TestClockSkewDetection { request = RegionServerStartupRequest.newBuilder(); request.setPort(1238); request.setServerStartCode(-1); - request.setServerCurrentTime(System.currentTimeMillis() + warningSkew * 2); + request.setServerCurrentTime(EnvironmentEdgeManager.currentTime() + warningSkew * 2); sm.regionServerStartup(request.build(), 0, "0.0.0", ia5); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index d2b3791..91a3c48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -91,7 +92,8 @@ public class TestMasterMetrics { try { serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn).setVersionNumber(sm.getVersionNumber()) - .setVersion(sm.getVersion()).setLastReportTimestamp(System.currentTimeMillis()) + .setVersion(sm.getVersion()) + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()) .build()); } catch (YouAreDeadException e) { throw new UncheckedIOException(e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index dff2051..e4e408e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -161,7 +162,7 @@ public class TestMasterNoCluster { */ @Ignore @Test // Disabled since HBASE-18511. Reenable when master can carry regions. 
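
[Editorial sketch, not part of the patch.] The clock-skew registrations above drive a check of roughly this shape inside the master's ServerManager; the method and variable names here are paraphrased, not quoted:

    import org.apache.hadoop.hbase.ClockOutOfSyncException;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class SkewCheckSketch {
      static void checkClockSkew(long reportedTime, long maxSkew, long warningSkew)
          throws ClockOutOfSyncException {
        // the master's "now" also comes from the injectable clock
        long skew = Math.abs(EnvironmentEdgeManager.currentTime() - reportedTime);
        if (skew > maxSkew) {
          throw new ClockOutOfSyncException(
            "Server clock skew of " + skew + "ms exceeds max " + maxSkew + "ms");
        } else if (skew > warningSkew) {
          System.out.println("WARN: clock skew of " + skew + "ms exceeds warning threshold");
        }
      }
    }
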
public void testFailover() throws Exception { - final long now = System.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); // Names for our three servers. Make the port numbers match hostname. // Will come in use down in the server when we need to figure how to respond. final ServerName sn0 = ServerName.valueOf("0.example.org", 0, now); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java index ac2f9ab..f9bdac5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -70,20 +71,20 @@ public class TestMetaAssignmentWithStopMaster { ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName(); UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test"); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); while (UTIL.getMiniHBaseCluster().getMaster() == null || UTIL.getMiniHBaseCluster().getMaster() .getServerName().equals(oldMaster)) { LOG.info("Wait the standby master become active"); Thread.sleep(3000); - if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { + if (EnvironmentEdgeManager.currentTime() - startTime > WAIT_TIMEOUT) { fail("Wait too long for standby master become active"); } } - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) { LOG.info("Wait the new active master to be initialized"); Thread.sleep(3000); - if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { + if (EnvironmentEdgeManager.currentTime() - startTime > WAIT_TIMEOUT) { fail("Wait too long for the new active master to be initialized"); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index 87028f3..ec84503 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; @@ -190,7 +191,7 @@ public class TestRegionPlacement { throws IOException, InterruptedException, KeeperException { ServerName serverToKill = null; int killIndex = 0; - Random random = new Random(System.currentTimeMillis()); + Random random = new Random(EnvironmentEdgeManager.currentTime()); ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta(); LOG.debug("Server 
holding meta " + metaServer); boolean isNamespaceServer = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 3dbe86b..17b0f6b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.regionserver.TestMasterAddressTracker.NodeCreationListener; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -253,7 +254,7 @@ public class TestSplitLogManager { assertTrue(task.isOrphan()); waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); assertFalse(task.isUnassigned()); - long curt = System.currentTimeMillis(); + long curt = EnvironmentEdgeManager.currentTime(); assertTrue((task.last_update <= curt) && (task.last_update > (curt - 1000))); LOG.info("waiting for manager to resubmit the orphan task"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index b95ed45..e64cd58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -99,7 +100,7 @@ public class TestWarmupRegion { // future timestamp for (int i = 0; i < numRows; i++) { - long ts = System.currentTimeMillis() * 2; + long ts = EnvironmentEdgeManager.currentTime() * 2; Put put = new Put(ROW, ts); put.addColumn(FAMILY, COLUMN, VALUE); table.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 2860480..bd1786a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -157,7 +158,7 @@ public class MockMasterServices extends MockNoopMasterServices { for (int i = 0; i < numServes; ++i) { ServerName sn = ServerName.valueOf("localhost", 100 + i, 1); serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn) 
- .setLastReportTimestamp(System.currentTimeMillis()).build()); + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } this.procedureExecutor.getEnvironment().setEventReady(initialized, true); } @@ -184,7 +185,7 @@ public class MockMasterServices extends MockNoopMasterServices { } ServerName sn = ServerName.valueOf(serverName.getAddress().toString(), startCode); serverManager.regionServerReport(sn, ServerMetricsBuilder.newBuilder(sn) - .setLastReportTimestamp(System.currentTimeMillis()).build()); + .setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index b7dd87b..a631fe6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -144,14 +145,14 @@ public class TestAssignmentManager extends TestAssignmentManagerBase { TransitRegionStateProcedure[] assignments = new TransitRegionStateProcedure[nRegions]; - long st = System.currentTimeMillis(); + long st = EnvironmentEdgeManager.currentTime(); bulkSubmit(assignments); for (int i = 0; i < assignments.length; ++i) { ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(), assignments[i]); assertTrue(assignments[i].toString(), assignments[i].isSuccess()); } - long et = System.currentTimeMillis(); + long et = EnvironmentEdgeManager.currentTime(); float sec = ((et - st) / 1000.0f); LOG.info(String.format("[T] Assigning %dprocs in %s (%.2fproc/sec)", assignments.length, StringUtils.humanTimeDiff(et - st), assignments.length / sec)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java index c3a613c..85255d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.junit.After; import org.junit.Before; @@ -310,7 +311,7 @@ public abstract class TestAssignmentManagerBase { newRsAdded++; try { this.master.getServerManager().regionServerReport(newSn, ServerMetricsBuilder - .newBuilder(newSn).setLastReportTimestamp(System.currentTimeMillis()).build()); + .newBuilder(newSn).setLastReportTimestamp(EnvironmentEdgeManager.currentTime()).build()); } catch (YouAreDeadException e) { // should not happen throw new 
UncheckedIOException(e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java index 0d3790f..10d069e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionReplicaSplit.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.RegionSplitter; import org.junit.AfterClass; @@ -139,7 +140,7 @@ public class TestRegionReplicaSplit { final RegionInfo fakeHri = RegionInfoBuilder.newBuilder(table.getName()).setStartKey(Bytes.toBytes("a")) .setEndKey(Bytes.toBytes("b")).setReplicaId(1) - .setRegionId(System.currentTimeMillis()).build(); + .setRegionId(EnvironmentEdgeManager.currentTime()).build(); // To test AssignProcedure can defend this case. HTU.getMiniHBaseCluster().getMaster().getAssignmentManager().assign(fakeHri); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 02215ca..a99d79f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -149,7 +149,7 @@ public class TestRegionStateStore { @Test public void testMetaLocationForRegionReplicasIsRemovedAtTableDeletion() throws IOException { - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); TableName tableName = name.getTableName(); RegionInfo primary = RegionInfoBuilder.newBuilder(tableName) .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java index b24ec16..c887e3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; @@ -170,7 +171,7 @@ public class TestRegionStates { final int NRUNS = 1000000; // 1M final RegionStates stateMap = new RegionStates(); - long st = System.currentTimeMillis(); + long st = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < NRUNS; ++i) { final int regionId = i; executorService.submit(new Callable() { @@ -182,12 +183,12 @@ public class TestRegionStates { }); } 
waitExecutorService(NRUNS); - long et = System.currentTimeMillis(); + long et = EnvironmentEdgeManager.currentTime(); LOG.info(String.format("PERF STATEMAP INSERT: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); - st = System.currentTimeMillis(); + st = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < NRUNS; ++i) { final int regionId = i; executorService.submit(new Callable() { @@ -200,7 +201,7 @@ public class TestRegionStates { } waitExecutorService(NRUNS); - et = System.currentTimeMillis(); + et = EnvironmentEdgeManager.currentTime(); LOG.info(String.format("PERF STATEMAP GET: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); @@ -212,11 +213,11 @@ public class TestRegionStates { final int NRUNS = 1 * 1000000; // 1M final RegionStates stateMap = new RegionStates(); - long st = System.currentTimeMillis(); + long st = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < NRUNS; ++i) { stateMap.createRegionStateNode(createRegionInfo(TABLE_NAME, i)); } - long et = System.currentTimeMillis(); + long et = EnvironmentEdgeManager.currentTime(); LOG.info(String.format("PERF SingleThread: %s %s/sec", StringUtils.humanTimeDiff(et - st), StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java index c88e583..8c0541c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -149,7 +150,8 @@ public class TestRogueRSAssignment { List tableRegions = createTable(tableName); final ServerName sn = ServerName.parseVersionedServerName( - ServerName.valueOf("1.example.org", 1, System.currentTimeMillis()).getVersionedBytes()); + ServerName.valueOf("1.example.org", 1, EnvironmentEdgeManager.currentTime()) + .getVersionedBytes()); // make fake request with a region assigned to different RS RegionServerStatusProtos.RegionServerReportRequest.Builder request = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index b07147d..db311ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -89,7 +89,7 @@ public class TestHFileCleaner { Path root = UTIL.getDataTestDirOnTestFS(); Path file = new Path(root, "file"); fs.createNewFile(file); - long createTime = System.currentTimeMillis(); + long createTime = EnvironmentEdgeManager.currentTime(); assertTrue("Test file not created!", fs.exists(file)); TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner(); // update the time info for the file, so the cleaner removes it @@ -167,7 +167,7 @@ public class TestHFileCleaner { 
HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir, POOL); // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files - final long createTime = System.currentTimeMillis(); + final long createTime = EnvironmentEdgeManager.currentTime(); fs.delete(archivedHfileDir, true); fs.mkdirs(archivedHfileDir); // Case 1: 1 invalid file, which should be deleted directly diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index ac29fee..e0414fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; @@ -149,7 +150,7 @@ public class TestLogsCleaner { final FileSystem fs = FileSystem.get(conf); fs.mkdirs(OLD_PROCEDURE_WALS_DIR); - final long now = System.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); // Case 1: 2 invalid files, which would be deleted directly fs.createNewFile(new Path(OLD_WALS_DIR, "a")); @@ -229,8 +230,8 @@ public class TestLogsCleaner { ReplicationLogCleaner cleaner = new ReplicationLogCleaner(); List dummyFiles = Arrays.asList( - new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")), - new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2")) + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log1")), + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), new Path("log2")) ); try (FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, @@ -269,7 +270,7 @@ public class TestLogsCleaner { // Subtract 1000 from current time so modtime is for sure older // than 'now'. 
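
[Editorial sketch, not part of the patch.] The cleaner tests being touched here all reduce to one comparison: a file becomes eligible for deletion once the injectable clock has moved past its modification time by the TTL. A sketch under that assumption (the TTL constant is invented):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class TtlSketch {
      static final long TTL_MS = 1000L; // hypothetical time-to-live

      static boolean isExpired(long modTime) {
        // deletable once the (possibly injected) "now" passes modTime + TTL
        return EnvironmentEdgeManager.currentTime() - modTime >= TTL_MS;
      }
    }
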
- long modTime = System.currentTimeMillis() - 1000; + long modTime = EnvironmentEdgeManager.currentTime() - 1000; List dummyFiles = Arrays.asList( new FileStatus(100, false, 3, 100, modTime, new Path("log1")), new FileStatus(100, false, 3, 100, modTime, new Path("log2")) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index db5c7b2..2b43734 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -192,9 +193,10 @@ public class TestReplicationHFileCleaner { ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner(); List dummyFiles = - Lists.newArrayList(new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path( - "hfile1")), new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path( - "hfile2"))); + Lists.newArrayList(new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), + new Path("hfile1")), + new FileStatus(100, false, 3, 100, EnvironmentEdgeManager.currentTime(), + new Path("hfile2"))); FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java index 74797e6..c0b7646 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.zookeeper.KeeperException; import org.junit.After; @@ -136,7 +137,7 @@ public class TestCatalogJanitor { Path storedir = HRegionFileSystem.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName()); Reference ref = Reference.createTopReference(Bytes.toBytes("ccc")); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem(); @@ -624,7 +625,7 @@ public class TestCatalogJanitor { td.getColumnFamilies()[0].getName()); Reference ref = top? 
Reference.createTopReference(midkey): Reference.createBottomReference(midkey); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs = services.getMasterFileSystem().getFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java index ca64891..8199787 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorCluster.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStateStore; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.Before; @@ -121,11 +122,11 @@ public class TestCatalogJanitorCluster { RegionInfo ri = t3Ris.get(0); RegionInfo newRi1 = RegionInfoBuilder.newBuilder(ri.getTable()) .setStartKey(incrementRow(ri.getStartKey())).setEndKey(incrementRow(ri.getEndKey())).build(); - Put p1 = MetaTableAccessor.makePutFromRegionInfo(newRi1, System.currentTimeMillis()); + Put p1 = MetaTableAccessor.makePutFromRegionInfo(newRi1, EnvironmentEdgeManager.currentTime()); RegionInfo newRi2 = RegionInfoBuilder.newBuilder(newRi1.getTable()) .setStartKey(incrementRow(newRi1.getStartKey())).setEndKey(incrementRow(newRi1.getEndKey())) .build(); - Put p2 = MetaTableAccessor.makePutFromRegionInfo(newRi2, System.currentTimeMillis()); + Put p2 = MetaTableAccessor.makePutFromRegionInfo(newRi2, EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(p1, p2)); janitor.scan(); report = janitor.getLastReport(); @@ -182,7 +183,8 @@ public class TestCatalogJanitorCluster { // add a new region [a, cc) RegionInfo newRiT4 = RegionInfoBuilder.newBuilder(T4).setStartKey("a".getBytes()) .setEndKey("cc".getBytes()).build(); - Put putForT4 = MetaTableAccessor.makePutFromRegionInfo(newRiT4, System.currentTimeMillis()); + Put putForT4 = MetaTableAccessor.makePutFromRegionInfo(newRiT4, + EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT4)); janitor.scan(); @@ -204,7 +206,8 @@ public class TestCatalogJanitorCluster { // add a new region [a, g) RegionInfo newRiT5 = RegionInfoBuilder.newBuilder(T5).setStartKey("a".getBytes()) .setEndKey("g".getBytes()).build(); - Put putForT5 = MetaTableAccessor.makePutFromRegionInfo(newRiT5, System.currentTimeMillis()); + Put putForT5 = MetaTableAccessor.makePutFromRegionInfo(newRiT5, + EnvironmentEdgeManager.currentTime()); MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(putForT5)); janitor.scan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java index d608480..041c325 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; @@ -178,13 +179,13 @@ public class TestCatalogJanitorInMemoryStates { * @return Daughter regions; caller needs to check table actually split. */ private PairOfSameType waitOnDaughters(final RegionInfo r) throws IOException { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); PairOfSameType pair = null; try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { Result result = null; RegionInfo region = null; - while ((System.currentTimeMillis() - start) < 60000) { + while ((EnvironmentEdgeManager.currentTime() - start) < 60000) { result = metaTable.get(new Get(r.getRegionName())); if (result == null) { break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java index d398870..93b3920 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; @@ -166,12 +167,12 @@ public class TestMetaFixer { private static RegionInfo makeOverlap(MasterServices services, RegionInfo a, RegionInfo b) throws IOException { RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()). - setStartKey(a.getStartKey()). - setEndKey(b.getEndKey()). - build(); + setStartKey(a.getStartKey()). + setEndKey(b.getEndKey()). + build(); MetaTableAccessor.putsToMetaTable(services.getConnection(), - Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion, - System.currentTimeMillis()))); + Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion, + EnvironmentEdgeManager.currentTime()))); // TODO: Add checks at assign time to PREVENT being able to assign over existing assign. 
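
[Editorial sketch, not part of the patch.] MetaTableAccessor.makePutFromRegionInfo(region, ts), used in the janitor and MetaFixer hunks above, stamps the hbase:meta mutation with an explicit timestamp; sourcing that from the injectable clock keeps meta edits on the same timeline as everything else a test fakes. A sketch with an invented table and keys:

    import java.io.IOException;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class MetaPutSketch {
      static Put overlapPut() throws IOException {
        RegionInfo overlap = RegionInfoBuilder.newBuilder(TableName.valueOf("t1"))
            .setStartKey(Bytes.toBytes("a"))
            .setEndKey(Bytes.toBytes("b"))
            .build();
        // timestamp comes from the injectable clock, not System.currentTimeMillis()
        return MetaTableAccessor.makePutFromRegionInfo(overlap,
            EnvironmentEdgeManager.currentTime());
      }
    }
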
long assign = services.getAssignmentManager().assign(overlapRegion); ProcedureTestingUtility.waitProcedures(services.getMasterProcedureExecutor(), assign); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java index e75acb7..049e060 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.hamcrest.core.IsInstanceOf; import org.hamcrest.core.StringStartsWith; import org.junit.After; @@ -192,8 +193,8 @@ public class TestLockProcedure { * @throws TimeoutException if lock couldn't be acquired. */ private boolean awaitForLocked(long procId, long timeoutInMs) throws Exception { - long deadline = System.currentTimeMillis() + timeoutInMs; - while (System.currentTimeMillis() < deadline) { + long deadline = EnvironmentEdgeManager.currentTime() + timeoutInMs; + while (EnvironmentEdgeManager.currentTime() < deadline) { LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null, LockHeartbeatRequest.newBuilder().setProcId(procId).build()); if (response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) { @@ -297,7 +298,7 @@ public class TestLockProcedure { // Acquire namespace lock, then queue other locks. long nsProcId = queueLock(nsLock); assertTrue(awaitForLocked(nsProcId, 2000)); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); sendHeartbeatAndCheckLocked(nsProcId, true); long table1ProcId = queueLock(tableLock1); long table2ProcId = queueLock(tableLock2); @@ -305,7 +306,7 @@ public class TestLockProcedure { long regions2ProcId = queueLock(regionsLock2); // Assert tables & region locks are waiting because of namespace lock. 
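
[Editorial sketch, not part of the patch.] The awaitForLocked() change just above is the patch's most common shape: a poll-until-deadline loop driven entirely by the injectable clock. The generic form, with a hypothetical condition supplier:

    import java.util.function.BooleanSupplier;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class AwaitSketch {
      static boolean await(BooleanSupplier done, long timeoutMs) throws InterruptedException {
        long deadline = EnvironmentEdgeManager.currentTime() + timeoutMs;
        while (EnvironmentEdgeManager.currentTime() < deadline) {
          if (done.getAsBoolean()) {
            return true;  // condition met before the deadline
          }
          Thread.sleep(50); // back off between polls
        }
        return false;       // caller treats this as a timeout
      }
    }
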
- long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // leave extra 10 msec in case more than half the HEARTBEAT_TIMEOUT has passed Thread.sleep(Math.min(HEARTBEAT_TIMEOUT / 2, Math.max(HEARTBEAT_TIMEOUT-(now-start)-10, 0))); sendHeartbeatAndCheckLocked(nsProcId, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java index ae874d5..0c3303b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -200,7 +201,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase private class AddProcsWorker extends Thread { @Override public void run() { - final Random rand = new Random(System.currentTimeMillis()); + final Random rand = new Random(EnvironmentEdgeManager.currentTime()); long procId = procIds.incrementAndGet(); int index; while (procId <= numOps) { @@ -244,14 +245,14 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase * @return time taken by threads to complete, in milliseconds. 
*/ long runThreads(Thread[] threads) throws Exception { - final long startTime = System.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); for (Thread t : threads) { t.start(); } for (Thread t : threads) { t.join(); } - return System.currentTimeMillis() - startTime; + return EnvironmentEdgeManager.currentTime() - startTime; } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index c798c69..94254d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.MD5Hash; import org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -336,7 +337,7 @@ public class MasterProcedureTestingUtility { // Ensure one row per region assertTrue(rows >= splitKeys.length); for (byte[] k: splitKeys) { - byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k); + byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), k); byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value))); mutator.mutate(createPut(families, key, value)); rows--; @@ -344,7 +345,8 @@ public class MasterProcedureTestingUtility { // Add other extra rows. 
more rows, more files while (rows-- > 0) { - byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows)); + byte[] value = Bytes.add(Bytes.toBytes(EnvironmentEdgeManager.currentTime()), + Bytes.toBytes(rows)); byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value)); mutator.mutate(createPut(families, key, value)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java index f4fa282..3d32ea3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.ClassRule; import org.junit.Test; @@ -70,7 +71,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase { private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception { if (snapshot == null) { final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot"); - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); final byte[] snapshotName = Bytes.toBytes("snapshot-" + tid); Admin admin = UTIL.getAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java index 4c7c271..7e5892b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -96,7 +97,7 @@ public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase { } private void setupSnapshotAndUpdateTable() throws Exception { - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); final byte[] snapshotName = Bytes.toBytes("snapshot-" + tid); Admin admin = UTIL.getAdmin(); // create Table diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java index 44c5510..f531288 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSafemodeBringsDownMaster.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import 
org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -106,12 +107,12 @@ public class TestSafemodeBringsDownMaster { DistributedFileSystem dfs = (DistributedFileSystem) dfsCluster.getFileSystem(); dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); final long timeOut = 180000; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); int index = -1; do { index = UTIL.getMiniHBaseCluster().getServerWithMeta(); } while (index == -1 && - startTime + timeOut < System.currentTimeMillis()); + startTime + timeOut < EnvironmentEdgeManager.currentTime()); if (index != -1){ UTIL.getMiniHBaseCluster().abortRegionServer(index); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java index 9082b1d..45102dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.master.cleaner.DirScanPool; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; @@ -83,7 +84,7 @@ public class MasterRegionTestBase { Server server = mock(Server.class); when(server.getConfiguration()).thenReturn(htu.getConfiguration()); when(server.getServerName()) - .thenReturn(ServerName.valueOf("localhost", 12345, System.currentTimeMillis())); + .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); when(server.getChoreService()).thenReturn(choreService); Path testDir = htu.getDataTestDir(); CommonFSUtils.setRootDir(htu.getConfiguration(), testDir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java index 7fc72b3..713fc30 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionCompaction.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.junit.ClassRule; import org.junit.Test; @@ -128,7 +129,7 @@ public class TestMasterRegionCompaction extends MasterRegionTestBase { Thread.sleep(2000); // touch one file - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); fs.setTimes(compactedHFiles[0].getPath(), currentTime, currentTime); Thread.sleep(3000); // only the touched file is still there after clean up diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java index 4e49cb4..9d1e60f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.junit.After; import org.junit.AfterClass; @@ -135,7 +136,8 @@ public class TestMasterRegionOnTwoFileSystems { Path walRootDir = WAL_UTIL.getDataTestDirOnTestFS(); FileSystem walFs = WAL_UTIL.getTestFileSystem(); walFs.delete(walRootDir, true); - region = createMasterRegion(ServerName.valueOf("localhost", 12345, System.currentTimeMillis())); + region = createMasterRegion(ServerName.valueOf("localhost", 12345, + EnvironmentEdgeManager.currentTime())); } @After @@ -219,7 +221,7 @@ public class TestMasterRegionOnTwoFileSystems { } region.close(true); region = createMasterRegion( - ServerName.valueOf("localhost", 12345, System.currentTimeMillis() + round + 1)); + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime() + round + 1)); try (RegionScanner scanner = region.getScanner(new Scan())) { List cells = new ArrayList<>(); boolean moreValues = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java index e2c7a7f..f22ec8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Assert; public class MobTestUtil { @@ -63,7 +64,7 @@ public class MobTestUtil { */ private static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) throws IOException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) { for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index bffb062..9e90145 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.AfterClass; @@ -131,14 +132,14 @@ public class TestExpiredMobFileCleaner { Path mobDirPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, 
family); byte[] dummyData = makeDummyData(600); - long ts = System.currentTimeMillis() - 3 * secondsOfDay() * 1000; // 3 days before + long ts = EnvironmentEdgeManager.currentTime() - 3 * secondsOfDay() * 1000; // 3 days before putKVAndFlush(table, row1, dummyData, ts); FileStatus[] firstFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); //the first mob file assertEquals("Before cleanup without delay 1", 1, firstFiles.length); String firstFile = firstFiles[0].getPath().getName(); - ts = System.currentTimeMillis() - 1 * secondsOfDay() * 1000; // 1 day before + ts = EnvironmentEdgeManager.currentTime() - 1 * secondsOfDay() * 1000; // 1 day before putKVAndFlush(table, row2, dummyData, ts); FileStatus[] secondFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath); //now there are 2 mob files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java index 8aadf00..9ed2ad3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -104,7 +105,7 @@ public class TestMobDataBlockEncoding { public void testDataBlockEncoding(DataBlockEncoding encoding) throws Exception { String TN = "testDataBlockEncoding" + encoding; setUp(defaultThreshold, TN, encoding); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; byte[] value = generateMobValue((int) defaultThreshold + 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java index f14ceae..f3214e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java @@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -104,6 +103,8 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + @Category(LargeTests.class) public class TestMobCompactor { @@ -438,7 +439,7 @@ public class TestMobCompactor { loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count * rowNumPerFile; - long tid = System.currentTimeMillis(); + long tid = EnvironmentEdgeManager.currentTime(); byte[] snapshotName1 = Bytes.toBytes("snaptb-" + tid); // take a snapshot admin.snapshot(snapshotName1, tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 0aabd31..c50478d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -73,9 +73,9 @@ import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -86,6 +86,8 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + @Category(LargeTests.class) public class TestPartitionedMobCompactor { @@ -159,7 +161,7 @@ public class TestPartitionedMobCompactor { @Test public void testCompactionSelectPartFilesWeeklyPolicyWithPastWeek() throws Exception { String tableName = "testCompactionSelectPartFilesWeeklyPolicyWithPastWeek"; - Date dateLastWeek = new Date(System.currentTimeMillis() - (7 * DAY_IN_MS)); + Date dateLastWeek = new Date(EnvironmentEdgeManager.currentTime() - (7 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, 700, CompactionType.PART_FILES, false, false, dateLastWeek, MobCompactPartitionPolicy.WEEKLY, 7); } @@ -167,7 +169,7 @@ public class TestPartitionedMobCompactor { @Test public void testCompactionSelectAllFilesWeeklyPolicyWithPastWeek() throws Exception { String tableName = "testCompactionSelectAllFilesWeeklyPolicyWithPastWeek"; - Date dateLastWeek = new Date(System.currentTimeMillis() - (7 * DAY_IN_MS)); + Date dateLastWeek = new Date(EnvironmentEdgeManager.currentTime() - (7 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, 3000, CompactionType.ALL_FILES, false, false, dateLastWeek, MobCompactPartitionPolicy.WEEKLY, 7); } @@ -175,7 +177,7 @@ public class TestPartitionedMobCompactor { @Test public void testCompactionSelectAllFilesMonthlyPolicy() throws Exception { String tableName = "testCompactionSelectAllFilesMonthlyPolicy"; - Date dateLastWeek = new Date(System.currentTimeMillis() - (7 * DAY_IN_MS)); + Date dateLastWeek = new Date(EnvironmentEdgeManager.currentTime() - (7 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD, CompactionType.ALL_FILES, false, false, dateLastWeek, MobCompactPartitionPolicy.MONTHLY, 7); @@ -198,7 +200,7 @@ public class TestPartitionedMobCompactor { @Test public void testCompactionSelectPartFilesMonthlyPolicyWithPastWeek() throws Exception { String tableName = "testCompactionSelectPartFilesMonthlyPolicyWithPastWeek"; - Date dateLastWeek = new Date(System.currentTimeMillis() - (7 * DAY_IN_MS)); + Date dateLastWeek = new Date(EnvironmentEdgeManager.currentTime() - (7 * DAY_IN_MS)); Calendar calendar = Calendar.getInstance(); Date firstDayOfCurrentMonth = MobUtils.getFirstDayOfMonth(calendar, new Date()); CompactionType type = CompactionType.PART_FILES; @@ -219,7 +221,7 @@ public class TestPartitionedMobCompactor { @Test public void testCompactionSelectAllFilesMonthlyPolicyWithPastWeek() throws Exception { String 
tableName = "testCompactionSelectAllFilesMonthlyPolicyWithPastWeek"; - Date dateLastWeek = new Date(System.currentTimeMillis() - (7 * DAY_IN_MS)); + Date dateLastWeek = new Date(EnvironmentEdgeManager.currentTime() - (7 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, 3000, CompactionType.ALL_FILES, false, false, dateLastWeek, MobCompactPartitionPolicy.MONTHLY, 7); @@ -230,7 +232,7 @@ public class TestPartitionedMobCompactor { String tableName = "testCompactionSelectPartFilesMonthlyPolicyWithPastMonth"; // back 5 weeks, it is going to be a past month - Date dateLastMonth = new Date(System.currentTimeMillis() - (7 * 5 * DAY_IN_MS)); + Date dateLastMonth = new Date(EnvironmentEdgeManager.currentTime() - (7 * 5 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, 200, CompactionType.PART_FILES, false, false, dateLastMonth, MobCompactPartitionPolicy.MONTHLY, 28); } @@ -240,7 +242,7 @@ public class TestPartitionedMobCompactor { String tableName = "testCompactionSelectAllFilesMonthlyPolicyWithPastMonth"; // back 5 weeks, it is going to be a past month - Date dateLastMonth = new Date(System.currentTimeMillis() - (7 * 5 * DAY_IN_MS)); + Date dateLastMonth = new Date(EnvironmentEdgeManager.currentTime() - (7 * 5 * DAY_IN_MS)); testCompactionAtMergeSize(tableName, 750, CompactionType.ALL_FILES, false, false, dateLastMonth, MobCompactPartitionPolicy.MONTHLY, 28); } @@ -425,7 +427,7 @@ public class TestPartitionedMobCompactor { new StoreFileWriter.Builder(conf, cacheConf, fs).withFileContext(meta) .withFilePath(new Path(basePath, mobFileName.getFileName())).build(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { for (int i = 0; i < 10; i++) { byte[] key = Bytes.add(Bytes.toBytes(k0), Bytes.toBytes(i)); @@ -456,7 +458,7 @@ public class TestPartitionedMobCompactor { new StoreFileWriter.Builder(conf, cacheConf, fs).withFileContext(meta) .withFilePath(new Path(basePath, mobFileName.getFileName())).build(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { byte[] key = Bytes.add(Bytes.toBytes(KEYS[startKey]), Bytes.toBytes(0)); byte[] dummyData = new byte[5000]; @@ -860,7 +862,7 @@ public class TestPartitionedMobCompactor { */ private static void writeStoreFile(final StoreFileWriter writer, byte[]row, byte[] family, byte[] qualifier, Type type, int size) throws IOException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { byte[] dummyData = new byte[size]; new Random().nextBytes(dummyData); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java index b6a47da..b7a53d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluat import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -46,7 +47,7 @@ 
public class RegionProcedureStorePerformanceEvaluation private final Configuration conf; private final ServerName serverName = - ServerName.valueOf("localhost", 12345, System.currentTimeMillis()); + ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()); private volatile boolean abort = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java index dab1825..3246169 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; final class RegionProcedureStoreTestHelper { @@ -39,7 +40,7 @@ final class RegionProcedureStoreTestHelper { Server server = mock(Server.class); when(server.getConfiguration()).thenReturn(conf); when(server.getServerName()) - .thenReturn(ServerName.valueOf("localhost", 12345, System.currentTimeMillis())); + .thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime())); return server; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java index d7a0ce7..b05cc67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStore.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -162,7 +163,7 @@ public class TestRegionProcedureStore extends RegionProcedureStoreTestBase { return new RpcCall() { @Override public long getDeadline() { - return System.currentTimeMillis(); + return EnvironmentEdgeManager.currentTime(); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java index 96dc990..41fcf92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java @@ -130,7 +130,7 @@ public class TestRateLimiter { // fix the current time in order to get the precise value of interval EnvironmentEdge edge = new EnvironmentEdge() { - private final long ts = System.currentTimeMillis(); + private final long ts = EnvironmentEdgeManager.currentTime(); @Override public long currentTime() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java index 9161d23..6eacf07 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoadReplication.java @@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.After; @@ -129,8 +130,6 @@ public class TestBulkLoadReplication extends TestReplicationBase { private static final Path BULK_LOAD_BASE_DIR = new Path("/bulk_dir"); - private static Table htable3; - @Rule public TestName name = new TestName(); @@ -164,7 +163,6 @@ public class TestBulkLoadReplication extends TestReplicationBase { admin3.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); } UTIL3.waitUntilAllRegionsAssigned(tableName); - htable3 = connection3.getTable(tableName); } @Before @@ -363,7 +361,7 @@ public class TestBulkLoadReplication extends TestReplicationBase { new StoreFileWriter.Builder(util.getConfiguration(), new CacheConfig(util.getConfiguration()), util.getTestFileSystem()).withFileContext(meta) .withFilePath(new Path(basePath, mobFileName.getFileName())).build(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { for (int i = 0; i < 10; i++) { byte[] key = Bytes.add(Bytes.toBytes(rowKey), Bytes.toBytes(i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 22ec643..38fa587 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -228,15 +228,15 @@ public class TestCompactingMemStore extends TestDefaultMemStore { addRows(this.memstore); Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY); assertTrue(CellComparator.getInstance().compareRows(closestToEmpty, - new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); for (int i = 0; i < ROW_COUNT; i++) { Cell nr = ((CompactingMemStore)this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), - System.currentTimeMillis())); + EnvironmentEdgeManager.currentTime())); if (i + 1 == ROW_COUNT) { assertNull(nr); } else { assertTrue(CellComparator.getInstance().compareRows(nr, - new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); } } //starting from each row, validate results should contain the starting row @@ -864,7 +864,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int cellsCount = hmc.getActive().getCellsCount(); int totalLen = 0; for (int i = 0; i < keys.length; i++) { - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Threads.sleep(1); // to make sure each kv gets a different ts byte[] row = Bytes.toBytes(keys[i]); byte[] val = Bytes.toBytes(keys[i] + i); @@ 
-888,7 +888,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int cellsCount = hmc.getActive().getCellsCount(); int totalLen = 0; for (int i = 0; i < keys.length; i++) { - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Threads.sleep(1); // to make sure each kv gets a different ts byte[] row = Bytes.toBytes(keys[i]); KeyValue kv = new KeyValue(row, fam, qf, timestamp, val); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 0e9cb62..b2ce869 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.ClassRule; import org.junit.Test; @@ -493,7 +494,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore private void addRowsByKeysWith50Cols(AbstractMemStore hmc, String[] keys) { byte[] fam = Bytes.toBytes("testfamily"); for (int i = 0; i < keys.length; i++) { - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Threads.sleep(1); // to make sure each kv gets a different ts byte[] row = Bytes.toBytes(keys[i]); for(int j =0 ;j < 50; j++) { @@ -631,8 +632,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(keys1[0] + 0); KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), - System.currentTimeMillis(), val); + new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + EnvironmentEdgeManager.currentTime(), val); // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1); @@ -697,8 +698,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(bigVal); KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), - System.currentTimeMillis(), val); + new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + EnvironmentEdgeManager.currentTime(), val); // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1, val); @@ -771,8 +772,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] row = Bytes.toBytes(keys1[0]); byte[] val = Bytes.toBytes(bigVal); KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), - System.currentTimeMillis(), val); + new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + EnvironmentEdgeManager.currentTime(), val); // test 1 bucket int totalCellsLen = addRowsByKeys(memstore, keys1, val); @@ -874,7 +875,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore // Measuring the size of a single kv KeyValue kv = new KeyValue(Bytes.toBytes("A"), Bytes.toBytes("testfamily"), - 
Bytes.toBytes("testqualifier"), System.currentTimeMillis(), val); + Bytes.toBytes("testqualifier"), EnvironmentEdgeManager.currentTime(), val); long oneCellOnCCMHeapSize = (long) ClassSize.CELL_CHUNK_MAP_ENTRY + ClassSize.align(kv.getSerializedSize()); long oneCellOnCSLMHeapSize = @@ -910,7 +911,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] qf = Bytes.toBytes("testqualifier"); MemStoreSizing memstoreSizing = new NonThreadSafeMemStoreSizing(); for (int i = 0; i < keys.length; i++) { - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Threads.sleep(1); // to make sure each kv gets a different ts byte[] row = Bytes.toBytes(keys[i]); byte[] val = Bytes.toBytes(keys[i] + i); @@ -929,8 +930,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] row = Bytes.toBytes("A"); byte[] val = Bytes.toBytes("A" + 0); KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), - System.currentTimeMillis(), val); + new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + EnvironmentEdgeManager.currentTime(), val); return ClassSize.align( ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY + KeyValue.FIXED_OVERHEAD + kv.getSerializedSize()); } @@ -940,8 +941,8 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore byte[] row = Bytes.toBytes("A"); byte[] val = Bytes.toBytes("A" + 0); KeyValue kv = - new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), - System.currentTimeMillis(), val); + new KeyValue(row, Bytes.toBytes("testfamily"), Bytes.toBytes("testqualifier"), + EnvironmentEdgeManager.currentTime(), val); return toCellChunkMap ? ClassSize.align( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 39171da..66fd583 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -190,14 +191,14 @@ public class TestCompactionState { admin.majorCompact(table); } } - long curt = System.currentTimeMillis(); + long curt = EnvironmentEdgeManager.currentTime(); long waitTime = 5000; long endt = curt + waitTime; CompactionState state = getCompactionState(stateSource, master, admin, table); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(10); state = getCompactionState(stateSource, master, admin, table); - curt = System.currentTimeMillis(); + curt = EnvironmentEdgeManager.currentTime(); } // Now, should have the right compaction state, // otherwise, the compaction should have already been done diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java index 7a6b3d2..560e2f7 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataBlockEncodingTool.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; @@ -81,7 +82,7 @@ public class TestDataBlockEncodingTool { new StoreFileWriter.Builder(conf, fs) .withFilePath(path) .withFileContext(meta).build(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); byte[] FAMILY = Bytes.toBytes("cf"); byte[] QUALIFIER = Bytes.toBytes("q"); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java index 9c45a83..dd0ed42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java @@ -106,7 +106,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy { conf.setFloat("hbase.hregion.majorcompaction.jitter", 0); store.storeEngine.getCompactionPolicy().setConf(conf); try { - // The modTime of the mocked store file is currentTimeMillis, so we need to increase the + // The modTime of the mocked store file is the current time, so we need to increase the // timestamp a bit to make sure that now - lowestModTime is greater than major compaction // period(1ms). // trigger an aged major compaction diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 80ec00f..973547d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -210,7 +210,7 @@ public class TestDefaultMemStore { } memstorescanners = this.memstore.getScanners(mvcc.getReadPoint()); // Assert that new values are seen in kvset as we scan. 
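// A minimal sketch of the clock-injection idiom these call-site rewrites enable,
// assuming the ManualEnvironmentEdge helper exercised later in this diff
// (see TestHStore#testAge); EnvironmentEdgeManager.currentTime() delegates to a
// swappable EnvironmentEdge, so a test can pin the clock:
ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
edge.setValue(1622588860000L);                  // pin "now" to a fixed instant
EnvironmentEdgeManager.injectEdge(edge);
long t1 = EnvironmentEdgeManager.currentTime(); // deterministic: 1622588860000
long t2 = EnvironmentEdgeManager.currentTime(); // same value; the clock does not drift
EnvironmentEdgeManager.reset();                 // restore the default system-clock edge
// System.currentTimeMillis() offers no such injection point, which is why the
// timestamp below is switched over.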
- long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); count = 0; int snapshotIndex = 5; try (StoreScanner s = new StoreScanner(scan, scanInfo, null, memstorescanners)) { @@ -576,15 +576,15 @@ public class TestDefaultMemStore { addRows(this.memstore); Cell closestToEmpty = ((DefaultMemStore) this.memstore).getNextRow(KeyValue.LOWESTKEY); assertTrue(CellComparatorImpl.COMPARATOR.compareRows(closestToEmpty, - new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(0), EnvironmentEdgeManager.currentTime())) == 0); for (int i = 0; i < ROW_COUNT; i++) { Cell nr = ((DefaultMemStore) this.memstore).getNextRow(new KeyValue(Bytes.toBytes(i), - System.currentTimeMillis())); + EnvironmentEdgeManager.currentTime())); if (i + 1 == ROW_COUNT) { assertNull(nr); } else { assertTrue(CellComparatorImpl.COMPARATOR.compareRows(nr, - new KeyValue(Bytes.toBytes(i + 1), System.currentTimeMillis())) == 0); + new KeyValue(Bytes.toBytes(i + 1), EnvironmentEdgeManager.currentTime())) == 0); } } //starting from each row, validate results should contain the starting row @@ -1019,7 +1019,7 @@ public class TestDefaultMemStore { protected int addRows(final MemStore hmc, final long ts) { for (int i = 0; i < ROW_COUNT; i++) { long timestamp = ts == HConstants.LATEST_TIMESTAMP ? - System.currentTimeMillis() : ts; + EnvironmentEdgeManager.currentTime() : ts; for (int ii = 0; ii < QUALIFIER_COUNT; ii++) { byte [] row = Bytes.toBytes(i); byte [] qf = makeQualifier(i, ii); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index d55812a..e798c48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.StoppableImplementation; @@ -422,14 +423,14 @@ public class TestEndToEndSplitTransaction { */ public static void blockUntilRegionSplit(Configuration conf, long timeout, final byte[] regionName, boolean waitForDaughters) throws IOException, InterruptedException { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); log("blocking until region is split:" + Bytes.toStringBinary(regionName)); RegionInfo daughterA = null, daughterB = null; try (Connection conn = ConnectionFactory.createConnection(conf); Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { Result result = null; RegionInfo region = null; - while ((System.currentTimeMillis() - start) < timeout) { + while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { result = metaTable.get(new Get(regionName)); if (result == null) { break; @@ -453,16 +454,16 @@ public class TestEndToEndSplitTransaction { // if we are here, this means the region split is complete or timed out if (waitForDaughters) { - long rem = timeout - (System.currentTimeMillis() - start); + long rem = timeout - 
(EnvironmentEdgeManager.currentTime() - start); blockUntilRegionIsInMeta(conn, rem, daughterA); - rem = timeout - (System.currentTimeMillis() - start); + rem = timeout - (EnvironmentEdgeManager.currentTime() - start); blockUntilRegionIsInMeta(conn, rem, daughterB); - rem = timeout - (System.currentTimeMillis() - start); + rem = timeout - (EnvironmentEdgeManager.currentTime() - start); blockUntilRegionIsOpened(conf, rem, daughterA); - rem = timeout - (System.currentTimeMillis() - start); + rem = timeout - (EnvironmentEdgeManager.currentTime() - start); blockUntilRegionIsOpened(conf, rem, daughterB); // Compacting the new region to make sure references can be cleaned up @@ -493,8 +494,8 @@ public class TestEndToEndSplitTransaction { public static void blockUntilRegionIsInMeta(Connection conn, long timeout, RegionInfo hri) throws IOException, InterruptedException { log("blocking until region is in META: " + hri.getRegionNameAsString()); - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < timeout) { + long start = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() - start < timeout) { HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri); if (loc != null && !loc.getRegion().isOffline()) { log("found region in META: " + hri.getRegionNameAsString()); @@ -507,7 +508,7 @@ public class TestEndToEndSplitTransaction { public static void blockUntilRegionIsOpened(Configuration conf, long timeout, RegionInfo hri) throws IOException, InterruptedException { log("blocking until region is opened for reading:" + hri.getRegionNameAsString()); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); try (Connection conn = ConnectionFactory.createConnection(conf); Table table = conn.getTable(hri.getTable())) { byte[] row = hri.getStartKey(); @@ -516,7 +517,7 @@ public class TestEndToEndSplitTransaction { row = new byte[] { '0' }; } Get get = new Get(row); - while (System.currentTimeMillis() - start < timeout) { + while (EnvironmentEdgeManager.currentTime() - start < timeout) { try { table.get(get); break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index fdb40c1..5105c72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WALFactory; import org.junit.Assert; import org.junit.Before; @@ -114,7 +115,7 @@ public class TestHMobStore { private Cell seekKey3; private NavigableSet qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); private List expected = new ArrayList<>(); - private long id = System.currentTimeMillis(); + private long id = EnvironmentEdgeManager.currentTime(); private Get get = new Get(row); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final String DIR = TEST_UTIL.getDataTestDir("TestHMobStore").toString(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index de8118d..9763841 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -1664,11 +1664,11 @@ public class TestHRegion { private void waitForCounter(MetricsWALSource source, String metricName, long expectedCount) throws InterruptedException { - long startWait = System.currentTimeMillis(); + long startWait = EnvironmentEdgeManager.currentTime(); long currentCount; while ((currentCount = metricsAssertHelper.getCounter(metricName, source)) < expectedCount) { Thread.sleep(100); - if (System.currentTimeMillis() - startWait > 10000) { + if (EnvironmentEdgeManager.currentTime() - startWait > 10000) { fail(String.format("Timed out waiting for '%s' >= '%s', currentCount=%s", metricName, expectedCount, currentCount)); } @@ -1902,7 +1902,7 @@ public class TestHRegion { // Setting up region this.region = initHRegion(tableName, method, CONF, fam1); // Putting data in key - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Put put = new Put(row1); put.addColumn(fam1, qf1, now, val1); region.put(put); @@ -2049,7 +2049,7 @@ public class TestHRegion { region.put(put); // Creating put to add - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2); put = new Put(row1); put.add(kv); @@ -2501,7 +2501,7 @@ public class TestHRegion { // Setting up region this.region = initHRegion(tableName, method, CONF, fam1); // Putting data in key - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Put put = new Put(row1); put.addColumn(fam1, qf1, now, val1); region.put(put); @@ -2659,7 +2659,7 @@ public class TestHRegion { region.put(put); // Creating put to add - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); KeyValue kv = new KeyValue(row1, fam2, qf1, ts, KeyValue.Type.Put, val2); put = new Put(row1); put.add(kv); @@ -3340,7 +3340,7 @@ public class TestHRegion { region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), Bytes.toBytes("value"))); // TS out of range. should error region.put(new Put(row).addColumn(fam, Bytes.toBytes("qual"), - System.currentTimeMillis() + 2000, Bytes.toBytes("value"))); + EnvironmentEdgeManager.currentTime() + 2000, Bytes.toBytes("value"))); fail("Expected IOE for TS out of configured timerange"); } catch (FailedSanityCheckException ioe) { LOG.debug("Received expected exception", ioe); @@ -3398,11 +3398,12 @@ public class TestHRegion { COLUMN_FAMILY_BYTES); Cell originalCell = CellUtil.createCell(row, COLUMN_FAMILY_BYTES, qual1, - System.currentTimeMillis(), KeyValue.Type.Put.getCode(), value1); + EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put.getCode(), value1); final long originalSize = originalCell.getSerializedSize(); Cell addCell = CellUtil.createCell(row, COLUMN_FAMILY_BYTES, qual1, - System.currentTimeMillis(), KeyValue.Type.Put.getCode(), Bytes.toBytes("xxxxxxxxxx")); + EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put.getCode(), + Bytes.toBytes("xxxxxxxxxx")); final long addSize = addCell.getSerializedSize(); LOG.info("originalSize:" + originalSize @@ -3544,7 +3545,7 @@ public class TestHRegion { // extract the key values out the memstore: // This is kinda hacky, but better than nothing... 
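// A minimal sketch of why a pinned clock makes timestamp assertions like the
// 'firstCell.getTimestamp() <= now' check below deterministic; incValue is assumed
// from the ManualEnvironmentEdge test utility and is the only name not visible in
// this diff:
ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
clock.setValue(1000L);
EnvironmentEdgeManager.injectEdge(clock);
long before = EnvironmentEdgeManager.currentTime(); // 1000
clock.incValue(5);                                  // advance the fake clock by 5 ms
long after = EnvironmentEdgeManager.currentTime();  // 1005
// any cell stamped between the two reads carries a timestamp in [before, after]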
- long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); AbstractMemStore memstore = (AbstractMemStore) region.getStore(fam1).memstore; Cell firstCell = memstore.getActive().first(); assertTrue(firstCell.getTimestamp() <= now); @@ -3817,7 +3818,7 @@ public class TestHRegion { byte[] fam4 = Bytes.toBytes("fam4"); byte[][] families = { fam1, fam2, fam3, fam4 }; - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); // Setting up region this.region = initHRegion(tableName, method, CONF, families); @@ -3875,7 +3876,7 @@ public class TestHRegion { byte[] fam1 = Bytes.toBytes("fam1"); byte[][] families = { fam1 }; - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -3928,7 +3929,7 @@ public class TestHRegion { byte[] fam1 = Bytes.toBytes("fam1"); byte[][] families = { fam1 }; - long ts1 = 1; // System.currentTimeMillis(); + long ts1 = 1; long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -4062,7 +4063,7 @@ public class TestHRegion { byte[] fam1 = Bytes.toBytes("fam1"); byte[][] families = { fam1 }; - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -4116,7 +4117,7 @@ public class TestHRegion { byte[] qf2 = Bytes.toBytes("qualifier2"); byte[] fam1 = Bytes.toBytes("fam1"); - long ts1 = 1; // System.currentTimeMillis(); + long ts1 = 1; long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -4497,7 +4498,7 @@ public class TestHRegion { region.put(put); if (i != 0 && i % compactInterval == 0) { - LOG.debug("iteration = " + i+ " ts="+System.currentTimeMillis()); + LOG.debug("iteration = " + i+ " ts=" + EnvironmentEdgeManager.currentTime()); region.compact(true); } @@ -4516,7 +4517,7 @@ public class TestHRegion { if (!toggle) { flushThread.flush(); } - assertEquals("toggle="+toggle+"i=" + i + " ts="+System.currentTimeMillis(), + assertEquals("toggle="+toggle+"i=" + i + " ts=" + EnvironmentEdgeManager.currentTime(), expectedCount, res.size()); toggle = !toggle; } @@ -5592,7 +5593,7 @@ public class TestHRegion { htd.addFamily(new HColumnDescriptor(family)); } - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, time, 0); @@ -5642,7 +5643,7 @@ public class TestHRegion { htd.addFamily(new HColumnDescriptor(family)); } - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, time, 0); @@ -5701,7 +5702,7 @@ public class TestHRegion { htd.addFamily(new HColumnDescriptor(family)); } - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, time, 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 3e00540..745ba80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.RegionState; import 
org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Assert; @@ -332,7 +333,7 @@ public class TestHRegionInfo { public void testParseName() throws IOException { final TableName tableName = TableName.valueOf(name.getMethodName()); byte[] startKey = Bytes.toBytes("startKey"); - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); int replicaId = 42; // test without replicaId @@ -362,7 +363,7 @@ public class TestHRegionInfo { byte[] startKey = Bytes.toBytes("startKey"); byte[] endKey = Bytes.toBytes("endKey"); boolean split = false; - long regionId = System.currentTimeMillis(); + long regionId = EnvironmentEdgeManager.currentTime(); int replicaId = 42; @@ -402,7 +403,7 @@ public class TestHRegionInfo { checkEquality(h, conf); // check HRIs with non-default replicaId h = new HRegionInfo(TableName.valueOf(name.getMethodName()), startKey, endKey, false, - System.currentTimeMillis(), 1); + EnvironmentEdgeManager.currentTime(), 1); checkEquality(h, conf); Assert.assertArrayEquals(HRegionInfo.HIDDEN_END_KEY, HRegionInfo.getEndKeyForDisplay(h, conf)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 7b62119..cd7db9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -173,7 +173,7 @@ public class TestHRegionReplayEvents { } htd = builder.build(); - long time = System.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); primaryHri = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index d167f9b..7786797 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -163,7 +164,7 @@ public class TestHRegionServerBulkLoad { .withPath(fs, path) .withFileContext(context) .create(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { // subtract 2 since iterateOnSplits doesn't include boundary keys for (int i = 0; i < numRows; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 3972bc6..6b84a5c 
100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -155,7 +155,7 @@ public class TestHStore { List expected = new ArrayList<>(); List result = new ArrayList<>(); - long id = System.currentTimeMillis(); + long id = EnvironmentEdgeManager.currentTime(); Get get = new Get(row); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -1059,7 +1059,7 @@ public class TestHStore { @Test public void testNumberOfMemStoreScannersAfterFlush() throws IOException { long seqId = 100; - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family) .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.Type.Put) .setValue(qf1).build(); @@ -1073,7 +1073,7 @@ public class TestHStore { testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1)); seqId = 101; - timestamp = System.currentTimeMillis(); + timestamp = EnvironmentEdgeManager.currentTime(); Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family) .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.Type.Put) .setValue(qf1).build(); @@ -1520,7 +1520,7 @@ public class TestHStore { @Test public void testAge() throws IOException { - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); ManualEnvironmentEdge edge = new ManualEnvironmentEdge(); edge.setValue(currentTime); EnvironmentEdgeManager.injectEdge(edge); @@ -1608,7 +1608,7 @@ public class TestHStore { // Set the lower threshold to invoke the "MERGE" policy MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {}); MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing(); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); long seqID = 1L; // Add some data to the region and do some flushes for (int i = 1; i < 10; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java index b7d0db0..239d967 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -152,7 +153,7 @@ public class TestHStoreFile extends HBaseTestCase { */ public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) throws IOException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { for (char d = FIRST_CHAR; d <= LAST_CHAR; d++) { for (char e = FIRST_CHAR; e <= LAST_CHAR; e++) { @@ -541,7 +542,7 @@ public class TestHStoreFile extends HBaseTestCase { private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception { float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0); Path f = writer.getPath(); - long now = 
System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), @@ -636,7 +637,7 @@ public class TestHStoreFile extends HBaseTestCase { .build(); // add delete family - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < 2000; i += 2) { String row = String.format(localFormatter, i); KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), @@ -745,9 +746,9 @@ public class TestHStoreFile extends HBaseTestCase { .withFileContext(meta) .build(); - long now = System.currentTimeMillis(); - for (int i = 0; i < rowCount*2; i += 2) { // rows - for (int j = 0; j < colCount*2; j += 2) { // column qualifiers + long now = EnvironmentEdgeManager.currentTime(); + for (int i = 0; i < rowCount * 2; i += 2) { // rows + for (int j = 0; j < colCount * 2; j += 2) { // column qualifiers String row = String.format(localFormatter, i); String col = String.format(localFormatter, j); for (int k= 0; k < versions; ++k) { // versions diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 308a338..efcbf5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPoli import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Before; @@ -245,8 +246,8 @@ public class TestMajorCompaction { // should result in a compacted store file that has no references to the // deleted row. 
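// Sketch of the timestamped-Delete idiom the change just below switches to; the row
// and family bytes here are hypothetical, while Delete, Bytes, and
// EnvironmentEdgeManager are the HBase classes this test already imports:
long ts = EnvironmentEdgeManager.currentTime();
Delete d = new Delete(Bytes.toBytes("row-1"), ts); // affects cells with timestamp <= ts
d.addFamily(Bytes.toBytes("cf"));                  // drop every version in the family
r.delete(d);                                       // under an injected edge, ts is reproducible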
LOG.debug("Adding deletes to memstore and flushing"); - Delete delete = new Delete(secondRowBytes, System.currentTimeMillis()); - byte [][] famAndQf = {COLUMN_FAMILY, null}; + Delete delete = new Delete(secondRowBytes, EnvironmentEdgeManager.currentTime()); + byte[][] famAndQf = { COLUMN_FAMILY, null }; delete.addFamily(famAndQf[0]); r.delete(delete); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java index dec55a6..cba05a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -80,7 +81,8 @@ public class TestMasterAddressTracker { @Test public void testDeleteIfEquals() throws Exception { - final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis()); + final ServerName sn = ServerName.valueOf("localhost", 1234, + EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772); try { assertFalse("shouldn't have deleted wrong master server.", @@ -136,7 +138,8 @@ public class TestMasterAddressTracker { public void testMasterAddressTrackerFromZK() throws Exception { // Create the master node with a dummy address final int infoPort = 1235; - final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis()); + final ServerName sn = ServerName.valueOf("localhost", 1234, + EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, infoPort); try { assertTrue(addressTracker.hasMaster()); @@ -157,12 +160,14 @@ public class TestMasterAddressTracker { @Test public void testNoBackups() throws Exception { - final ServerName sn = ServerName.valueOf("localhost", 1234, System.currentTimeMillis()); + final ServerName sn = ServerName.valueOf("localhost", 1234, + EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1772); try { assertEquals("Should receive 0 for backup not found.", 0, - addressTracker.getBackupMasterInfoPort( - ServerName.valueOf("doesnotexist.example.com", 1234, System.currentTimeMillis()))); + addressTracker.getBackupMasterInfoPort( + ServerName.valueOf("doesnotexist.example.com", 1234, + EnvironmentEdgeManager.currentTime()))); } finally { assertTrue("Couldn't clean up master", MasterAddressTracker.deleteIfEquals(addressTracker.getWatcher(), sn.toString())); @@ -179,7 +184,8 @@ public class TestMasterAddressTracker { @Test public void testBackupMasters() throws Exception { - final ServerName sn = ServerName.valueOf("localhost", 5678, System.currentTimeMillis()); + final ServerName sn = ServerName.valueOf("localhost", 5678, + EnvironmentEdgeManager.currentTime()); final MasterAddressTracker addressTracker = setupMasterTracker(sn, 1111); assertTrue(addressTracker.hasMaster()); ServerName activeMaster = 
addressTracker.getMasterAddress(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java index bf01ddb..7ad54db 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreSegmentsIterator.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; @@ -112,8 +113,8 @@ public class TestMemStoreSegmentsIterator { final byte[] f = Bytes.toBytes(FAMILY); final byte[] q = Bytes.toBytes(COLUMN); final byte[] v = Bytes.toBytes(3); - final KeyValue kv1 = new KeyValue(one, f, q, System.currentTimeMillis(), v); - final KeyValue kv2 = new KeyValue(two, f, q, System.currentTimeMillis(), v); + final KeyValue kv1 = new KeyValue(one, f, q, EnvironmentEdgeManager.currentTime(), v); + final KeyValue kv2 = new KeyValue(two, f, q, EnvironmentEdgeManager.currentTime(), v); // the seqId of first cell less than Integer.MAX_VALUE, // the seqId of second cell greater than integer.MAX_VALUE kv1.setSequenceId(LESS_THAN_INTEGER_MAX_VALUE_SEQ_ID); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index 25e1223..715c63a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; import org.junit.ClassRule; @@ -343,13 +344,13 @@ public class TestMobStoreCompaction { HFileContext meta = new HFileContextBuilder().build(); HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path) .withFileContext(meta).create(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY, Bytes.toBytes("colX"), now, dummyData); writer.append(kv); } finally { - writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); + writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); writer.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java index aaea39f..c430a14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java @@ -49,6 +49,7 @@ import 
org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.junit.AfterClass; import org.junit.Assert; @@ -181,7 +182,7 @@ public class TestMobStoreScanner { public void testReadPt() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); setUp(0L, tableName); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); byte[] value1 = Bytes.toBytes("value1"); Put put1 = new Put(row1); put1.addColumn(family, qf1, ts, value1); @@ -281,7 +282,7 @@ public class TestMobStoreScanner { private void testGet(TableName tableName, boolean reversed, boolean doFlush) throws Exception { setUp(defaultThreshold, tableName); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; byte [] value = generateMobValue((int)defaultThreshold+1); @@ -304,7 +305,7 @@ public class TestMobStoreScanner { private void testGetReferences(boolean reversed) throws Exception { TableName tn = TableName.valueOf("testGetReferences" + reversed); setUp(defaultThreshold, tn); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; byte [] value = generateMobValue((int)defaultThreshold+1);; @@ -340,7 +341,7 @@ public class TestMobStoreScanner { byte [] valueLess = generateMobValue((int)defaultThreshold-1); byte [] valueEqual = generateMobValue((int)defaultThreshold); byte [] valueGreater = generateMobValue((int)defaultThreshold+1); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; @@ -387,7 +388,7 @@ public class TestMobStoreScanner { private void testGetFromArchive(boolean reversed) throws Exception { TableName tn = TableName.valueOf("testGetFromArchive" + reversed); setUp(defaultThreshold, tn); - long ts1 = System.currentTimeMillis(); + long ts1 = EnvironmentEdgeManager.currentTime(); long ts2 = ts1 + 1; long ts3 = ts1 + 2; byte [] value = generateMobValue((int)defaultThreshold+1);; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java index 52e154f..2327ce3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.TestIncrementsFromClientSide; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -179,7 +180,7 @@ public class TestRegionIncrement { throws IOException, InterruptedException { final HRegion region = getRegion(TEST_UTIL.getConfiguration(), TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { SingleCellIncrementer [] threads = new 
SingleCellIncrementer[THREAD_COUNT]; for (int i = 0; i < threads.length; i++) { @@ -204,7 +205,8 @@ public class TestRegionIncrement { assertEquals(INCREMENT_COUNT * THREAD_COUNT, total); } finally { closeRegion(region); - LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms"); + LOG.info(this.name.getMethodName() + " " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } @@ -219,7 +221,7 @@ public class TestRegionIncrement { throws IOException, InterruptedException { final HRegion region = getRegion(TEST_UTIL.getConfiguration(), TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName())); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { CrossRowCellIncrementer [] threads = new CrossRowCellIncrementer[THREAD_COUNT]; for (int i = 0; i < threads.length; i++) { @@ -241,7 +243,8 @@ public class TestRegionIncrement { assertEquals(INCREMENT_COUNT * THREAD_COUNT, total); } finally { closeRegion(region); - LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms"); + LOG.info(this.name.getMethodName() + " " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 7c1125d..be52892 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Pair; @@ -249,9 +250,9 @@ public class TestRegionMergeTransactionOnCluster { ADMIN.compactRegion(mergedRegionInfo.getRegionName()); // clean up the merged region store files // wait until merged region have reference file - long timeout = System.currentTimeMillis() + waitTime; + long timeout = EnvironmentEdgeManager.currentTime() + waitTime; int newcount = 0; - while (System.currentTimeMillis() < timeout) { + while (EnvironmentEdgeManager.currentTime() < timeout) { for(ColumnFamilyDescriptor colFamily : columnFamilies) { newcount += hrfs.getStoreFiles(colFamily.getName()).size(); } @@ -269,7 +270,7 @@ public class TestRegionMergeTransactionOnCluster { cleaner.chore(); Thread.sleep(1000); } - while (System.currentTimeMillis() < timeout) { + while (EnvironmentEdgeManager.currentTime() < timeout) { int newcount1 = 0; for(ColumnFamilyDescriptor colFamily : columnFamilies) { newcount1 += hrfs.getStoreFiles(colFamily.getName()).size(); @@ -437,8 +438,8 @@ public class TestRegionMergeTransactionOnCluster { int expectedRegionNum) throws Exception { List> tableRegionsInMeta; List tableRegionsInMaster; - long timeout = System.currentTimeMillis() + waitTime; - while (System.currentTimeMillis() < timeout) { + long timeout = EnvironmentEdgeManager.currentTime() + waitTime; + while (EnvironmentEdgeManager.currentTime() < timeout) { tableRegionsInMeta = 
MetaTableAccessor.getTableRegionsAndLocations(TEST_UTIL.getConnection(), tablename); tableRegionsInMaster = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java index acdb31e..833dc65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -114,8 +115,8 @@ public class TestRegionOpen { HTU.waitUntilNoRegionsInTransition(60000); // Create new HRI with non-default region replica id - HRegionInfo hri = new HRegionInfo(htd.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("B"), false, - System.currentTimeMillis(), 2); + HRegionInfo hri = new HRegionInfo(htd.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("B"), + false, EnvironmentEdgeManager.currentTime(), 2); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, CommonFSUtils.getTableDir(rootDir, hri.getTable()), hri); Path regionDir = regionFs.getRegionDir(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 8e805ff..e937ca1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -292,8 +293,8 @@ public class TestRegionReplicas { // force compaction HTU.compact(table.getName(), true); - long wakeUpTime = System.currentTimeMillis() + 4 * refreshPeriod; - while (System.currentTimeMillis() < wakeUpTime) { + long wakeUpTime = EnvironmentEdgeManager.currentTime() + 4 * refreshPeriod; + while (EnvironmentEdgeManager.currentTime() < wakeUpTime) { assertGetRpc(hriSecondary, 42, true); assertGetRpc(hriSecondary, 1042, true); assertGetRpc(hriSecondary, 2042, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java index cdbc092..1cb1c02 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbortTimeout.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import 
org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -111,9 +112,9 @@ public class TestRegionServerAbortTimeout { // Abort one region server UTIL.getMiniHBaseCluster().getRegionServer(0).abort("Abort RS for test"); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long timeout = REGIONS_NUM * SLEEP_TIME_WHEN_CLOSE_REGION * 10; - while (System.currentTimeMillis() - startTime < timeout) { + while (EnvironmentEdgeManager.currentTime() - startTime < timeout) { if (UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size() == 1) { assertTrue("Abort timer task should be scheduled", abortTimeoutTaskScheduled); return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 4cae2ae..772c3b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.AfterClass; @@ -156,14 +157,14 @@ public class TestRegionServerMetrics { } public void waitTableDeleted(TableName name, long timeoutInMillis) throws Exception { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (true) { HTableDescriptor[] tables = admin.listTables(); for (HTableDescriptor htd : tables) { if (htd.getNameAsString() == name.getNameAsString()) return; } - if (System.currentTimeMillis() - start > timeoutInMillis) + if (EnvironmentEdgeManager.currentTime() - start > timeoutInMillis) return; Thread.sleep(1000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java index da39551..4d9bb68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -38,9 +38,9 @@ import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Threads; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.log4j.Appender; @@ -264,7 +264,7 @@ public class TestRegionServerReportForDuty { } /** - * Tests region sever reportForDuty with manual environment edge + * Tests region sever reportForDuty with a 
non-default environment edge */ @Test public void testReportForDutyWithEnvironmentEdge() throws Exception { @@ -282,15 +282,14 @@ public class TestRegionServerReportForDuty { cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, tablesOnMaster ? 2 : 1); - // Inject manual environment edge for clock skew computation between RS and master - ManualEnvironmentEdge edge = new ManualEnvironmentEdge(); + // Inject non-default environment edge + IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(); EnvironmentEdgeManager.injectEdge(edge); master = cluster.addMaster(); rs = cluster.addRegionServer(); LOG.debug("Starting master: " + master.getMaster().getServerName()); master.start(); rs.start(); - waitForClusterOnline(master); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java index 88f7762..abcaa4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -82,7 +83,7 @@ public class TestReversibleScanners { HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static byte[] FAMILYNAME = Bytes.toBytes("testCf"); - private static long TS = System.currentTimeMillis(); + private static long TS = EnvironmentEdgeManager.currentTime(); private static int MAXMVCC = 7; private static byte[] ROW = Bytes.toBytes("testRow"); private static final int ROWSIZE = 200; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java index 166783f..52e0461 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowPrefixBloomFilter.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; @@ -149,7 +150,7 @@ public class TestRowPrefixBloomFilter { .withMaxKeyCount(expKeys) .withFileContext(meta) .build(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { //Put with valid row style for (int i = 0; i < prefixRowCount; i += 2) { // prefix rows diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 2eab509..47e97b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -53,6 +53,7 @@ import 
org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -258,7 +259,7 @@ public class TestScanner { // Write information to the meta table - Put put = new Put(ROW_KEY, System.currentTimeMillis()); + Put put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, REGION_INFO.toByteArray()); @@ -285,7 +286,7 @@ public class TestScanner { String address = HConstants.LOCALHOST_IP + ":" + HBaseTestingUtility.randomFreePort(); - put = new Put(ROW_KEY, System.currentTimeMillis()); + put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address)); @@ -322,7 +323,7 @@ public class TestScanner { address = "bar.foo.com:4321"; - put = new Put(ROW_KEY, System.currentTimeMillis()); + put = new Put(ROW_KEY, EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address)); table.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java index 94491f9..e4c996d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -85,7 +86,7 @@ public class TestScannerWithBulkload { @Test public void testBulkLoad() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - long l = System.currentTimeMillis(); + long l = EnvironmentEdgeManager.currentTime(); Admin admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); Scan scan = createScan(); @@ -184,7 +185,8 @@ public class TestScannerWithBulkload { writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(new Long(9999999))); } else { - writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); + writer.appendFileInfo(BULKLOAD_TIME_KEY, + Bytes.toBytes(EnvironmentEdgeManager.currentTime())); } writer.close(); return hfilePath; @@ -223,7 +225,7 @@ public class TestScannerWithBulkload { @Test public void testBulkLoadWithParallelScan() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - final long l = System.currentTimeMillis(); + final long l = EnvironmentEdgeManager.currentTime(); final Admin admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); Scan scan = createScan(); @@ -267,7 +269,7 @@ public class TestScannerWithBulkload { @Test public void testBulkLoadNativeHFile() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - long l = System.currentTimeMillis(); + long l = EnvironmentEdgeManager.currentTime(); Admin 
admin = TEST_UTIL.getAdmin(); createTable(admin, tableName); Scan scan = createScan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java index 2858419..258350c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -61,7 +62,7 @@ public class TestSimpleTimeRangeTracker { @Test public void testTimeRangeInitialized() { TimeRangeTracker src = getTimeRangeTracker(); - TimeRange tr = new TimeRange(System.currentTimeMillis()); + TimeRange tr = TimeRange.from(EnvironmentEdgeManager.currentTime()); assertFalse(src.includesTimeRange(tr)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index f1cb1c9..8238406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Test; @@ -109,7 +110,7 @@ public class TestStoreFileScannerWithTagCompression { private void writeStoreFile(final StoreFileWriter writer) throws IOException { byte[] fam = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); byte[] b = Bytes.toBytes("k1"); Tag t1 = new ArrayBackedTag((byte) 1, "tag1"); Tag t2 = new ArrayBackedTag((byte) 2, "tag2"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 207d158..e7ec300 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.junit.ClassRule; import org.junit.Ignore; @@ -636,7 +637,7 @@ public class TestStoreScanner { @Test public void testDeleteVersionMaskingMultiplePuts() throws IOException 
{ - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue [] kvs1 = new KeyValue[] { create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") @@ -662,7 +663,7 @@ public class TestStoreScanner { @Test public void testDeleteVersionsMixedAndMultipleVersionReturn() throws IOException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue [] kvs1 = new KeyValue[] { create("R1", "cf", "a", now, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "a", now, KeyValue.Type.Delete, "dont-care") @@ -823,7 +824,7 @@ public class TestStoreScanner { */ @Test public void testWildCardTtlScan() throws IOException { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue [] kvs = new KeyValue[] { create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"), create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"), @@ -889,7 +890,7 @@ public class TestStoreScanner { */ @Test public void testExpiredDeleteFamily() throws Exception { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue[] kvs = new KeyValue[] { new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000, KeyValue.Type.DeleteFamily), @@ -916,7 +917,7 @@ public class TestStoreScanner { @Test public void testDeleteMarkerLongevity() throws Exception { try { - final long now = System.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() { @Override public long currentTime() { @@ -986,7 +987,7 @@ public class TestStoreScanner { @Test public void testPreadNotEnabledForCompactionStoreScanners() throws Exception { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); KeyValue[] kvs = new KeyValue[] { new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now - 1000, KeyValue.Type.DeleteFamily), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 09d6934..b37df0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; @@ -239,7 +240,7 @@ public class TestWALLockup { Put put = new Put(bytes); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes); WALKeyImpl key = new WALKeyImpl(region.getRegionInfo().getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, System.currentTimeMillis(), mvcc, scopes); + TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime(), mvcc, scopes); WALEdit edit = new WALEdit(); CellScanner CellScanner = put.cellScanner(); assertTrue(CellScanner.advance()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 45cafb7..9574d0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -77,7 +78,7 @@ public class TestWideScanner extends HBaseTestCase { for (char c = 'a'; c <= 'c'; c++) { byte[] row = Bytes.toBytes("ab" + c); int i, j; - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); for (i = 0; i < 100; i++) { byte[] b = Bytes.toBytes(String.format("%10d", i)); for (j = 0; j < 100; j++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java index 9fee333..3090572 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java @@ -216,7 +216,7 @@ public class TestFIFOCompactionPolicy { .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build()) .build(); Table table = TEST_UTIL.createTable(desc, null); - long ts = System.currentTimeMillis() - 10 * 1000; + long ts = EnvironmentEdgeManager.currentTime() - 10 * 1000; Put put = new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0")); table.put(put); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java index 9198bd5..3157e33 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.junit.ClassRule; import org.junit.Test; @@ -125,18 +126,18 @@ public class TestCompactionWithThroughputController { try { HStore store = prepareData(); assertEquals(10, store.getStorefilesCount()); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); TEST_UTIL.getAdmin().majorCompact(tableName); while (store.getStorefilesCount() != 1) { Thread.sleep(20); } - long duration = System.currentTimeMillis() - startTime; + long duration = EnvironmentEdgeManager.currentTime() - startTime; double throughput = (double) store.getStorefilesSize() / duration * 1000; // confirm that the speed limit work properly(not 
too fast, and also not too slow) // 20% is the max acceptable error rate. assertTrue(throughput < throughputLimit * 1.2); assertTrue(throughput > throughputLimit * 0.8); - return System.currentTimeMillis() - startTime; + return EnvironmentEdgeManager.currentTime() - startTime; } finally { TEST_UTIL.shutdownMiniCluster(); } @@ -154,12 +155,12 @@ public class TestCompactionWithThroughputController { try { HStore store = prepareData(); assertEquals(10, store.getStorefilesCount()); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); TEST_UTIL.getAdmin().majorCompact(tableName); while (store.getStorefilesCount() != 1) { Thread.sleep(20); } - return System.currentTimeMillis() - startTime; + return EnvironmentEdgeManager.currentTime() - startTime; } finally { TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 6d0771e..a633b57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -181,7 +181,7 @@ public abstract class AbstractTestFSWAL { throws IOException { final byte[] row = Bytes.toBytes(cf); for (int i = 0; i < times; i++) { - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); WALEdit cols = new WALEdit(); cols.add(new KeyValue(row, row, row, timestamp, row)); WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), htd.getTableName(), @@ -390,7 +390,7 @@ public abstract class AbstractTestFSWAL { final String name = "testFailedToCreateWALIfParentRenamed"; AbstractFSWAL wal = newWAL(FS, CommonFSUtils.getWALRootDir(CONF), name, HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null); - long filenum = System.currentTimeMillis(); + long filenum = EnvironmentEdgeManager.currentTime(); Path path = wal.computeFilename(filenum); wal.createWriterInstance(path); Path parent = path.getParent(); @@ -469,7 +469,7 @@ public abstract class AbstractTestFSWAL { for (int i = 0; i < countPerFamily; i++) { final RegionInfo info = region.getRegionInfo(); final WALKeyImpl logkey = new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, - System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes); + EnvironmentEdgeManager.currentTime(), clusterIds, -1, -1, region.getMVCC(), scopes); wal.append(info, logkey, edits, true); region.getMVCC().completeAndWait(logkey.getWriteEntry()); } @@ -512,7 +512,7 @@ public abstract class AbstractTestFSWAL { for (byte[] fam : td.getColumnFamilyNames()) { scopes.put(fam, 0); } - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); byte[] row = Bytes.toBytes("row"); WALEdit cols = new WALEdit(); cols.add(new KeyValue(row, row, row, timestamp, row)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java index 9322c5e..e3da0ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRollPeriod.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Put; import 
org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -140,7 +141,7 @@ public abstract class AbstractTestLogRollPeriod { }); // Sleep until we should get at least min-LogRoll events - long wtime = System.currentTimeMillis(); + long wtime = EnvironmentEdgeManager.currentTime(); Thread.sleep((minRolls + 1) * LOG_ROLL_PERIOD); // Do some extra sleep in case the machine is slow, // and the log-roll is not triggered exactly on LOG_ROLL_PERIOD. @@ -148,7 +149,7 @@ public abstract class AbstractTestLogRollPeriod { for (int retry = 0; paths.size() < minRolls && retry < NUM_RETRIES; ++retry) { Thread.sleep(LOG_ROLL_PERIOD / 4); } - wtime = System.currentTimeMillis() - wtime; + wtime = EnvironmentEdgeManager.currentTime() - wtime; LOG.info(String.format("got %d rolls after %dms (%dms each) - expected at least %d rolls", paths.size(), wtime, wtime / paths.size(), minRolls)); assertFalse(paths.size() < minRolls); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java index c3f3277..51a2f98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALFactory; @@ -133,10 +134,9 @@ public abstract class AbstractTestProtobufLog { private void doRead(boolean withTrailer) throws IOException { final int columnCount = 5; final int recordCount = 5; - final TableName tableName = - TableName.valueOf("tablename"); + final TableName tableName = TableName.valueOf("tablename"); final byte[] row = Bytes.toBytes("row"); - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); Path path = new Path(dir, "tempwal"); // delete the log if already exists, for test only fs.delete(path, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 9c069bd..69b9ea4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -155,8 +155,8 @@ public abstract class AbstractTestWALReplay { this.hbaseRootDir = CommonFSUtils.getRootDir(this.conf); this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME); String serverName = - ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, System.currentTimeMillis()) - .toString(); + ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010, ee.currentTime()) + .toString(); this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); 
this.logDir = new Path(this.hbaseRootDir, logName); if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java index f413baf..1ec89eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; @@ -136,7 +137,7 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL { for (byte[] fam : td.getColumnFamilyNames()) { scopes.put(fam, 0); } - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTime(); String testName = currentTest.getMethodName(); AtomicInteger failedCount = new AtomicInteger(0); try (LogRoller roller = new LogRoller(services); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java index 2ba70ad..469b289 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; @@ -124,7 +125,8 @@ public class TestDurability { @Test public void testDurability() throws Exception { WALFactory wals = new WALFactory(CONF, - ServerName.valueOf("TestDurability", 16010, System.currentTimeMillis()).toString()); + ServerName.valueOf("TestDurability", 16010, EnvironmentEdgeManager.currentTime()) + .toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); HRegion deferredRegion = createHRegion(region.getTableDescriptor(), region.getRegionInfo(), @@ -188,7 +190,8 @@ public class TestDurability { // Setting up region WALFactory wals = new WALFactory(CONF, - ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString()); + ServerName.valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime()) + .toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); WAL wal = region.getWAL(); @@ -253,9 +256,9 @@ public class TestDurability { // Setting up region WALFactory wals = new WALFactory(CONF, - ServerName - .valueOf("testIncrementWithReturnResultsSetToFalse", 16010, System.currentTimeMillis()) - .toString()); + ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", + 16010, EnvironmentEdgeManager.currentTime()) + .toString()); HRegion region = createHRegion(wals, Durability.USE_DEFAULT); 
Increment inc1 = new Increment(row1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index d0274de..9d32594 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -189,7 +190,7 @@ public class TestLogRollAbort { public void testLogRollAfterSplitStart() throws IOException { LOG.info("Verify wal roll after split starts will fail."); String logName = ServerName.valueOf("testLogRollAfterSplitStart", - 16010, System.currentTimeMillis()).toString(); + 16010, EnvironmentEdgeManager.currentTime()).toString(); Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName)); final WALFactory wals = new WALFactory(conf, logName); @@ -208,7 +209,7 @@ public class TestLogRollAbort { NavigableMap scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); scopes.put(Bytes.toBytes("column"), 0); log.appendData(regionInfo, new WALKeyImpl(regionInfo.getEncodedNameAsBytes(), tableName, - System.currentTimeMillis(), mvcc, scopes), kvs); + EnvironmentEdgeManager.currentTime(), mvcc, scopes), kvs); } // Send the data to HDFS datanodes and close the HDFS writer log.sync(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 0712b59..8b4b710 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.RecoverLeaseFSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -289,7 +290,7 @@ public class TestLogRolling extends AbstractTestLogRolling { } Put tmpPut = new Put(Bytes.toBytes("tmprow")); tmpPut.addColumn(HConstants.CATALOG_FAMILY, null, value); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long remaining = timeout; while (remaining > 0) { if (log.isLowReplicationRollEnabled() == expect) { @@ -302,7 +303,7 @@ public class TestLogRolling extends AbstractTestLogRolling { } catch (InterruptedException e) { // continue } - remaining = timeout - (System.currentTimeMillis() - startTime); + remaining = timeout - (EnvironmentEdgeManager.currentTime() - startTime); } } } @@ -367,7 +368,7 @@ public class TestLogRolling extends AbstractTestLogRolling { writeData(table, 2); - long curTime = System.currentTimeMillis(); + long curTime = 
EnvironmentEdgeManager.currentTime(); LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName()); long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); assertTrue("Log should have a timestamp older than now", @@ -462,7 +463,7 @@ public class TestLogRolling extends AbstractTestLogRolling { writeData(table, 1002); - long curTime = System.currentTimeMillis(); + long curTime = EnvironmentEdgeManager.currentTime(); LOG.info("log.getCurrentFileName()): " + AbstractFSWALProvider.getCurrentFileName(log)); long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log); assertTrue("Log should have a timestamp older than now", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index a8b6490..bde2292 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; @@ -158,7 +159,7 @@ public class TestLogRollingNoCluster { final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(); try { for (int i = 0; i < this.count; i++) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Roll every ten edits if (i % 10 == 0) { this.wal.rollWriter(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java index 2d72618..511161c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEmptyWALRecovery.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterfa import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALEdit; @@ -125,7 +126,7 @@ import org.junit.experimental.categories.Category; final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size(); // for each RS, create an empty wal with same walGroupId final List emptyWalPaths = new ArrayList<>(); - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = UTIL1.getHBaseCluster().getRegions(htable1.getName()).get(0).getRegionInfo(); @@ -165,7 +166,7 @@ import org.junit.experimental.categories.Category; final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size(); // for each RS, create an empty wal with same walGroupId final List emptyWalPaths = new ArrayList<>(); - long ts = 
System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = UTIL1.getHBaseCluster().getRegions(tableName.getName()).get(0).getRegionInfo(); @@ -223,8 +224,7 @@ import org.junit.experimental.categories.Category; final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size(); // for each RS, create an empty wal with same walGroupId final List emptyWalPaths = new ArrayList<>(); - - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); WAL wal = null; for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = @@ -279,8 +279,7 @@ import org.junit.experimental.categories.Category; final int numRs = UTIL1.getHBaseCluster().getRegionServerThreads().size(); // for each RS, create an empty wal with same walGroupId final List emptyWalPaths = new ArrayList<>(); - - long ts = System.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); WAL wal = null; for (int i = 0; i < numRs; i++) { RegionInfo regionInfo = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java index b72975f..5b13387 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; @@ -420,7 +421,7 @@ public class TestReplicationEndpoint extends TestReplicationBase { private Entry createEntry(String tableName, TreeMap scopes, byte[]... 
kvs) { WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf(tableName), - System.currentTimeMillis() - 1L, + EnvironmentEdgeManager.currentTime() - 1L, scopes); WALEdit edit1 = new WALEdit(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java index c245726..360424a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationKillRS.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.ClassRule; import org.junit.experimental.categories.Category; import org.slf4j.Logger; @@ -84,14 +85,14 @@ public class TestReplicationKillRS extends TestReplicationBase { } int lastCount = 0; - final long start = System.currentTimeMillis(); + final long start = EnvironmentEdgeManager.currentTime(); int i = 0; try (Connection conn = ConnectionFactory.createConnection(CONF2)) { try (Table table = conn.getTable(tableName)) { while (true) { if (i == NB_RETRIES - 1) { fail("Waited too much time for queueFailover replication. " + "Waited " - + (System.currentTimeMillis() - start) + "ms."); + + (EnvironmentEdgeManager.currentTime() - start) + "ms."); } Result[] res2; try (ResultScanner scanner = table.getScanner(new Scan())) { ... 1688 lines suppressed ...
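
A note on the pattern this diff applies throughout: EnvironmentEdgeManager routes every currentTime() call through a pluggable EnvironmentEdge, which is what lets a unit test substitute a controllable clock for the wall clock; any remaining direct call to System.currentTimeMillis() reads the real clock and escapes the injected edge. Below is a minimal sketch of that test-side usage, using only HBase util classes already imported in the hunks above; the class name ClockInjectionExample is illustrative, not from this commit.

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    // Illustrative sketch only; not part of this commit.
    public class ClockInjectionExample {
      public static void main(String[] args) {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(1000L);                     // pin "now" at t=1000ms
        EnvironmentEdgeManager.injectEdge(edge);  // currentTime() now reads the injected edge
        long start = EnvironmentEdgeManager.currentTime();
        edge.setValue(6000L);                     // jump the test clock forward, no sleeping
        long elapsed = EnvironmentEdgeManager.currentTime() - start;
        System.out.println("elapsed=" + elapsed + "ms"); // 5000, deterministically
        EnvironmentEdgeManager.reset();           // restore the default wall-clock edge
      }
    }

This is also why the converted timeout loops above (e.g. while (EnvironmentEdgeManager.currentTime() < timeout)) stay testable: once every participant reads the same edge, a test can drive the clock past a deadline explicitly instead of sleeping through it.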
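
On the TestRegionServerReportForDuty hunk above, which swaps ManualEnvironmentEdge for IncrementingEnvironmentEdge: a manual edge returns a fixed value until the test advances it, while an incrementing edge ticks forward on every read, so code that polls the clock while waiting (such as the clock-skew computation between region server and master) still observes time passing. A small sketch of that behavior, under the same illustrative-naming caveat as above:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

    // Illustrative sketch only; not part of this commit.
    public class IncrementingEdgeExample {
      public static void main(String[] args) {
        EnvironmentEdgeManager.injectEdge(new IncrementingEnvironmentEdge());
        long t1 = EnvironmentEdgeManager.currentTime();
        long t2 = EnvironmentEdgeManager.currentTime();
        // Every read advances the injected clock, so t2 > t1 holds with no sleeping:
        System.out.println(t2 > t1);
        EnvironmentEdgeManager.reset();
      }
    }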