hadoop-hdfs-commits mailing list archives

From: e..@apache.org
Subject: svn commit: r1360858 [3/3] - in /hadoop/common/trunk/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/ hadoop-hdfs-httpfs/src/t...
Date: Thu, 12 Jul 2012 19:01:56 GMT
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Thu Jul 12 19:01:49 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Test;
 
@@ -64,10 +65,10 @@ public class TestDeadDatanode {
    */
   private void waitForDatanodeState(String nodeID, boolean alive, int waitTime)
       throws TimeoutException, InterruptedException {
-    long stopTime = System.currentTimeMillis() + waitTime;
+    long stopTime = Time.now() + waitTime;
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
-    while (System.currentTimeMillis() < stopTime) {
+    while (Time.now() < stopTime) {
       final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
           namesystem, nodeID);
       if (dd.isAlive == alive) {

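This first hunk shows the pattern the whole commit applies: direct calls to System.currentTimeMillis() are routed through the new org.apache.hadoop.util.Time facade instead. A minimal sketch of such a facade is below; it assumes, as the one-for-one substitutions suggest, that Time.now() simply delegates to the system clock (the real Hadoop class may offer more than this):

    package org.apache.hadoop.util;

    /**
     * Facade over the system clock. Funneling every caller through one
     * method means the time source can later be swapped or shimmed in a
     * single place instead of touching each call site again.
     */
    public final class Time {
      private Time() {}

      /** Current wall-clock time in milliseconds since the epoch. */
      public static long now() {
        return System.currentTimeMillis();
      }
    }
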
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu Jul 12 19:01:49 2012
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.aspectj.util.FileUtil;
 
@@ -1382,7 +1383,7 @@ public class TestEditLog extends TestCas
     }
 
     // How long does it take to read through all these edit logs?
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     try {
       cluster = new MiniDFSCluster.Builder(conf).
           numDataNodes(NUM_DATA_NODES).build();
@@ -1392,7 +1393,7 @@ public class TestEditLog extends TestCas
         cluster.shutdown();
       }
     }
-    long endTime = System.currentTimeMillis();
+    long endTime = Time.now();
     double delta = ((float)(endTime - startTime)) / 1000.0;
     LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
         NUM_EDIT_LOG_ROLLS, delta));

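TestEditLog uses the facade for coarse benchmarking: sample Time.now() before and after the work, then report the difference. A self-contained sketch of that idiom follows; the class name and the sleep are illustrative stand-ins, not code from the commit:

    import org.apache.hadoop.util.Time;

    public class ElapsedTimeExample {
      public static void main(String[] args) throws InterruptedException {
        long startTime = Time.now();   // sample the clock before the work
        Thread.sleep(250);             // stand-in for the timed operation
        long endTime = Time.now();     // sample it again afterwards
        double delta = (endTime - startTime) / 1000.0;
        System.out.printf("operation took %.2f seconds%n", delta);
      }
    }
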
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Thu Jul 12 19:01:49 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 
 import static org.junit.Assert.*;
@@ -410,9 +411,9 @@ public class TestEditLogRace {
       LOG.info("Trying to enter safe mode.");
       LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that
long");
       
-      long st = System.currentTimeMillis();
+      long st = Time.now();
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      long et = System.currentTimeMillis();
+      long et = Time.now();
       LOG.info("Entered safe mode");
       // Make sure we really waited for the flush to complete!
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);
@@ -503,9 +504,9 @@ public class TestEditLogRace {
       LOG.info("Trying to enter safe mode.");
       LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
       
-      long st = System.currentTimeMillis();
+      long st = Time.now();
       namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      long et = System.currentTimeMillis();
+      long et = Time.now();
       LOG.info("Entered safe mode");
       // Make sure we really waited for the flush to complete!
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java Thu Jul 12 19:01:49 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -135,10 +136,10 @@ public class TestLargeDirectoryDelete {
     threads[0].start();
     threads[1].start();
     
-    final long start = System.currentTimeMillis();
+    final long start = Time.now();
     FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
     mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
-    final long end = System.currentTimeMillis();
+    final long end = Time.now();
     threads[0].endThread();
     threads[1].endThread();
     LOG.info("Deletion took " + (end - start) + "msecs");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java Thu Jul 12 19:01:49 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
+import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -119,9 +120,9 @@ public class TestNameNodeResourceChecker
       Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);
 
       // Make sure the NNRM thread has a chance to run.
-      long startMillis = System.currentTimeMillis();
+      long startMillis = Time.now();
       while (!cluster.getNameNode().isInSafeMode() &&
-          System.currentTimeMillis() < startMillis + (60 * 1000)) {
+          Time.now() < startMillis + (60 * 1000)) {
         Thread.sleep(1000);
       }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java Thu Jul 12 19:01:49 2012
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Supplier;
 
@@ -72,8 +73,8 @@ public abstract class HATestUtil {
     
     active.getRpcServer().rollEditLog();
     
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start < TestEditLogTailer.NN_LAG_TIMEOUT) {
+    long start = Time.now();
+    while (Time.now() - start < TestEditLogTailer.NN_LAG_TIMEOUT) {
       long nn2HighestTxId = standby.getNamesystem().getFSImage()
         .getLastAppliedTxId();
       if (nn2HighestTxId >= activeTxId) {
@@ -206,13 +207,13 @@ public abstract class HATestUtil {
   
   public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
       List<Integer> txids) throws InterruptedException {
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     while (true) {
       try {
         FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
         return;
       } catch (AssertionError err) {
-        if (System.currentTimeMillis() - start > 10000) {
+        if (Time.now() - start > 10000) {
           throw err;
         } else {
           Thread.sleep(300);

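waitForCheckpoint above is the retry-until-deadline idiom: re-run the assertion, sleep between attempts, and rethrow the last AssertionError once the time budget is spent. A generic sketch of the same loop, with a hypothetical helper name and parameters:

    import org.apache.hadoop.util.Time;

    public final class PollUtil {
      private PollUtil() {}

      /** Re-runs check until it stops throwing or timeoutMs elapses. */
      public static void waitFor(Runnable check, long timeoutMs, long intervalMs)
          throws InterruptedException {
        long start = Time.now();
        while (true) {
          try {
            check.run();
            return;                      // condition reached
          } catch (AssertionError err) {
            if (Time.now() - start > timeoutMs) {
              throw err;                 // budget spent: surface the failure
            }
            Thread.sleep(intervalMs);    // back off, then try again
          }
        }
      }
    }
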
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java Thu Jul 12 19:01:49 2012
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -124,7 +125,7 @@ public class TestFailoverWithBlockTokens
               BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
               // This will make the token invalid, since the password
               // won't match anymore
-              id.setExpiryDate(System.currentTimeMillis() + 10);
+              id.setExpiryDate(Time.now() + 10);
               Token<BlockTokenIdentifier> newToken =
                   new Token<BlockTokenIdentifier>(id.getBytes(),
                       token.getPassword(), token.getKind(), token.getService());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java Thu Jul 12 19:01:49 2012
@@ -21,12 +21,13 @@ import java.util.ConcurrentModificationE
 import java.util.Iterator;
 import java.util.Random;
 
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
 public class TestGSet {
   private static final Random ran = new Random();
-  private static final long starttime = System.currentTimeMillis();
+  private static final long starttime = Time.now();
 
   private static void print(Object s) {
     System.out.print(s);
@@ -245,7 +246,7 @@ public class TestGSet {
     }
     println("DONE " + test.stat());
 
-    final long s = (System.currentTimeMillis() - starttime)/1000L;
+    final long s = (Time.now() - starttime)/1000L;
     println("total time elapsed=" + s + "s\n");
   }
 
@@ -257,7 +258,7 @@ public class TestGSet {
     final IntData data;
 
     final String info;
-    final long starttime = System.currentTimeMillis();
+    final long starttime = Time.now();
     /** Determine the probability in {@link #check()}. */
     final int denominator;
     int iterate_count = 0;
@@ -382,7 +383,7 @@ public class TestGSet {
     }
 
     String stat() {
-      final long t = System.currentTimeMillis() - starttime;
+      final long t = Time.now() - starttime;
       return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
           iterate_count, contain_count, t/1000, t%1000);
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java Thu Jul 12 19:01:49 2012
@@ -30,6 +30,7 @@ import static org.junit.Assert.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Time;
 
 public class TestLightWeightHashSet{
 
@@ -45,7 +46,7 @@ public class TestLightWeightHashSet{
     float maxF = LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;
     int initCapacity = LightWeightHashSet.MINIMUM_CAPACITY;
-    rand = new Random(System.currentTimeMillis());
+    rand = new Random(Time.now());
     list.clear();
     for (int i = 0; i < NUM; i++) {
       list.add(rand.nextInt());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java Thu Jul 12 19:01:49 2012
@@ -29,6 +29,7 @@ import static org.junit.Assert.*;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.util.Time;
 
 public class TestLightWeightLinkedSet {
 
@@ -44,7 +45,7 @@ public class TestLightWeightLinkedSet {
     float maxF = LightWeightLinkedSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightLinkedSet.DEFAUT_MIN_LOAD_FACTOR;
     int initCapacity = LightWeightLinkedSet.MINIMUM_CAPACITY;
-    rand = new Random(System.currentTimeMillis());
+    rand = new Random(Time.now());
     list.clear();
     for (int i = 0; i < NUM; i++) {
       list.add(rand.nextInt());

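Both LightWeight*Set tests seed java.util.Random from the clock, so even seed generation now goes through the facade. A sketch of that pattern is below; printing the seed is an extra step assumed here for reproducibility, not something these tests do:

    import java.util.Random;

    import org.apache.hadoop.util.Time;

    public class SeededRandomExample {
      public static void main(String[] args) {
        long seed = Time.now();                // clock-derived seed
        System.out.println("seed=" + seed);    // record it so a failing run can be replayed
        Random rand = new Random(seed);
        for (int i = 0; i < 3; i++) {
          System.out.println(rand.nextInt());
        }
      }
    }
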
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1360858&r1=1360857&r2=1360858&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Thu Jul 12 19:01:49 2012
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
@@ -39,7 +40,7 @@ public class TestJsonUtil {
 
   @Test
   public void testHdfsFileStatus() {
-    final long now = System.currentTimeMillis();
+    final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L<<26,
         now, now + 10, new FsPermission((short)0644), "user", "group",

