hadoop-hdfs-commits mailing list archives

From: gkesa...@apache.org
Subject: svn commit: r1369164 [11/16] - in /hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/o...
Date: Fri, 03 Aug 2012 19:00:59 GMT
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Fri Aug  3 19:00:15 2012
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.HashSet;
 import java.util.Set;
-import java.net.*;
-
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,12 +36,14 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
 
 
 /**
  * This class tests the replication and injection of blocks of a DFS file for simulated storage.
  */
-public class TestInjectionForSimulatedStorage extends TestCase {
+public class TestInjectionForSimulatedStorage {
   private int checksumSize = 16;
   private int blockSize = checksumSize*2;
   private int numBlocks = 4;
@@ -72,7 +74,7 @@ public class TestInjectionForSimulatedSt
                                        ClientProtocol namenode,
                                        int expected, long maxWaitSec) 
                                        throws IOException {
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     
     //wait for all the blocks to be replicated;
     LOG.info("Checking for block replication for " + filename);
@@ -97,7 +99,7 @@ public class TestInjectionForSimulatedSt
                                actual + ".");
       
         if (maxWaitSec > 0 && 
-            (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
+            (Time.now() - start) > (maxWaitSec * 1000)) {
          throw new IOException("Timed out while waiting for all blocks to " +
                                "be replicated for " + filename);
         }
@@ -121,6 +123,7 @@ public class TestInjectionForSimulatedSt
   * The blocks are then injected in one of the DNs. The expected behaviour is
   * that the NN will arrange for the missing replica to be copied from a valid source.
    */
+  @Test
   public void testInjection() throws IOException {
     
     MiniDFSCluster cluster = null;

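The hunk above shows a migration pattern repeated throughout this commit: the junit.framework.TestCase base class is dropped, each test method is marked with @Test, and the assertions are statically imported. A minimal sketch of the resulting JUnit 4 shape (class and method names here are illustrative, not taken from the commit):

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class ExampleMigratedTest {
      @Test                       // JUnit 4 finds tests by annotation, not by a "test" name prefix
      public void testAddition() {
        assertEquals(4, 2 + 2);   // static import: no Assert. prefix needed
      }
    }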
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Fri Aug  3 19:00:15 2012
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
@@ -78,16 +79,16 @@ public class TestIsMethodSupported {
             nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();
     boolean exists = translator.isMethodSupported("rollEditLog");
-    Assert.assertTrue(exists);
+    assertTrue(exists);
     exists = translator.isMethodSupported("bogusMethod");
-    Assert.assertFalse(exists);
+    assertFalse(exists);
   }
   
   @Test
   public void testDatanodeProtocol() throws IOException {
     DatanodeProtocolClientSideTranslatorPB translator = 
         new DatanodeProtocolClientSideTranslatorPB(nnAddress, conf);
-    Assert.assertTrue(translator.isMethodSupported("sendHeartbeat"));
+    assertTrue(translator.isMethodSupported("sendHeartbeat"));
   }
   
   @Test
@@ -97,12 +98,12 @@ public class TestIsMethodSupported {
             UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf));
     //Namenode doesn't implement ClientDatanodeProtocol
-    Assert.assertFalse(translator.isMethodSupported("refreshNamenodes"));
+    assertFalse(translator.isMethodSupported("refreshNamenodes"));
     
     translator = new ClientDatanodeProtocolTranslatorPB(
         dnAddress, UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf));
-    Assert.assertTrue(translator.isMethodSupported("refreshNamenodes"));
+    assertTrue(translator.isMethodSupported("refreshNamenodes"));
   }
   
   @Test
@@ -111,7 +112,7 @@ public class TestIsMethodSupported {
         (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
             conf, nnAddress, ClientProtocol.class,
             UserGroupInformation.getCurrentUser(), true).getProxy();
-    Assert.assertTrue(translator.isMethodSupported("mkdirs"));
+    assertTrue(translator.isMethodSupported("mkdirs"));
   }
   
   @Test
@@ -120,7 +121,7 @@ public class TestIsMethodSupported {
         NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class,
             UserGroupInformation.getCurrentUser(), true).getProxy();
     //Namenode doesn't implement JournalProtocol
-    Assert.assertFalse(translator.isMethodSupported("startLogSegment"));
+    assertFalse(translator.isMethodSupported("startLogSegment"));
   }
   
   @Test
@@ -130,12 +131,12 @@ public class TestIsMethodSupported {
             nnAddress, UserGroupInformation.getCurrentUser(), conf,
             NetUtils.getDefaultSocketFactory(conf), 0);
     //Not supported at namenode
-    Assert.assertFalse(translator.isMethodSupported("initReplicaRecovery"));
+    assertFalse(translator.isMethodSupported("initReplicaRecovery"));
     
     translator = new InterDatanodeProtocolTranslatorPB(
         dnAddress, UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), 0);
-    Assert.assertTrue(translator.isMethodSupported("initReplicaRecovery"));
+    assertTrue(translator.isMethodSupported("initReplicaRecovery"));
   }
   
   @Test
@@ -145,7 +146,7 @@ public class TestIsMethodSupported {
         NameNodeProxies.createNonHAProxy(conf, nnAddress,
             GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
             true).getProxy();
-    Assert.assertTrue(translator.isMethodSupported("getGroupsForUser"));
+    assertTrue(translator.isMethodSupported("getGroupsForUser"));
   }
   
   @Test
@@ -155,7 +156,7 @@ public class TestIsMethodSupported {
       NameNodeProxies.createNonHAProxy(conf, nnAddress,
           RefreshAuthorizationPolicyProtocol.class,
           UserGroupInformation.getCurrentUser(), true).getProxy();
-    Assert.assertTrue(translator.isMethodSupported("refreshServiceAcl"));
+    assertTrue(translator.isMethodSupported("refreshServiceAcl"));
   }
   
   @Test
@@ -165,7 +166,7 @@ public class TestIsMethodSupported {
         NameNodeProxies.createNonHAProxy(conf, nnAddress,
             RefreshUserMappingsProtocol.class,
             UserGroupInformation.getCurrentUser(), true).getProxy();
-    Assert.assertTrue(
+    assertTrue(
         translator.isMethodSupported("refreshUserToGroupsMappings"));
   }
 }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -30,7 +32,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
-import static org.junit.Assert.assertTrue;
 
 /**
  * This class tests that blocks can be larger than 2GB

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Fri Aug  3 19:00:15 2012
@@ -17,33 +17,31 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.doNothing;
 
 public class TestLease {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
@@ -82,7 +80,8 @@ public class TestLease {
       // We don't need to wait the lease renewer thread to act.
       // call renewLease() manually.
       // make it look like lease has already expired.
-      dfs.lastLeaseRenewal = System.currentTimeMillis() - 300000;
+      LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
+      dfs.lastLeaseRenewal = Time.now() - 300000;
       dfs.renewLease();
 
       // this should not work.
@@ -94,6 +93,10 @@ public class TestLease {
         LOG.info("Write failed as expected. ", e);
       }
 
+      // If aborted, the renewer should be empty. (no reference to clients)
+      Thread.sleep(1000);
+      Assert.assertTrue(originalRenewer.isEmpty());
+
       // unstub
       doNothing().when(spyNN).renewLease(anyString());
 
@@ -167,15 +170,20 @@ public class TestLease {
 
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);
+    FSDataOutputStream out1 = createFsOut(c1, "/out1");
     final DFSClient c2 = createDFSClientAs(ugi[0], conf);
-    Assert.assertEquals(c1.leaserenewer, c2.leaserenewer);
+    FSDataOutputStream out2 = createFsOut(c2, "/out2");
+    Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
     final DFSClient c3 = createDFSClientAs(ugi[1], conf);
-    Assert.assertTrue(c1.leaserenewer != c3.leaserenewer);
+    FSDataOutputStream out3 = createFsOut(c3, "/out3");
+    Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
     final DFSClient c4 = createDFSClientAs(ugi[1], conf);
-    Assert.assertEquals(c3.leaserenewer, c4.leaserenewer);
+    FSDataOutputStream out4 = createFsOut(c4, "/out4");
+    Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
     final DFSClient c5 = createDFSClientAs(ugi[2], conf);
-    Assert.assertTrue(c1.leaserenewer != c5.leaserenewer);
-    Assert.assertTrue(c3.leaserenewer != c5.leaserenewer);
+    FSDataOutputStream out5 = createFsOut(c5, "/out5");
+    Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
+    Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
   }
   
   private FSDataOutputStream createFsOut(DFSClient dfs, String path) 

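The TestLease hunks also swap System.currentTimeMillis() for org.apache.hadoop.util.Time.now(), a substitution that recurs in the files below. Since the call sites are replaced one-for-one, a reasonable assumption is that Time.now() is a thin wrapper over the system clock; a sketch of such a utility, under that assumption:

    public final class Time {
      private Time() {}           // static utility, not instantiable
      /** Current wall-clock time in milliseconds since the epoch. */
      public static long now() {
        return System.currentTimeMillis();
      }
    }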
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java Fri Aug  3 19:00:15 2012
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
@@ -31,8 +33,9 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestLeaseRecovery extends junit.framework.TestCase {
+public class TestLeaseRecovery {
   static final int BLOCK_SIZE = 1024;
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
@@ -66,6 +69,7 @@ public class TestLeaseRecovery extends j
    * It randomly truncates the replica of the last block stored in each datanode.
   * Finally, it triggers block synchronization to synchronize all stored blocks.
    */
+  @Test
   public void testBlockSynchronization() throws Exception {
     final int ORG_FILE_SIZE = 3000; 
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Aug  3 19:00:15 2012
@@ -171,7 +171,7 @@ public class TestLeaseRecovery2 {
 
     if (triggerLeaseRenewerInterrupt) {
       AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-      dfs.dfs.leaserenewer.interruptAndJoin();
+      dfs.dfs.getLeaseRenewer().interruptAndJoin();
     }
     return filepath;
   }
@@ -276,7 +276,7 @@ public class TestLeaseRecovery2 {
     
     // kill the lease renewal thread
     AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-    dfs.dfs.leaserenewer.interruptAndJoin();
+    dfs.dfs.getLeaseRenewer().interruptAndJoin();
 
     // set the hard limit to be 1 second 
     cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
@@ -341,7 +341,7 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-    dfs.dfs.leaserenewer.interruptAndJoin();
+    dfs.dfs.getLeaseRenewer().interruptAndJoin();
 
     // set the soft limit to be 1 second so that the
     // namenode triggers lease recovery on next attempt to write-for-open.
@@ -463,7 +463,7 @@ public class TestLeaseRecovery2 {
     
     // kill the lease renewal thread
     AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-    dfs.dfs.leaserenewer.interruptAndJoin();
+    dfs.dfs.getLeaseRenewer().interruptAndJoin();
     
     // Make sure the DNs don't send a heartbeat for a while, so the blocks
     // won't actually get completed during lease recovery.

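These hunks replace direct reads of the package-private leaserenewer field with a getLeaseRenewer() accessor, so the tests no longer reach into DFSClient's internals. A hedged sketch of the pattern (both types are simplified stand-ins, kept minimal so the example is self-contained):

    class LeaseRenewer {                      // stub standing in for the real renewer
      void interruptAndJoin() { /* stop the renewal thread and wait for it */ }
    }

    class Client {                            // illustrative stand-in for DFSClient
      private final LeaseRenewer leaseRenewer = new LeaseRenewer();

      LeaseRenewer getLeaseRenewer() {        // callers use the accessor, not the field
        return leaseRenewer;
      }
    }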
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRenewer.java Fri Aug  3 19:00:15 2012
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertSame;
 
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -93,13 +93,6 @@ public class TestLeaseRenewer {
   }
   
   @Test
-  public void testClientName() throws IOException {
-    String clientName = renewer.getClientName("NONMAPREDUCE");
-    Assert.assertTrue("bad client name: " + clientName,
-        clientName.startsWith("DFSClient_NONMAPREDUCE_"));
-  }
-  
-  @Test
   public void testRenewal() throws Exception {
     // Keep track of how many times the lease gets renewed
     final AtomicInteger leaseRenewalCount = new AtomicInteger();
@@ -118,8 +111,8 @@ public class TestLeaseRenewer {
     renewer.put(filePath, mockStream, MOCK_DFSCLIENT);
 
     // Wait for lease to get renewed
-    long failTime = System.currentTimeMillis() + 5000;
-    while (System.currentTimeMillis() < failTime &&
+    long failTime = Time.now() + 5000;
+    while (Time.now() < failTime &&
         leaseRenewalCount.get() == 0) {
       Thread.sleep(50);
     }
@@ -206,8 +199,8 @@ public class TestLeaseRenewer {
     renewer.closeFile(filePath, MOCK_DFSCLIENT);
     
     // Should stop the renewer running within a few seconds
-    long failTime = System.currentTimeMillis() + 5000;
-    while (renewer.isRunning() && System.currentTimeMillis() < failTime) {
+    long failTime = Time.now() + 5000;
+    while (renewer.isRunning() && Time.now() < failTime) {
       Thread.sleep(50);
     }
     Assert.assertFalse(renewer.isRunning());

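The renewal tests wait by polling against a deadline rather than sleeping for a fixed interval, so they return as soon as the condition flips. The pattern, extracted as a hedged sketch to sit inside a test method, with conditionMet() as a hypothetical stand-in for checks like leaseRenewalCount.get() != 0 or !renewer.isRunning():

    long failTime = Time.now() + 5000;        // give up after five seconds
    while (!conditionMet() && Time.now() < failTime) {
      Thread.sleep(50);                       // poll every 50 ms
    }
    Assert.assertTrue("condition not met before deadline", conditionMet());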
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java Fri Aug  3 19:00:15 2012
@@ -22,7 +22,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestListFiles;
 import org.apache.log4j.Level;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.Random;
@@ -33,13 +37,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.log4j.Level;
-
-import static org.junit.Assert.*;
-
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Test;
 import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * This class tests the FileStatus API.

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java Fri Aug  3 19:00:15 2012
@@ -17,12 +17,17 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 /**
  * This class tests the DFS class via the FileSystem interface in a single node

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java Fri Aug  3 19:00:15 2012
@@ -18,15 +18,17 @@
 
 package org.apache.hadoop.hdfs;
 
-import junit.framework.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-
 /**
  * Tests MiniDFS cluster setup/teardown and isolation.
  * Every instance is brought up with a new data dir, to ensure that
@@ -70,7 +72,7 @@ public class TestMiniDFSCluster {
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
-      Assert.assertEquals(c1Path+"/data", cluster.getDataDirectory());
+      assertEquals(c1Path+"/data", cluster.getDataDirectory());
     } finally {
       cluster.shutdown();
     }
@@ -91,14 +93,14 @@ public class TestMiniDFSCluster {
     MiniDFSCluster cluster3 = null;
     try {
       String dataDir2 = cluster2.getDataDirectory();
-      Assert.assertEquals(c2Path + "/data", dataDir2);
+      assertEquals(c2Path + "/data", dataDir2);
       //change the data dir
       conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
                testDataCluster3.getAbsolutePath());
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       cluster3 = builder.build();
       String dataDir3 = cluster3.getDataDirectory();
-      Assert.assertTrue("Clusters are bound to the same directory: " + dataDir2,
+      assertTrue("Clusters are bound to the same directory: " + dataDir2,
                         !dataDir2.equals(dataDir3));
     } finally {
       MiniDFSCluster.shutdownCluster(cluster3);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Fri Aug  3 19:00:15 2012
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.net.URL;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -30,17 +32,19 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.junit.Test;
 
 /**
  * The test makes sure that NameNode detects the presence of blocks that do not have
  * any valid replicas. In addition, it verifies that HDFS front page displays
  * a warning in such a case.
  */
-public class TestMissingBlocksAlert extends TestCase {
+public class TestMissingBlocksAlert {
   
   private static final Log LOG = 
                            LogFactory.getLog(TestMissingBlocksAlert.class);
   
+  @Test
   public void testMissingBlocksAlert() throws IOException, 
                                        InterruptedException {
     

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java Fri Aug  3 19:00:15 2012
@@ -17,23 +17,27 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Random;
-import java.net.*;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.junit.Test;
 
 /**
  * This class tests the modification time of files in DFS.
  */
-public class TestModTime extends TestCase {
+public class TestModTime {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
@@ -74,6 +78,7 @@ public class TestModTime extends TestCas
   /**
    * Tests modification time in DFS.
    */
+  @Test
   public void testModTime() throws IOException {
     Configuration conf = new HdfsConfiguration();
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java Fri Aug  3 19:00:15 2012
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.junit.Test;
-
-import java.io.*;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
@@ -28,15 +26,16 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * This class tests hflushing concurrently from many threads.
@@ -92,6 +91,7 @@ public class TestMultiThreadedHflush {
       this.countdown = countdown;
     }
 
+    @Override
     public void run() {
       try {
         countdown.await();
@@ -151,6 +151,7 @@ public class TestMultiThreadedHflush {
     try {
       for (int i = 0; i < 10; i++) {
         Thread flusher = new Thread() {
+            @Override
             public void run() {
               try {
                 while (true) {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Fri Aug  3 19:00:15 2012
@@ -17,20 +17,22 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 
-import static org.junit.Assert.*;
-
 /**
  * Driver class for testing the use of DFSInputStream by multiple concurrent
  * readers, using the different read APIs. See subclasses for the actual test
@@ -61,7 +63,7 @@ public class TestParallelReadUtil {
   public static void setupCluster(int replicationFactor, HdfsConfiguration conf) throws Exception {
     util = new BlockReaderTestUtil(replicationFactor, conf);
     dfsClient = util.getDFSClient();
-    long seed = System.currentTimeMillis();
+    long seed = Time.now();
     LOG.info("Random seed: " + seed);
     rand = new Random(seed);
   }
@@ -321,7 +323,7 @@ public class TestParallelReadUtil {
     }
 
     // Start the workers and wait
-    long starttime = System.currentTimeMillis();
+    long starttime = Time.now();
     for (ReadWorker worker : workers) {
       worker.start();
     }
@@ -331,7 +333,7 @@ public class TestParallelReadUtil {
         worker.join();
       } catch (InterruptedException ignored) { }
     }
-    long endtime = System.currentTimeMillis();
+    long endtime = Time.now();
 
     // Cleanup
     for (TestFileInfo testInfo : testInfoArr) {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java Fri Aug  3 19:00:15 2012
@@ -72,10 +72,25 @@ public class TestPersistBlocks {
     rand.nextBytes(DATA_BEFORE_RESTART);
     rand.nextBytes(DATA_AFTER_RESTART);
   }
+ 
+  /** check if DFS remains in proper condition after a restart 
+   **/
+  @Test  
+  public void TestRestartDfsWithFlush() throws Exception {
+    testRestartDfs(true);
+  }
   
-  /** check if DFS remains in proper condition after a restart */
-  @Test
-  public void testRestartDfs() throws Exception {
+  
+  /** check if DFS remains in proper condition after a restart 
+   **/
+  @Test
+  public void TestRestartDfsWithSync() throws Exception {
+    testRestartDfs(false);
+  }
+  
+  /** check if DFS remains in proper condition after a restart
+   * @param useFlush - if true then flush is used instead of sync (i.e. hflush)
+   */
+  void testRestartDfs(boolean useFlush) throws Exception {
     final Configuration conf = new HdfsConfiguration();
     // Turn off persistent IPC, so that the DFSClient can survive NN restart
     conf.setInt(
@@ -92,7 +107,10 @@ public class TestPersistBlocks {
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
       stream.write(DATA_BEFORE_RESTART);
-      stream.hflush();
+      if (useFlush)
+        stream.flush();
+      else 
+        stream.hflush();
       
       // Wait for at least a few blocks to get through
       while (len <= BLOCK_SIZE) {

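The new useFlush switch chooses between OutputStream.flush() and FSDataOutputStream.hflush(), and the distinction is the point of the restart test: flush() only drains client-side buffers, while hflush() pushes the buffered data to every datanode in the write pipeline (though not necessarily to disk). A hedged usage sketch, with the path and payload as illustrative placeholders:

    FSDataOutputStream stream = fs.create(new Path("/example"), true);
    stream.write(new byte[] {1, 2, 3});
    if (useFlush) {
      stream.flush();    // client buffers only; data may not have left the client
    } else {
      stream.hflush();   // data has reached the datanodes and is visible to new readers
    }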
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Fri Aug  3 19:00:15 2012
@@ -17,24 +17,25 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
 
 /**
  * This class tests the DFS positional read functionality in a single node
  * mini-cluster.
  */
-public class TestPread extends TestCase {
+public class TestPread {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 4096;
   boolean simulatedStorage = false;
@@ -196,6 +197,7 @@ public class TestPread extends TestCase 
   /**
    * Tests positional read in DFS.
    */
+  @Test
   public void testPreadDFS() throws IOException {
     dfsPreadTest(false); //normal pread
     dfsPreadTest(true); //trigger read code path without transferTo.
@@ -225,6 +227,7 @@ public class TestPread extends TestCase 
     }
   }
   
+  @Test
   public void testPreadDFSSimulated() throws IOException {
     simulatedStorage = true;
     testPreadDFS();
@@ -234,6 +237,7 @@ public class TestPread extends TestCase 
   /**
    * Tests positional read in LocalFS.
    */
+  @Test
   public void testPreadLocalFS() throws IOException {
     Configuration conf = new HdfsConfiguration();
     FileSystem fileSys = FileSystem.getLocal(conf);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java Fri Aug  3 19:00:15 2012
@@ -88,7 +88,7 @@ public class TestReadWhileWriting {
       //   of data can be read successfully.
       checkFile(p, half, conf);
       AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
-      ((DistributedFileSystem)fs).dfs.leaserenewer.interruptAndJoin();
+      ((DistributedFileSystem)fs).dfs.getLeaseRenewer().interruptAndJoin();
 
       //c. On M1, append another half block of data.  Close file on M1.
       {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Fri Aug  3 19:00:15 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
@@ -29,8 +30,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
-public class TestRenameWhileOpen extends junit.framework.TestCase {
+public class TestRenameWhileOpen {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -47,6 +49,7 @@ public class TestRenameWhileOpen extends
    * mkdir /user/dir3
    * move /user/dir1 /user/dir3
    */
+  @Test
   public void testWhileOpenRenameParent() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -132,6 +135,7 @@ public class TestRenameWhileOpen extends
    * open /user/dir1/file1 /user/dir2/file2
    * move /user/dir1 /user/dir3
    */
+  @Test
   public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -206,6 +210,7 @@ public class TestRenameWhileOpen extends
    * mkdir /user/dir2
    * move /user/dir1/file1 /user/dir2/
    */
+  @Test
   public void testWhileOpenRenameToExistentDirectory() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -270,6 +275,7 @@ public class TestRenameWhileOpen extends
    * open /user/dir1/file1 
    * move /user/dir1/file1 /user/dir2/
    */
+  @Test
   public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Fri Aug  3 19:00:15 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Fri Aug  3 19:00:15 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -25,8 +28,6 @@ import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,15 +40,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
 
 /**
  * This class tests the replication of a DFS file.
  */
-public class TestReplication extends TestCase {
+public class TestReplication {
   private static final long seed = 0xDEADBEEFL;
   private static final int blockSize = 8192;
   private static final int fileSize = 16384;
@@ -148,6 +151,7 @@ public class TestReplication extends Tes
   /* 
    * Test if Datanode reports bad blocks during replication request
    */
+  @Test
   public void testBadBlockReportOnTransfer() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem fs = null;
@@ -239,11 +243,13 @@ public class TestReplication extends Tes
   }
 
 
+  @Test
   public void testReplicationSimulatedStorag() throws IOException {
     runReplication(true);
   }
   
   
+  @Test
   public void testReplication() throws IOException {
     runReplication(false);
   }
@@ -253,7 +259,7 @@ public class TestReplication extends Tes
                                        ClientProtocol namenode,
                                        int expected, long maxWaitSec) 
                                        throws IOException {
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     
     //wait for all the blocks to be replicated;
     LOG.info("Checking for block replication for " + filename);
@@ -279,7 +285,7 @@ public class TestReplication extends Tes
       }
       
       if (maxWaitSec > 0 && 
-          (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
+          (Time.now() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timed out while waiting for all blocks to " +
                              "be replicated for " + filename);
       }
@@ -297,6 +303,7 @@ public class TestReplication extends Tes
    * two of the blocks and removes one of the replicas. Expected behavior is
   * that the missing replica will be copied from one valid source.
    */
+  @Test
   public void testPendingReplicationRetry() throws IOException {
     
     MiniDFSCluster cluster = null;
@@ -399,6 +406,7 @@ public class TestReplication extends Tes
    * Test if replication can detect mismatched length on-disk blocks
    * @throws Exception
    */
+  @Test
   public void testReplicateLenMismatchedBlock() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
     try {

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java Fri Aug  3 19:00:15 2012
@@ -18,17 +18,19 @@
 
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 /**
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
-public class TestRestartDFS extends TestCase {
+public class TestRestartDFS {
   public void runTests(Configuration conf, boolean serviceTest) throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS").
@@ -110,6 +112,7 @@ public class TestRestartDFS extends Test
     }
   }
   /** check if DFS remains in proper condition after a restart */
+  @Test
   public void testRestartDFS() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     runTests(conf, false);
@@ -118,6 +121,7 @@ public class TestRestartDFS extends Test
   /** check if DFS remains in proper condition after a restart 
    * this rerun is with 2 ports enabled for RPC in the namenode
    */
+  @Test
    public void testRestartDualPortDFS() throws Exception {
      final Configuration conf = new HdfsConfiguration();
      runTests(conf, true);

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Fri Aug  3 19:00:15 2012
@@ -18,30 +18,33 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.junit.Assert.*;
-import org.junit.Before;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Supplier;
@@ -278,36 +281,43 @@ public class TestSafeMode {
         dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
 
     runFsFun("Set quota while in SM", new FSRun() { 
+      @Override
       public void run(FileSystem fs) throws IOException {
         ((DistributedFileSystem)fs).setQuota(file1, 1, 1); 
       }});
 
     runFsFun("Set perm while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         fs.setPermission(file1, FsPermission.getDefault());
       }});
 
     runFsFun("Set owner while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         fs.setOwner(file1, "user", "group");
       }});
 
     runFsFun("Set repl while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         fs.setReplication(file1, (short)1);
       }});
 
     runFsFun("Append file while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         DFSTestUtil.appendFile(fs, file1, "new bytes");
       }});
 
     runFsFun("Delete file while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         fs.delete(file1, false);
       }});
 
     runFsFun("Rename file while in SM", new FSRun() {
+      @Override
       public void run(FileSystem fs) throws IOException {
         fs.rename(file1, new Path("file2"));
       }});

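The hunk above annotates each anonymous FSRun callback with @Override, which has been legal on interface-implementing methods since Java 6 and lets the compiler reject any method whose signature no longer matches the interface. A minimal sketch of the idiom, using a hypothetical single-method interface (names illustrative, not the actual test harness):

    import java.io.IOException;

    public class OverrideDemo {
      // Hypothetical callback mirroring the FSRun idiom in the hunk above.
      interface FsAction {
        void run(String path) throws IOException;
      }

      public static void main(String[] args) {
        FsAction delete = new FsAction() {
          @Override // the compiler now verifies this matches FsAction.run
          public void run(String path) throws IOException {
            System.out.println("would delete " + path);
          }
        };
        try {
          delete.run("/file1");
        } catch (IOException e) {
          // The safe-mode tests expect such operations to be rejected.
          System.err.println("rejected: " + e.getMessage());
        }
      }
    }
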
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java Fri Aug  3 19:00:15 2012
@@ -17,24 +17,26 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
 
 /**
  * This class tests the presence of the seek bug as described
  * in HADOOP-508 
  */
-public class TestSeekBug extends TestCase {
+public class TestSeekBug {
   static final long seed = 0xDEADBEEFL;
   static final int ONEMB = 1 << 20;
   
@@ -123,6 +125,7 @@ public class TestSeekBug extends TestCas
   /**
    * Test if the seek bug exists in FSDataInputStream in DFS.
    */
+  @Test
   public void testSeekBugDFS() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -142,6 +145,7 @@ public class TestSeekBug extends TestCas
   /**
    * Tests if the seek bug exists in FSDataInputStream in LocalFS.
    */
+  @Test
   public void testSeekBugLocalFS() throws IOException {
     Configuration conf = new HdfsConfiguration();
     FileSystem fileSys = FileSystem.getLocal(conf);

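This change repeats across most files in the commit: drop the junit.framework.TestCase superclass, import the specific org.junit.Assert methods instead of a wildcard, and mark each test method with @Test. A minimal sketch of the migrated shape, assuming JUnit 4 on the classpath:

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    // JUnit 4 style: no TestCase superclass; methods are discovered via
    // @Test rather than the test* naming convention.
    public class MigratedTest {
      @Test
      public void seekThenRead() {
        int position = 1 << 20;       // e.g. the ONEMB constant above
        assertTrue(position > 0);
        assertEquals(1048576, position);
      }
    }
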
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java Fri Aug  3 19:00:15 2012
@@ -17,26 +17,33 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
 import java.util.Random;
-import java.net.*;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
 
 /**
  * This class tests the access time on files.
  *
  */
-public class TestSetTimes extends TestCase {
+public class TestSetTimes {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
@@ -77,6 +84,7 @@ public class TestSetTimes extends TestCa
   /**
    * Tests mod & access time in DFS.
    */
+  @Test
   public void testTimes() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -159,8 +167,8 @@ public class TestSetTimes extends TestCa
       assertTrue(atime2 == stat.getAccessTime());
       assertTrue(mtime2 == mtime3);
 
-      long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
-      long atime4 = System.currentTimeMillis();
+      long mtime4 = Time.now() - (3600L * 1000L);
+      long atime4 = Time.now();
       fileSys.setTimes(dir1, mtime4, atime4);
       // check new modification time on file
       stat = fileSys.getFileStatus(dir1);
@@ -208,6 +216,7 @@ public class TestSetTimes extends TestCa
   /**
    * Tests mod time change at close in DFS.
    */
+  @Test
   public void testTimesAtClose() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s

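Several hunks here and below replace System.currentTimeMillis() with org.apache.hadoop.util.Time.now(), routing all clock reads through a single Hadoop utility. A minimal elapsed-time sketch in the same style as the timing code above:

    import org.apache.hadoop.util.Time;

    public class ElapsedDemo {
      public static void main(String[] args) throws InterruptedException {
        long start = Time.now();       // wall-clock milliseconds
        Thread.sleep(100);             // stand-in for the work being timed
        System.out.println("took " + (Time.now() - start) + " ms");
      }
    }
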
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java Fri Aug  3 19:00:15 2012
@@ -19,9 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
-import junit.framework.TestCase;
+import org.junit.Test;
 
-public class TestSetrepDecreasing extends TestCase {
+public class TestSetrepDecreasing {
+  @Test
   public void testSetrepDecreasing() throws IOException {
     TestSetrepIncreasing.setrep(5, 3, false);
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java Fri Aug  3 19:00:15 2012
@@ -17,14 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
 
-public class TestSetrepIncreasing extends TestCase {
+public class TestSetrepIncreasing {
   static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
@@ -68,9 +75,11 @@ public class TestSetrepIncreasing extend
     }
   }
 
+  @Test
   public void testSetrepIncreasing() throws IOException {
     setrep(3, 7, false);
   }
+  @Test
   public void testSetrepIncreasingSimulatedStorage() throws IOException {
     setrep(3, 7, true);
   }

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Fri Aug  3 19:00:15 2012
@@ -40,8 +40,10 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -94,8 +96,7 @@ public class TestShortCircuitLocalRead {
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
@@ -123,8 +124,7 @@ public class TestShortCircuitLocalRead {
 
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
 
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
 
     actual.limit(3);
 
@@ -363,7 +363,7 @@ public class TestShortCircuitLocalRead {
     stm.write(dataToWrite);
     stm.close();
 
-    long start = System.currentTimeMillis();
+    long start = Time.now();
     final int iteration = 20;
     Thread[] threads = new Thread[threadCount];
     for (int i = 0; i < threadCount; i++) {
@@ -386,7 +386,7 @@ public class TestShortCircuitLocalRead {
     for (int i = 0; i < threadCount; i++) {
       threads[i].join();
     }
-    long end = System.currentTimeMillis();
+    long end = Time.now();
     System.out.println("Iteration " + iteration + " took " + (end - start));
     fs.delete(file1, false);
   }

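The first two hunks in this file replace stm.skip(readOffset) plus an equality assertion with IOUtils.skipFully. InputStream.skip is permitted to skip fewer bytes than requested, so asserting that a single call skipped the full count is fragile; skipFully instead loops until the requested count is consumed. A generic sketch of the looping idiom (an illustration, not the Hadoop implementation):

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public final class SkipUtil {
      // Loop until len bytes are consumed; fail loudly at EOF instead of
      // silently skipping less than requested.
      public static void skipFully(InputStream in, long len) throws IOException {
        while (len > 0) {
          long skipped = in.skip(len);
          if (skipped <= 0) {
            // skip() may return 0 before EOF; read one byte to tell a
            // slow stream apart from an exhausted one.
            if (in.read() == -1) {
              throw new EOFException(len + " bytes left to skip at EOF");
            }
            skipped = 1; // the read() consumed one byte
          }
          len -= skipped;
        }
      }
    }
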
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java Fri Aug  3 19:00:15 2012
@@ -17,23 +17,27 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
 import java.util.Random;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
 
 /**
  * This class tests the creation of files with block-size
  * smaller than the default buffer size of 4K.
  */
-public class TestSmallBlock extends TestCase {
+public class TestSmallBlock {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 1;
   static final int fileSize = 20;
@@ -90,6 +94,7 @@ public class TestSmallBlock extends Test
   /**
   * Tests small block size in DFS.
    */
+  @Test
   public void testSmallBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
@@ -108,6 +113,7 @@ public class TestSmallBlock extends Test
       cluster.shutdown();
     }
   }
+  @Test
   public void testSmallBlockSimulatedStorage() throws IOException {
     simulatedStorage = true;
     testSmallBlock();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java Fri Aug  3 19:00:15 2012
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.OutputStream;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
-
-import java.io.OutputStream;
 import org.junit.Test;
 
 /**

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Aug  3 19:00:15 2012
@@ -20,6 +20,9 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -28,24 +31,21 @@ import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.zip.CRC32;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
-
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 
 import org.junit.Test;
 

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java Fri Aug  3 19:00:15 2012
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.util.EnumSet;
 
-import static org.junit.Assert.*;
-import org.junit.Test;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.junit.Test;
 
 /**
  * Test for {@link LayoutVersion}

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Fri Aug  3 19:00:15 2012
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
-import static junit.framework.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -27,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
@@ -57,10 +58,10 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -79,6 +80,12 @@ import com.google.common.collect.Lists;
  * Tests for {@link PBHelper}
  */
 public class TestPBHelper {
+
+  /**
+   * Used for asserting equality on doubles.
+   */
+  private static final double DELTA = 0.000001;
+
   @Test
   public void testConvertNamenodeRole() {
     assertEquals(NamenodeRoleProto.BACKUP,
@@ -283,11 +290,12 @@ public class TestPBHelper {
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
       assertEquals(dn1.getAdminState(), dn2.getAdminState());
       assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
-      assertEquals(dn1.getBlockPoolUsedPercent(), dn2.getBlockPoolUsedPercent());
+      assertEquals(dn1.getBlockPoolUsedPercent(), 
+          dn2.getBlockPoolUsedPercent(), DELTA);
       assertEquals(dn1.getCapacity(), dn2.getCapacity());
       assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
       assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
-      assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
+      assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent(), DELTA);
       assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
       assertEquals(dn1.getHostName(), dn2.getHostName());
       assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
@@ -401,9 +409,9 @@ public class TestPBHelper {
   @Test
   public void testConvertLocatedBlock() {
     DatanodeInfo [] dnInfos = {
-        DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
-        DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
-        DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL)
     };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);

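The DELTA constant added above reflects a JUnit 4 rule: assertEquals on doubles requires an explicit tolerance, since bitwise floating-point equality is unreliable (the zero-delta overload is deprecated). The same hunk also moves the test datanode addresses onto the loopback 127.0.0.1. A minimal sketch of the tolerance-based assertion:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class DeltaDemo {
      private static final double DELTA = 0.000001;

      @Test
      public void testDoubleComparison() {
        double sum = 0.1 + 0.2;        // 0.30000000000000004 in IEEE-754
        assertEquals(0.3, sum, DELTA); // passes: difference is within DELTA
      }
    }
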
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Fri Aug  3 19:00:15 2012
@@ -20,7 +20,8 @@ package org.apache.hadoop.hdfs.security;
 
 
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -241,6 +242,7 @@ public class TestDelegationToken {
     final UserGroupInformation shortUgi = UserGroupInformation
         .createRemoteUser("JobTracker");
     longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
       public Object run() throws IOException {
         final DistributedFileSystem dfs = (DistributedFileSystem) cluster
             .getFileSystem();
@@ -254,6 +256,7 @@ public class TestDelegationToken {
       }
     });
     shortUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
       public Object run() throws IOException {
         final DistributedFileSystem dfs = (DistributedFileSystem) cluster
             .getFileSystem();
@@ -262,6 +265,7 @@ public class TestDelegationToken {
       }
     });
     longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
       public Object run() throws IOException {
         final DistributedFileSystem dfs = (DistributedFileSystem) cluster
             .getFileSystem();

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Fri Aug  3 19:00:15 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -138,10 +137,9 @@ public class TestDelegationTokenForProxy
     try {
       Token<DelegationTokenIdentifier> token = proxyUgi
           .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
+            @Override
             public Token<DelegationTokenIdentifier> run() throws IOException {
-              DistributedFileSystem dfs = (DistributedFileSystem) cluster
-                  .getFileSystem();
-              return dfs.getDelegationToken("RenewerUser");
+              return cluster.getFileSystem().getDelegationToken("RenewerUser");
             }
           });
       DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
@@ -205,7 +203,7 @@ public class TestDelegationTokenForProxy
       final PutOpParam.Op op = PutOpParam.Op.CREATE;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("Hello, webhdfs user!".getBytes());
       out.close();
@@ -220,7 +218,7 @@ public class TestDelegationTokenForProxy
       final PostOpParam.Op op = PostOpParam.Op.APPEND;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("\nHello again!".getBytes());
       out.close();

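Both delegation-token files above exercise UserGroupInformation.doAs, now with @Override on the anonymous PrivilegedExceptionAction. A minimal sketch of the idiom, using an arbitrary illustrative user name:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsDemo {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi =
            UserGroupInformation.createRemoteUser("RealUser");

        // Execute the action under ugi's identity; checked exceptions
        // thrown by run() are rethrown by doAs.
        String who = ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println("ran as: " + who);
      }
    }
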
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java Fri Aug  3 19:00:15 2012
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.security.
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.security.token.Token;
 
 /** Utilities for security tests */

Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1369164&r1=1369163&r2=1369164&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Fri Aug  3 19:00:15 2012
@@ -70,6 +70,7 @@ import org.apache.hadoop.security.SaslRp
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -300,8 +301,8 @@ public class TestBlockToken {
 
     int fdsAtStart = countOpenFileDescriptors();
     try {
-      long endTime = System.currentTimeMillis() + 3000;
-      while (System.currentTimeMillis() < endTime) {
+      long endTime = Time.now() + 3000;
+      while (Time.now() < endTime) {
         proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
             fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));


