hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1399950 [18/27] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apac...
Date: Fri, 19 Oct 2012 02:28:07 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Fri Oct 19 02:25:55 2012
@@ -17,37 +17,34 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
-import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSInputStream;
-import org.apache.hadoop.hdfs.SocketCache;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.io.IOUtils;
-
 import org.apache.hadoop.security.token.Token;
-import org.junit.Test;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import static org.junit.Assert.*;
-
+import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
-import org.mockito.stubbing.Answer;
 import org.mockito.invocation.InvocationOnMock;
-import static org.mockito.Mockito.spy;
+import org.mockito.stubbing.Answer;
 
 /**
  * This class tests the client connection caching in a single node
@@ -58,10 +55,12 @@ public class TestConnCache {
 
   static final int BLOCK_SIZE = 4096;
   static final int FILE_SIZE = 3 * BLOCK_SIZE;
-
+  final static int CACHE_SIZE = 4;
+  final static long CACHE_EXPIRY_MS = 200;
   static Configuration conf = null;
   static MiniDFSCluster cluster = null;
   static FileSystem fs = null;
+  static SocketCache cache;
 
   static final Path testFile = new Path("/testConnCache.dat");
   static byte authenticData[] = null;
@@ -79,6 +78,7 @@ public class TestConnCache {
     public RemoteBlockReader2 reader = null;
     private Socket sock = null;
 
+    @Override
     public RemoteBlockReader2 answer(InvocationOnMock invocation) throws Throwable {
       RemoteBlockReader2 prevReader = reader;
       reader = (RemoteBlockReader2) invocation.callRealMethod();
@@ -96,6 +96,9 @@ public class TestConnCache {
   public static void setupCluster() throws Exception {
     final int REPLICATION_FACTOR = 1;
 
+    /* create a socket cache. There is only one socket cache per jvm */
+    cache = SocketCache.getInstance(CACHE_SIZE, CACHE_EXPIRY_MS);
+
     util = new BlockReaderTestUtil(REPLICATION_FACTOR);
     cluster = util.getCluster();
     conf = util.getConf();
@@ -145,10 +148,7 @@ public class TestConnCache {
    * Test the SocketCache itself.
    */
   @Test
-  public void testSocketCache() throws IOException {
-    final int CACHE_SIZE = 4;
-    SocketCache cache = new SocketCache(CACHE_SIZE);
-
+  public void testSocketCache() throws Exception {
     // Make a client
     InetSocketAddress nnAddr =
         new InetSocketAddress("localhost", cluster.getNameNodePort());
@@ -162,6 +162,7 @@ public class TestConnCache {
     DataNode dn = util.getDataNode(block);
     InetSocketAddress dnAddr = dn.getXferAddress();
 
+
     // Make some sockets to the DN
     Socket[] dnSockets = new Socket[CACHE_SIZE];
     for (int i = 0; i < dnSockets.length; ++i) {
@@ -169,29 +170,75 @@ public class TestConnCache {
           dnAddr.getAddress(), dnAddr.getPort());
     }
 
+
     // Insert a socket to the NN
     Socket nnSock = new Socket(nnAddr.getAddress(), nnAddr.getPort());
-    cache.put(nnSock);
-    assertSame("Read the write", nnSock, cache.get(nnAddr));
-    cache.put(nnSock);
+    cache.put(nnSock, null);
+    assertSame("Read the write", nnSock, cache.get(nnAddr).sock);
+    cache.put(nnSock, null);
 
     // Insert DN socks
     for (Socket dnSock : dnSockets) {
-      cache.put(dnSock);
+      cache.put(dnSock, null);
     }
 
     assertEquals("NN socket evicted", null, cache.get(nnAddr));
     assertTrue("Evicted socket closed", nnSock.isClosed());
-
+ 
     // Lookup the DN socks
     for (Socket dnSock : dnSockets) {
-      assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr));
+      assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr).sock);
       dnSock.close();
     }
 
     assertEquals("Cache is empty", 0, cache.size());
   }
 
+
+  /**
+   * Test the SocketCache expiry.
+   * Verify that socket cache entries expire after the set
+   * expiry time.
+   */
+  @Test
+  public void testSocketCacheExpiry() throws Exception {
+    // Make a client
+    InetSocketAddress nnAddr =
+        new InetSocketAddress("localhost", cluster.getNameNodePort());
+    DFSClient client = new DFSClient(nnAddr, conf);
+
+    // Find out the DN addr
+    LocatedBlock block =
+        client.getNamenode().getBlockLocations(
+            testFile.toString(), 0, FILE_SIZE)
+        .getLocatedBlocks().get(0);
+    DataNode dn = util.getDataNode(block);
+    InetSocketAddress dnAddr = dn.getXferAddress();
+
+
+    // Make some sockets to the DN and put in cache
+    Socket[] dnSockets = new Socket[CACHE_SIZE];
+    for (int i = 0; i < dnSockets.length; ++i) {
+      dnSockets[i] = client.socketFactory.createSocket(
+          dnAddr.getAddress(), dnAddr.getPort());
+      cache.put(dnSockets[i], null);
+    }
+
+    // Client side still has the sockets cached
+    assertEquals(CACHE_SIZE, client.socketCache.size());
+
+    //sleep for a second and see if it expired
+    Thread.sleep(CACHE_EXPIRY_MS + 1000);
+    
+    // Client side has no sockets cached
+    assertEquals(0, client.socketCache.size());
+
+    //sleep for another second and see if 
+    //the daemon thread runs fine on empty cache
+    Thread.sleep(CACHE_EXPIRY_MS + 1000);
+  }
+
+
   /**
    * Read a file served entirely from one DN. Seek around and read from
    * different offsets. And verify that they all use the same socket.
@@ -232,33 +279,6 @@ public class TestConnCache {
 
     in.close();
   }
-  
-  /**
-   * Test that the socket cache can be disabled by setting the capacity to
-   * 0. Regression test for HDFS-3365.
-   */
-  @Test
-  public void testDisableCache() throws IOException {
-    LOG.info("Starting testDisableCache()");
-
-    // Reading with the normally configured filesystem should
-    // cache a socket.
-    DFSTestUtil.readFile(fs, testFile);
-    assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
-    
-    // Configure a new instance with no caching, ensure that it doesn't
-    // cache anything
-    Configuration confWithoutCache = new Configuration(fs.getConf());
-    confWithoutCache.setInt(
-        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
-    FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
-    try {
-      DFSTestUtil.readFile(fsWithoutCache, testFile);
-      assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
-    } finally {
-      fsWithoutCache.close();
-    }
-  }
 
   @AfterClass
   public static void teardownCluster() throws Exception {

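The change above replaces the per-test "new SocketCache(CACHE_SIZE)" with a JVM-wide singleton obtained through SocketCache.getInstance(CACHE_SIZE, CACHE_EXPIRY_MS), and the new testSocketCacheExpiry depends on a background daemon evicting entries once they outlive CACHE_EXPIRY_MS. A minimal sketch of that pattern, with illustrative names rather than the actual org.apache.hadoop.hdfs.SocketCache internals:

    import java.util.Iterator;
    import java.util.LinkedList;

    // Sketch of a capacity-bounded, time-expiring singleton cache. Class and
    // member names are hypothetical; only the shape mirrors the new
    // SocketCache.getInstance(capacity, expiryMs) / put / get / size API.
    class ExpiringCacheSketch<T> {
      private static ExpiringCacheSketch<?> instance;

      private static final class Entry<T> {
        final T value;
        final long createdMs = System.currentTimeMillis();
        Entry(T value) { this.value = value; }
      }

      private final LinkedList<Entry<T>> entries = new LinkedList<Entry<T>>();
      private final int capacity;
      private final long expiryMs;

      private ExpiringCacheSketch(int capacity, long expiryMs) {
        this.capacity = capacity;
        this.expiryMs = expiryMs;
        Thread evictor = new Thread(new Runnable() {
          @Override
          public void run() {
            // Loops forever, including over an empty cache -- the case the
            // second sleep in testSocketCacheExpiry exercises.
            while (true) {
              try { Thread.sleep(ExpiringCacheSketch.this.expiryMs); }
              catch (InterruptedException e) { return; }
              evictExpired();
            }
          }
        });
        evictor.setDaemon(true);
        evictor.start();
      }

      @SuppressWarnings("unchecked")
      static synchronized <T> ExpiringCacheSketch<T> getInstance(
          int capacity, long expiryMs) {
        if (instance == null) {             // one cache per JVM
          instance = new ExpiringCacheSketch<T>(capacity, expiryMs);
        }
        return (ExpiringCacheSketch<T>) instance;
      }

      synchronized void put(T value) {
        if (entries.size() >= capacity) {
          entries.removeFirst();            // evict oldest on overflow
        }
        entries.addLast(new Entry<T>(value));
      }

      synchronized T get() {
        Entry<T> e = entries.pollFirst();
        return e == null ? null : e.value;
      }

      private synchronized void evictExpired() {
        long now = System.currentTimeMillis();
        for (Iterator<Entry<T>> it = entries.iterator(); it.hasNext(); ) {
          if (now - it.next().createdMs >= expiryMs) {
            it.remove();
          }
        }
      }

      synchronized int size() { return entries.size(); }
    }

The evict-oldest-on-overflow branch is what the updated assertions lean on: pushing CACHE_SIZE DataNode sockets into a full cache displaces the NameNode socket, hence "NN socket evicted".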
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Fri Oct 19 02:25:55 2012
@@ -18,21 +18,23 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
-import java.io.RandomAccessFile;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 import java.util.Random;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
 
 /**
  * A JUnit test for corrupted file handling.

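Beyond the import reshuffle, the imports this file keeps (RandomAccessFile, FileChannel, ByteBuffer) point at what the test actually does: rewrite bytes of on-disk block and checksum files so subsequent reads must trip CRC verification. A hedged sketch of that corruption step; the helper and its arguments are illustrative, not the test's own code:

    import java.io.File;
    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    class CorruptionSketch {
      // Invert a run of bytes in place to simulate bit rot. The real test
      // locates replica and .meta files under the MiniDFSCluster data dirs.
      static void corruptRange(File f, long offset, int len) throws Exception {
        RandomAccessFile raf = new RandomAccessFile(f, "rw");
        try {
          FileChannel channel = raf.getChannel();
          ByteBuffer buf = ByteBuffer.allocate(len);
          channel.read(buf, offset);
          buf.flip();
          for (int i = 0; i < buf.limit(); i++) {
            buf.put(i, (byte) (buf.get(i) ^ 0xff));  // flip every bit
          }
          channel.write(buf, offset);
        } finally {
          raf.close();
        }
      }
    }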
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java Fri Oct 19 02:25:55 2012
@@ -24,20 +24,25 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.junit.Test;
 
 
-public class TestDFSAddressConfig extends TestCase {
+public class TestDFSAddressConfig {
 
+  @Test
   public void testDFSAddressConfig() throws IOException {
     Configuration conf = new HdfsConfiguration();
 

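This hunk is the migration template applied throughout the commit: drop "extends TestCase", mark each test method with @Test, and statically import the needed org.junit.Assert methods instead of inheriting them. In miniature (hypothetical class, same mechanics):

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class MigrationSketch {          // was: extends junit.framework.TestCase
      @Test                                 // was: discovered via the test* naming convention
      public void testSomething() {
        assertTrue("arithmetic holds", 1 + 1 == 2);  // was: inherited TestCase.assertTrue
      }
    }

One behavioral difference worth remembering: JUnit 4 runs only annotated methods, so a forgotten @Test silently skips a test -- which is why so many hunks in this commit consist of nothing but added annotations.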
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Fri Oct 19 02:25:55 2012
@@ -17,15 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.io.OutputStream;
 
-import org.junit.*;
-import static org.junit.Assert.fail;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 
 /**

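Only fail survives as a static import here because these tests use the call-then-fail idiom: invoke the operation that must throw, fail if control reaches the next statement, and swallow the expected exception type. Sketched generically (all names made up):

    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.junit.Test;

    public class ExpectedFailureSketch {
      @Test
      public void testRejectsBadState() {
        try {
          mustThrow();                      // hypothetical operation under test
          fail("expected IOException was not thrown");
        } catch (IOException e) {
          // expected: the test passes by landing here
        }
      }

      private void mustThrow() throws IOException {
        throw new IOException("simulated failure");
      }
    }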
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyString;
@@ -36,15 +41,15 @@ import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
@@ -64,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
@@ -72,8 +78,13 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -85,7 +96,7 @@ import com.google.common.base.Joiner;
  * These tests make sure that DFSClient retries fetching data from DFS
  * properly in case of errors.
  */
-public class TestDFSClientRetries extends TestCase {
+public class TestDFSClientRetries {
   private static final String ADDRESS = "0.0.0.0";
   final static private int PING_INTERVAL = 1000;
   final static private int MIN_SLEEP_TIME = 1000;
@@ -145,6 +156,7 @@ public class TestDFSClientRetries extend
    * This makes sure that when DN closes clients socket after client had
    * successfully connected earlier, the data can still be fetched.
    */
+  @Test
   public void testWriteTimeoutAtDataNode() throws IOException,
                                                   InterruptedException { 
     final int writeTimeout = 100; //milliseconds.
@@ -197,6 +209,7 @@ public class TestDFSClientRetries extend
    * of times trying to add a block
    */
   @SuppressWarnings("serial")
+  @Test
   public void testNotYetReplicatedErrors() throws IOException
   { 
     final String exceptionMsg = "Nope, not replicated yet...";
@@ -241,13 +254,15 @@ public class TestDFSClientRetries extend
    * operation, and not over the lifetime of the stream. It is a regression
    * test for HDFS-127.
    */
+  @Test
   public void testFailuresArePerOperation() throws Exception
   {
     long fileSize = 4096;
     Path file = new Path("/testFile");
 
-    // Set short retry timeout so this test runs faster
+    // Set short retry timeouts so this test runs faster
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 
     try {
@@ -316,6 +331,7 @@ public class TestDFSClientRetries extend
    * a client to safely retry a call and still produce a correct
    * file. See HDFS-3031.
    */
+  @Test
   public void testIdempotentAllocateBlockAndClose() throws Exception {
     final String src = "/testIdempotentAllocateBlock";
     Path file = new Path(src);
@@ -419,6 +435,7 @@ public class TestDFSClientRetries extend
       this.realNN = preSpyNN;
     }
 
+    @Override
     public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
       Object args[] = invocation.getArguments();
       LocatedBlocks realAnswer = realNN.getBlockLocations(
@@ -455,6 +472,7 @@ public class TestDFSClientRetries extend
   /**
    * Test that a DFSClient waits for random time before retry on busy blocks.
    */
+  @Test
   public void testDFSClientRetriesOnBusyBlocks() throws IOException {
     
     System.out.println("Testing DFSClient random waiting on busy blocks.");
@@ -483,9 +501,9 @@ public class TestDFSClientRetries extend
     //
     // Test 1: might fail
     // 
-    long timestamp = System.currentTimeMillis();
+    long timestamp = Time.now();
     boolean pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
-    long timestamp2 = System.currentTimeMillis();
+    long timestamp2 = Time.now();
     if ( pass ) {
       LOG.info("Test 1 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
     } else {
@@ -496,9 +514,9 @@ public class TestDFSClientRetries extend
     // Test 2: should never fail
     // 
     retries = 50;
-    timestamp = System.currentTimeMillis();
+    timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
-    timestamp2 = System.currentTimeMillis();
+    timestamp2 = Time.now();
     assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
     LOG.info("Test 2 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
     
@@ -507,9 +525,9 @@ public class TestDFSClientRetries extend
     // 
     retries = 3;
     timeWin = 1000;
-    timestamp = System.currentTimeMillis();
+    timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
-    timestamp2 = System.currentTimeMillis();
+    timestamp2 = Time.now();
     if ( pass ) {
       LOG.info("Test 3 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
     } else {
@@ -521,9 +539,9 @@ public class TestDFSClientRetries extend
     //
     retries = 50;
     timeWin = 1000;
-    timestamp = System.currentTimeMillis();
+    timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
-    timestamp2 = System.currentTimeMillis();
+    timestamp2 = Time.now();
     assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
     LOG.info("Test 4 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
   }
@@ -567,7 +585,7 @@ public class TestDFSClientRetries extend
       LOG.info("Path : \"" + file1 + "\"");
 
       // write 1 block to file
-      byte[] buffer = AppendTestUtil.randomBytes(System.currentTimeMillis(), fileLen);
+      byte[] buffer = AppendTestUtil.randomBytes(Time.now(), fileLen);
       stm.write(buffer, 0, fileLen);
       stm.close();
 
@@ -650,6 +668,7 @@ public class TestDFSClientRetries extend
       }
     }
     
+    @Override
     public void run() {
       try {
         fs = cluster.getNewFileSystemInstance(0);
@@ -697,6 +716,7 @@ public class TestDFSClientRetries extend
     public int get() { return counter; }
   }
 
+  @Test
   public void testGetFileChecksum() throws Exception {
     final String f = "/testGetFileChecksum";
     final Path p = new Path(f);
@@ -733,6 +753,7 @@ public class TestDFSClientRetries extend
    * RPC to the server and set rpcTimeout to less than n and ensure
    * that socketTimeoutException is obtained
    */
+  @Test
   public void testClientDNProtocolTimeout() throws IOException {
     final Server server = new TestServer(1, true);
     server.start();
@@ -747,7 +768,7 @@ public class TestDFSClientRetries extend
 
     try {
       proxy = DFSUtil.createClientDatanodeProtocolProxy(
-          fakeDnId, conf, 500, fakeBlock);
+          fakeDnId, conf, 500, false, fakeBlock);
 
       proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
       fail ("Did not get expected exception: SocketTimeoutException");
@@ -767,8 +788,8 @@ public class TestDFSClientRetries extend
    * read call, so the client should expect consecutive calls to behave the same
    * way. See HDFS-3067.
    */
-  public void testRetryOnChecksumFailure()
-      throws UnresolvedLinkException, IOException {
+  @Test
+  public void testRetryOnChecksumFailure() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -809,15 +830,22 @@ public class TestDFSClientRetries extend
   }
 
   /** Test client retry with namenode restarting. */
+  @Test(timeout=300000)
   public void testNamenodeRestart() throws Exception {
+    namenodeRestartTest(new Configuration(), false);
+  }
+
+  public static void namenodeRestartTest(final Configuration conf,
+      final boolean isWebHDFS) throws Exception {
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
 
     final List<Exception> exceptions = new ArrayList<Exception>();
 
     final Path dir = new Path("/testNamenodeRestart");
 
-    final Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+    conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
 
     final short numDatanodes = 3;
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -826,23 +854,71 @@ public class TestDFSClientRetries extend
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
+      final FileSystem fs = isWebHDFS?
+          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
       //create a file
       final long length = 1L << 20;
       final Path file1 = new Path(dir, "foo"); 
-      DFSTestUtil.createFile(dfs, file1, length, numDatanodes, 20120406L);
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
 
       //get file status
-      final FileStatus s1 = dfs.getFileStatus(file1);
+      final FileStatus s1 = fs.getFileStatus(file1);
       assertEquals(length, s1.getLen());
 
+      //create file4, write some data but not close
+      final Path file4 = new Path(dir, "file4"); 
+      final FSDataOutputStream out4 = fs.create(file4, false, 4096,
+          fs.getDefaultReplication(file4), 1024L, null);
+      final byte[] bytes = new byte[1000];
+      new Random().nextBytes(bytes);
+      out4.write(bytes);
+      out4.write(bytes);
+      out4.hflush();
+
       //shutdown namenode
       assertTrue(HdfsUtils.isHealthy(uri));
       cluster.shutdownNameNode(0);
       assertFalse(HdfsUtils.isHealthy(uri));
 
+      //namenode is down, continue writing file4 in a thread
+      final Thread file4thread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //write some more data and then close the file
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.close();
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      file4thread.start();
+
+      //namenode is down, read the file in a thread
+      final Thread reader = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //it should retry till namenode is up.
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
+            final FSDataInputStream in = fs.open(file1);
+            int count = 0;
+            for(; in.read() != -1; count++);
+            in.close();
+            assertEquals(s1.getLen(), count);
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      reader.start();
+
       //namenode is down, create another file in a thread
       final Path file3 = new Path(dir, "file"); 
       final Thread thread = new Thread(new Runnable() {
@@ -850,7 +926,7 @@ public class TestDFSClientRetries extend
         public void run() {
           try {
             //it should retry till namenode is up.
-            final FileSystem fs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
             DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
           } catch (Exception e) {
             exceptions.add(e);
@@ -877,12 +953,31 @@ public class TestDFSClientRetries extend
       }).start();
 
       //namenode is down, it should retry until namenode is up again. 
-      final FileStatus s2 = dfs.getFileStatus(file1);
+      final FileStatus s2 = fs.getFileStatus(file1);
       assertEquals(s1, s2);
 
       //check file1 and file3
       thread.join();
-      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file3));
+      assertEmpty(exceptions);
+      assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
+
+      reader.join();
+      assertEmpty(exceptions);
+
+      //check file4
+      file4thread.join();
+      assertEmpty(exceptions);
+      {
+        final FSDataInputStream in = fs.open(file4);
+        int count = 0;
+        for(int r; (r = in.read()) != -1; count++) {
+          Assert.assertEquals(String.format("count=%d", count),
+              bytes[count % bytes.length], (byte)r);
+        }
+        Assert.assertEquals(5 * bytes.length, count);
+        in.close();
+      }
 
       //enter safe mode
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -907,8 +1002,8 @@ public class TestDFSClientRetries extend
 
       //namenode is in safe mode, create should retry until it leaves safe mode.
       final Path file2 = new Path(dir, "bar");
-      DFSTestUtil.createFile(dfs, file2, length, numDatanodes, 20120406L);
-      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file2));
+      DFSTestUtil.createFile(fs, file2, length, numDatanodes, 20120406L);
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file2));
       
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -916,24 +1011,46 @@ public class TestDFSClientRetries extend
       final Path nonExisting = new Path(dir, "nonExisting");
       LOG.info("setPermission: " + nonExisting);
       try {
-        dfs.setPermission(nonExisting, new FsPermission((short)0));
+        fs.setPermission(nonExisting, new FsPermission((short)0));
         fail();
       } catch(FileNotFoundException fnfe) {
         LOG.info("GOOD!", fnfe);
       }
 
-      if (!exceptions.isEmpty()) {
-        LOG.error("There are " + exceptions.size() + " exception(s):");
-        for(int i = 0; i < exceptions.size(); i++) {
-          LOG.error("Exception " + i, exceptions.get(i));
-        }
-        fail();
-      }
+      assertEmpty(exceptions);
     } finally {
       cluster.shutdown();
     }
   }
 
+  static void assertEmpty(final List<Exception> exceptions) {
+    if (!exceptions.isEmpty()) {
+      final StringBuilder b = new StringBuilder("There are ")
+        .append(exceptions.size())
+        .append(" exception(s):");
+      for(int i = 0; i < exceptions.size(); i++) {
+        b.append("\n  Exception ")
+         .append(i)
+         .append(": ")
+         .append(StringUtils.stringifyException(exceptions.get(i)));
+      }
+      fail(b.toString());
+    }
+  }
+
+  private static FileSystem createFsWithDifferentUsername(
+      final Configuration conf, final boolean isWebHDFS
+      ) throws IOException, InterruptedException {
+    final String username = UserGroupInformation.getCurrentUser(
+        ).getShortUserName() + "_XXX";
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        username, new String[]{"supergroup"});
+
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+        : DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
+  @Test
   public void testMultipleLinearRandomRetry() {
     parseMultipleLinearRandomRetry(null, "");
     parseMultipleLinearRandomRetry(null, "11");

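The reworked testNamenodeRestart runs writers and a reader on background threads while the NameNode is down, funnels any Exception they hit into the shared exceptions list, and checks that list with the new assertEmpty helper after each join() -- necessary because an assertion thrown on a worker thread never reaches the JUnit runner. The pattern in isolation (hypothetical names; this sketch wraps the list in Collections.synchronizedList as a precaution, where the test itself uses a plain ArrayList):

    import static org.junit.Assert.fail;

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class WorkerFailureSketch {
      static void doRiskyWork() throws Exception {
        // stand-in for writing/reading HDFS while the NameNode restarts
      }

      public static void main(String[] args) throws InterruptedException {
        final List<Exception> exceptions =
            Collections.synchronizedList(new ArrayList<Exception>());

        Thread worker = new Thread(new Runnable() {
          @Override
          public void run() {
            try {
              doRiskyWork();
            } catch (Exception e) {
              exceptions.add(e);            // surface it to the main thread
            }
          }
        });
        worker.start();
        worker.join();                      // assert only after joining

        if (!exceptions.isEmpty()) {
          fail("worker thread failed: " + exceptions.get(0));
        }
      }
    }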
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Fri Oct 19 02:25:55 2012
@@ -17,17 +17,21 @@
 */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 import java.io.File;
 import java.util.Collections;
 import java.util.List;
 
-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.junit.After;
+import org.junit.Test;
 
 import com.google.common.collect.Lists;
 
@@ -35,7 +39,7 @@ import com.google.common.collect.Lists;
  * This test ensures the appropriate response from the system when 
  * the system is finalized.
  */
-public class TestDFSFinalize extends TestCase {
+public class TestDFSFinalize {
  
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSFinalize");
@@ -86,6 +90,7 @@ public class TestDFSFinalize extends Tes
   /**
    * This test attempts to finalize the NameNode and DataNode.
    */
+  @Test
   public void testFinalize() throws Exception {
     UpgradeUtilities.initialize();
     
@@ -125,7 +130,8 @@ public class TestDFSFinalize extends Tes
     } // end numDir loop
   }
  
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();
   }

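The tearDown change here is small but load-bearing: JUnit 3 invoked the inherited protected tearDown() automatically, while JUnit 4 runs only public methods annotated @After, so keeping the method protected or forgetting the annotation would silently leak the cluster between tests. The minimal shape, with a stand-in resource type:

    import org.junit.After;
    import org.junit.Test;

    public class TeardownSketch {
      private AutoCloseable cluster;        // stand-in for MiniDFSCluster

      @Test
      public void testSomething() throws Exception {
        cluster = open();
        // ... exercise the resource ...
      }

      @After
      public void tearDown() throws Exception {
        // Runs after every @Test, including ones that threw; the null
        // check covers tests that failed before setup completed.
        if (cluster != null) {
          cluster.close();
        }
      }

      private AutoCloseable open() {
        return new AutoCloseable() {
          @Override public void close() {}
        };
      }
    }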
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java Fri Oct 19 02:25:55 2012
@@ -17,34 +17,43 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.*;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
 
 /**
- * This class tests that the DFS command mkdirs cannot create subdirectories
- * from a file when passed an illegal path.  HADOOP-281.
+ * This class tests that the DFS command mkdirs only creates valid
+ * directories, and generally behaves as expected.
  */
-public class TestDFSMkdirs extends TestCase {
+public class TestDFSMkdirs {
+  private Configuration conf = new HdfsConfiguration();
+
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+      "//test1",
+      "/test2/..",
+      "/test2//bar",
+      "/test2/../test4",
+      "/test5/."
+  };
 
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    DataOutputStream stm = fileSys.create(name);
-    stm.writeBytes("wchien");
-    stm.close();
-  }
-  
   /**
    * Tests mkdirs can create a directory that does not exist and will
-   * not create a subdirectory off a file.
+   * not create a subdirectory off a file. Regression test for HADOOP-281.
    */
+  @Test
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       // First create a new directory with mkdirs
@@ -55,7 +64,7 @@ public class TestDFSMkdirs extends TestC
 
       // Second, create a file in that directory.
       Path myFile = new Path("/test/mkdirs/myFile");
-      writeFile(fileSys, myFile);
+      DFSTestUtil.writeFile(fileSys, myFile, "hello world");
    
       // Third, use mkdir to create a subdirectory off of that file,
       // and check that it fails.
@@ -79,18 +88,19 @@ public class TestDFSMkdirs extends TestC
   /**
    * Tests mkdir will not create directory when parent is missing.
    */
+  @Test
   public void testMkdir() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     try {
       // Create a dir in root dir, should succeed
-      assertTrue(dfs.mkdir(new Path("/mkdir-" + System.currentTimeMillis()),
+      assertTrue(dfs.mkdir(new Path("/mkdir-" + Time.now()),
           FsPermission.getDefault()));
       // Create a dir when parent dir exists as a file, should fail
       IOException expectedException = null;
-      String filePath = "/mkdir-file-" + System.currentTimeMillis();
-      writeFile(dfs, new Path(filePath));
+      String filePath = "/mkdir-file-" + Time.now();
+      DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
       try {
         dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
       } catch (IOException e) {
@@ -103,7 +113,7 @@ public class TestDFSMkdirs extends TestC
       // Create a dir in a non-exist directory, should fail
       expectedException = null;
       try {
-        dfs.mkdir(new Path("/non-exist/mkdir-" + System.currentTimeMillis()),
+        dfs.mkdir(new Path("/non-exist/mkdir-" + Time.now()),
             FsPermission.getDefault());
       } catch (IOException e) {
         expectedException = e;
@@ -117,4 +127,29 @@ public class TestDFSMkdirs extends TestC
       cluster.shutdown();
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * rejects it.
+   */
+  @Test
+  public void testMkdirRpcNonCanonicalPath() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    try {
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+      
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        try {
+          nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
+          fail("Did not fail when called with a non-canonicalized path: "
+             + pathStr);
+        } catch (InvalidPathException ipe) {
+          // expected
+        }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

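The new testMkdirRpcNonCanonicalPath drives mkdirs through the raw NamenodeProtocols RPC interface because the client-side FileSystem/Path layer normally canonicalizes paths before they reach the wire; only the direct call proves the NameNode itself rejects them. The rule encoded by NON_CANONICAL_PATHS, restated as a hypothetical predicate (not the NameNode's actual validation code):

    class CanonicalPathSketch {
      // A canonical absolute path starts with "/" and has no empty, ".",
      // or ".." components: "//a", "/a/..", and "/a/." all fail below.
      static boolean isCanonical(String path) {
        if (!path.startsWith("/")) {
          return false;
        }
        if (path.equals("/")) {
          return true;                      // the root is its own canonical form
        }
        for (String component : path.substring(1).split("/", -1)) {
          if (component.isEmpty()
              || component.equals(".") || component.equals("..")) {
            return false;
          }
        }
        return true;
      }
    }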
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Fri Oct 19 02:25:55 2012
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 
-import junit.framework.AssertionFailedError;
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -35,12 +36,15 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /** Unit tests for permission */
-public class TestDFSPermission extends TestCase {
+public class TestDFSPermission {
   public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
   final private static Configuration conf = new HdfsConfiguration();
   
@@ -77,7 +81,7 @@ public class TestDFSPermission extends T
   static {
     try {
       // Initiate the random number generator and logging the seed
-      long seed = Util.now();
+      long seed = Time.now();
       r = new Random(seed);
       LOG.info("Random number generator uses seed " + seed);
       LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS);
@@ -105,13 +109,13 @@ public class TestDFSPermission extends T
     }
   }
 
-  @Override
+  @Before
   public void setUp() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
   }
   
-  @Override
+  @After
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -121,6 +125,7 @@ public class TestDFSPermission extends T
   /** This tests if permission setting in create, mkdir, and 
    * setPermission works correctly
    */
+  @Test
   public void testPermissionSetting() throws Exception {
     testPermissionSetting(OpType.CREATE); // test file creation
     testPermissionSetting(OpType.MKDIRS); // test directory creation
@@ -256,6 +261,7 @@ public class TestDFSPermission extends T
    * check that ImmutableFsPermission can be used as the argument
    * to setPermission
    */
+  @Test
   public void testImmutableFsPermission() throws IOException {
     fs = FileSystem.get(conf);
 
@@ -265,6 +271,7 @@ public class TestDFSPermission extends T
   }
   
   /* check if the ownership of a file/directory is set correctly */
+  @Test
   public void testOwnership() throws Exception {
     testOwnership(OpType.CREATE); // test file creation
     testOwnership(OpType.MKDIRS); // test directory creation
@@ -353,6 +360,7 @@ public class TestDFSPermission extends T
 
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
+  @Test
   public void testPermissionChecking() throws Exception {
     try {
       fs = FileSystem.get(conf);
@@ -532,7 +540,7 @@ public class TestDFSPermission extends T
         } catch(AccessControlException e) {
           assertTrue(expectPermissionDeny());
         }
-      } catch (AssertionFailedError ae) {
+      } catch (AssertionError ae) {
         logPermissions();
         throw ae;
       }
@@ -879,6 +887,7 @@ public class TestDFSPermission extends T
                 requiredAncestorPermission;
     }
 
+    @Override
     protected void logPermissions() {
       super.logPermissions();
       LOG.info("dst ancestor permission: "

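The catch-clause change from junit.framework.AssertionFailedError to java.lang.AssertionError tracks the assert migration: org.junit.Assert methods throw AssertionError, so the old catch type would let JUnit 4 failures bypass the logPermissions() diagnostics entirely. The log-then-rethrow shape it preserves:

    import static org.junit.Assert.assertTrue;

    class DiagnosticsOnFailureSketch {
      static void check(boolean condition) {
        try {
          assertTrue("permission check", condition);  // throws java.lang.AssertionError
        } catch (AssertionError ae) {
          logDiagnostics();                 // emit context, then still fail the test
          throw ae;
        }
      }

      static void logDiagnostics() {
        System.err.println("permission state at time of failure ...");
      }
    }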
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java Fri Oct 19 02:25:55 2012
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -26,8 +28,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.junit.Test;
 
-public class TestDFSRemove extends junit.framework.TestCase {
+public class TestDFSRemove {
   final Path dir = new Path("/test/remove/");
 
   void list(FileSystem fs, String name) throws IOException {
@@ -51,6 +54,7 @@ public class TestDFSRemove extends junit
     return total;
   }
   
+  @Test
   public void testRemove() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java Fri Oct 19 02:25:55 2012
@@ -16,6 +16,9 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -25,8 +28,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.junit.Test;
 
-public class TestDFSRename extends junit.framework.TestCase {
+public class TestDFSRename {
   static int countLease(MiniDFSCluster cluster) {
     return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
   }
@@ -46,6 +50,7 @@ public class TestDFSRename extends junit
     a_out.close();
   }
   
+  @Test
   public void testRename() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Fri Oct 19 02:25:55 2012
@@ -19,22 +19,25 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
@@ -44,7 +47,7 @@ import com.google.common.collect.Lists;
 * the system when the system is rolled back under various storage state and
 * version conditions.
 */
-public class TestDFSRollback extends TestCase {
+public class TestDFSRollback {
  
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSRollback");
@@ -131,6 +134,7 @@ public class TestDFSRollback extends Tes
    * This test attempts to rollback the NameNode and DataNode under
    * a number of valid and invalid conditions.
    */
+  @Test
   public void testRollback() throws Exception {
     File[] baseDirs;
     UpgradeUtilities.initialize();
@@ -299,7 +303,8 @@ public class TestDFSRollback extends Tes
     }
   }
 
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
 import java.io.File;
@@ -31,10 +35,9 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
 import java.util.Scanner;
+import java.util.zip.DeflaterOutputStream;
 import java.util.zip.GZIPOutputStream;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -50,14 +53,20 @@ import org.apache.hadoop.hdfs.tools.DFSA
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.compress.BZip2Codec;
+import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 
 /**
  * This class tests commands from DFSShell.
  */
-public class TestDFSShell extends TestCase {
+public class TestDFSShell {
   private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
   
   static final String TEST_ROOT_DIR =
@@ -94,6 +103,7 @@ public class TestDFSShell extends TestCa
     System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
   }
 
+  @Test
   public void testZeroSizeFile() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -136,6 +146,7 @@ public class TestDFSShell extends TestCa
     }
   }
   
+  @Test
   public void testRecrusiveRm() throws IOException {
 	  Configuration conf = new HdfsConfiguration();
 	  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -161,6 +172,7 @@ public class TestDFSShell extends TestCa
     }
   }
     
+  @Test
   public void testDu() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -210,6 +222,7 @@ public class TestDFSShell extends TestCa
     }
                                   
   }
+  @Test
   public void testPut() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -231,6 +244,7 @@ public class TestDFSShell extends TestCa
       show("begin");
       
       final Thread copy2ndFileThread = new Thread() {
+        @Override
         public void run() {
           try {
             show("copy local " + f2 + " to remote " + dst);
@@ -250,6 +264,7 @@ public class TestDFSShell extends TestCa
       System.setSecurityManager(new SecurityManager() {
         private boolean firstTime = true;
   
+        @Override
         public void checkPermission(Permission perm) {
           if (firstTime) {
             Thread t = Thread.currentThread();
@@ -306,6 +321,7 @@ public class TestDFSShell extends TestCa
 
 
   /** check command error outputs and exit statuses. */
+  @Test
   public void testErrOutPut() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -446,6 +462,7 @@ public class TestDFSShell extends TestCa
     }
   }
   
+  @Test
   public void testURIPaths() throws Exception {
     Configuration srcConf = new HdfsConfiguration();
     Configuration dstConf = new HdfsConfiguration();
@@ -538,6 +555,7 @@ public class TestDFSShell extends TestCa
     }
   }
 
+  @Test
   public void testText() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
@@ -563,6 +581,8 @@ public class TestDFSShell extends TestCa
     try {
       final FileSystem fs = root.getFileSystem(conf);
       fs.mkdirs(root);
+
+      // Test the gzip type of files. Magic detection.
       OutputStream zout = new GZIPOutputStream(
           fs.create(new Path(root, "file.gz")));
       Random r = new Random();
@@ -587,7 +607,7 @@ public class TestDFSShell extends TestCa
           Arrays.equals(file.toByteArray(), out.toByteArray()));
 
       // Create a sequence file with a gz extension, to test proper
-      // container detection
+      // container detection. Magic detection.
       SequenceFile.Writer writer = SequenceFile.createWriter(
           conf,
           SequenceFile.Writer.file(new Path(root, "file.gz")),
@@ -605,6 +625,45 @@ public class TestDFSShell extends TestCa
       assertTrue("Output doesn't match input",
           Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
       out.reset();
+
+      // Test deflate. Extension-based detection.
+      OutputStream dout = new DeflaterOutputStream(
+          fs.create(new Path(root, "file.deflate")));
+      byte[] outbytes = "foo".getBytes();
+      dout.write(outbytes);
+      dout.close();
+      out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+      argv = new String[2];
+      argv[0] = "-text";
+      argv[1] = new Path(root, "file.deflate").toString();
+      ret = ToolRunner.run(new FsShell(conf), argv);
+      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
+      assertTrue("Output doesn't match input",
+          Arrays.equals(outbytes, out.toByteArray()));
+      out.reset();
+
+      // Test a simple codec. Extension based detection. We use
+      // Bzip2 cause its non-native.
+      CompressionCodec codec = (CompressionCodec)
+          ReflectionUtils.newInstance(BZip2Codec.class, conf);
+      String extension = codec.getDefaultExtension();
+      Path p = new Path(root, "file." + extension);
+      OutputStream fout = new DataOutputStream(codec.createOutputStream(
+          fs.create(p, true)));
+      byte[] writebytes = "foo".getBytes();
+      fout.write(writebytes);
+      fout.close();
+      out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+      argv = new String[2];
+      argv[0] = "-text";
+      argv[1] = new Path(root, p).toString();
+      ret = ToolRunner.run(new FsShell(conf), argv);
+      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
+      assertTrue("Output doesn't match input",
+          Arrays.equals(writebytes, out.toByteArray()));
+      out.reset();
     } finally {
       if (null != bak) {
         System.setOut(bak);
@@ -612,6 +671,7 @@ public class TestDFSShell extends TestCa
     }
   }
 
+  @Test
   public void testCopyToLocal() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -709,6 +769,7 @@ public class TestDFSShell extends TestCa
     return path;
   }
 
+  @Test
   public void testCount() throws Exception {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -875,6 +936,7 @@ public class TestDFSShell extends TestCa
     }
   }
   
+  @Test
   public void testFilePermissions() throws IOException {
     Configuration conf = new HdfsConfiguration();
     
@@ -940,6 +1002,7 @@ public class TestDFSShell extends TestCa
   /**
    * Tests various options of DFSShell.
    */
+  @Test
   public void testDFSShell() throws IOException {
     Configuration conf = new HdfsConfiguration();
     /* This tests some properties of ChecksumFileSystem as well.
@@ -1092,6 +1155,19 @@ public class TestDFSShell extends TestCa
         }
         assertEquals(0, val);
 
+        args = new String[2];
+        args[0] = "-touchz";
+        args[1] = "/test/mkdirs/thisDirNotExists/noFileHere";
+        val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+
+
         args = new String[3];
         args[0] = "-test";
         args[1] = "-e";
@@ -1207,6 +1283,7 @@ public class TestDFSShell extends TestCa
     String run(int exitcode, String... options) throws IOException;
   }
 
+  @Test
   public void testRemoteException() throws Exception {
     UserGroupInformation tmpUGI = 
       UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
@@ -1250,9 +1327,15 @@ public class TestDFSShell extends TestCa
     }
   }
   
+  @Test
   public void testGet() throws IOException {
     DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
     final Configuration conf = new HdfsConfiguration();
+    // A race can happen here: the block scanner may be reading the file
+    // while the test tries to corrupt it, which fails the test on Windows.
+    // Disable the block scanner to avoid this race.
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
+    
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
@@ -1269,6 +1352,7 @@ public class TestDFSShell extends TestCa
       TestGetRunner runner = new TestGetRunner() {
         private int count = 0;
 
+        @Override
         public String run(int exitcode, String... options) throws IOException {
           String dst = TEST_ROOT_DIR + "/" + fname+ ++count;
           String[] args = new String[options.length + 3];
@@ -1309,6 +1393,7 @@ public class TestDFSShell extends TestCa
     }
   }
 
+  @Test
   public void testLsr() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -1366,6 +1451,7 @@ public class TestDFSShell extends TestCa
    * and return -1 exit code.
    * @throws Exception
    */
+  @Test
   public void testInvalidShell() throws Exception {
     Configuration conf = new Configuration(); // default FS (non-DFS)
     DFSAdmin admin = new DFSAdmin();
@@ -1375,6 +1461,7 @@ public class TestDFSShell extends TestCa
   }
 
   // force Copy Option is -f
+  @Test
   public void testCopyCommandsWithForceOption() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
@@ -1440,4 +1527,95 @@ public class TestDFSShell extends TestCa
 
   }
 
+  /**
+   * Delete a file, optionally configuring trash on the server and the client.
+   */
+  private void deleteFileUsingTrash(
+      boolean serverTrash, boolean clientTrash) throws Exception {
+    // Run a cluster, optionally with trash enabled on the server
+    Configuration serverConf = new HdfsConfiguration();
+    if (serverTrash) {
+      serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    }
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf)
+      .numDataNodes(1).format(true).build();
+    Configuration clientConf = new Configuration(serverConf);
+
+    // Create a client, optionally with trash enabled
+    if (clientTrash) {
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    } else {
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
+    }
+
+    FsShell shell = new FsShell(clientConf);
+    FileSystem fs = null;
+
+    try {
+      // Create and delete a file
+      fs = cluster.getFileSystem();
+      writeFile(fs, new Path(TEST_ROOT_DIR, "foo"));
+      final String testFile = TEST_ROOT_DIR + "/foo";
+      final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
+      String[] argv = new String[] { "-rm", testFile };
+      int res = ToolRunner.run(shell, argv);
+      assertEquals("rm failed", 0, res);
+
+      if (serverTrash) {
+        // If the server config was set we should use it unconditionally
+        assertTrue("File not in trash", fs.exists(new Path(trashFile)));
+      } else if (clientTrash) {
+        // If the server config was not set but the client config was
+        // set then we should use it
+        assertTrue("File not in trashed", fs.exists(new Path(trashFile)));
+      } else {
+        // If neither was set then we should not have trashed the file
+        assertFalse("File was not removed", fs.exists(new Path(testFile)));
+        assertFalse("File was trashed", fs.exists(new Path(trashFile)));
+      }
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test that the server trash configuration is respected when
+   * the client configuration is not set.
+   */
+  @Test
+  public void testServerConfigRespected() throws Exception {
+    deleteFileUsingTrash(true, false);
+  }
+
+  /**
+   * Test that server trash configuration is respected even when the
+   * client configuration is set.
+   */
+  @Test
+  public void testServerConfigRespectedWithClient() throws Exception {
+    deleteFileUsingTrash(true, true);
+  }
+
+  /**
+   * Test that the client trash configuration is respected when
+   * the server configuration is not set.
+   */
+  @Test
+  public void testClientConfigRespected() throws Exception {
+    deleteFileUsingTrash(false, true);
+  }
+
+  /**
+   * Test that trash is disabled by default.
+   */
+  @Test
+  public void testNoTrashConfig() throws Exception {
+    deleteFileUsingTrash(false, false);
+  }
 }
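
For readers following the new -text cases above: the detection split they exercise (magic header for gzip and SequenceFile, file extension for deflate and bzip2) can be reproduced with the stock codec factory. The sketch below is illustrative only and is not part of this commit; it assumes a vanilla Configuration with the default codec list.

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class TextCodecSketch {
      // Open a path the way the extension-based half of -text does:
      // look the codec up by file suffix and wrap the raw stream if found.
      static InputStream openForText(FileSystem fs, Path p, Configuration conf)
          throws IOException {
        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
        CompressionCodec codec = factory.getCodec(p); // extension lookup
        InputStream in = fs.open(p);
        // FsShell additionally sniffs gzip and SequenceFile magic bytes;
        // that half of the detection is omitted in this sketch.
        return (codec == null) ? in : codec.createInputStream(in);
      }
    }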

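Similarly, the trash precedence the new deleteFileUsingTrash() cases assert (a server-side fs.trash.interval wins when set; the client's value applies otherwise) is what Trash.moveToAppropriateTrash() implements in this era of the code. A minimal usage sketch, again not part of the commit; the path is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class TrashSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setLong("fs.trash.interval", 1); // client-side setting, minutes
        FileSystem fs = FileSystem.get(conf);
        // Consults the server's fs.trash.interval first and falls back to
        // the client's; returns true if the path was moved to trash.
        boolean trashed = Trash.moveToAppropriateTrash(
            fs, new Path("/user/me/foo"), conf);
        System.out.println(trashed ? "moved to trash" : "deleted outright");
      }
    }
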
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java Fri Oct 19 02:25:55 2012
@@ -17,22 +17,24 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
 
-public class TestDFSShellGenericOptions extends TestCase {
+public class TestDFSShellGenericOptions {
 
+  @Test
   public void testDFSCommand() throws IOException {
     String namenode = null;
     MiniDFSCluster cluster = null;

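The hunk above shows the JUnit 3 to JUnit 4 conversion that recurs through the rest of this file set: drop the junit.framework.TestCase base class, annotate test methods with @Test, and turn the protected setUp()/tearDown() overrides into public @Before/@After methods. A schematic example, not taken from the commit:

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class ExampleMigratedTest {   // no longer extends TestCase
      @Before
      public void setUp() {              // was: protected void setUp()
      }

      @Test                              // discovery is now annotation-driven,
      public void testSomething() {      // not driven by the testXxx name
        assertEquals(2, 1 + 1);
      }

      @After
      public void tearDown() {           // was: protected void tearDown()
      }
    }
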
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Fri Oct 19 02:25:55 2012
@@ -19,25 +19,27 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.junit.After;
+import org.junit.Test;
 
 /**
  * This test ensures the appropriate response (successful or failure) from 
  * a Datanode when the system is started with differing version combinations. 
  */
-public class TestDFSStartupVersions extends TestCase {
+public class TestDFSStartupVersions {
   
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSStartupVersions");
@@ -235,6 +237,7 @@ public class TestDFSStartupVersions exte
    *         this iterations version 3-tuple
    * </pre>
    */
+  @Test
   public void testVersions() throws Exception {
     UpgradeUtilities.initialize();
     Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
@@ -276,7 +279,8 @@ public class TestDFSStartupVersions exte
     }
   }
   
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Fri Oct 19 02:25:55 2012
@@ -17,25 +17,32 @@
 */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
-import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is started under various storage state and
 * version conditions.
 */
-public class TestDFSStorageStateRecovery extends TestCase {
+public class TestDFSStorageStateRecovery {
  
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSStorageStateRecovery");
@@ -311,6 +318,7 @@ public class TestDFSStorageStateRecovery
    * This test iterates over the testCases table and attempts
    * to startup the NameNode normally.
    */
+  @Test
   public void testNNStorageStates() throws Exception {
     String[] baseDirs;
 
@@ -354,6 +362,7 @@ public class TestDFSStorageStateRecovery
    * This test iterates over the testCases table for Datanode storage and
    * attempts to startup the DataNode normally.
    */
+  @Test
   public void testDNStorageStates() throws Exception {
     String[] baseDirs;
 
@@ -394,6 +403,7 @@ public class TestDFSStorageStateRecovery
    * This test iterates over the testCases table for block pool storage and
    * attempts to startup the DataNode normally.
    */
+  @Test
   public void testBlockPoolStorageStates() throws Exception {
     String[] baseDirs;
 
@@ -431,13 +441,15 @@ public class TestDFSStorageStateRecovery
     } // end numDirs loop
   }
 
-  protected void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
     LOG.info("Setting up the directory structures.");
     UpgradeUtilities.initialize();
   }
 
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();
   }
-}
\ No newline at end of file
+}

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java Fri Oct 19 02:25:55 2012
@@ -19,6 +19,13 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.apache.hadoop.test.GenericTestUtils.assertExists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,14 +34,10 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
-import static org.apache.hadoop.test.GenericTestUtils.assertExists;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -43,8 +46,6 @@ import org.junit.Test;
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
-import static org.junit.Assert.*;
-
 /**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is upgraded under various storage state and

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Oct 19 02:25:55 2012
@@ -18,35 +18,46 @@
 
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.TreeMap;
 import java.util.zip.CRC32;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
 
 /**
  * This tests upgrading an HDFS filesystem from saved directory images of
  * earlier Hadoop versions, verifying the file contents after the upgrade.
  * 
- * This test uses the following two file from src/test/.../dfs directory :
- *   1) hadoop-version-dfs-dir.tgz : contains DFS directories.
+ * This test uses the following items from the src/test/.../dfs directory:
+ *   1) hadoop-22-dfs-dir.tgz and other tarred pre-upgrade NN / DN 
+ *      directory images
  *   2) hadoop-dfs-dir.txt : checksums that are compared in this test.
  * Please read hadoop-dfs-dir.txt for more information.  
  */
-public class TestDFSUpgradeFromImage extends TestCase {
+public class TestDFSUpgradeFromImage {
   
   private static final Log LOG = LogFactory
       .getLog(TestDFSUpgradeFromImage.class);
@@ -54,14 +65,23 @@ public class TestDFSUpgradeFromImage ext
                       new File(MiniDFSCluster.getBaseDirectory());
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
-  
-  public int numDataNodes = 4;
-  
+  private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+
   private static class ReferenceFileInfo {
     String path;
     long checksum;
   }
   
+  private static final Configuration upgradeConf;
+  
+  static {
+    upgradeConf = new HdfsConfiguration();
+    upgradeConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
+    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Maven
+      System.setProperty("test.build.data", "build/test/data");
+    }
+  }
+  
   LinkedList<ReferenceFileInfo> refList = new LinkedList<ReferenceFileInfo>();
   Iterator<ReferenceFileInfo> refIter;
   
@@ -129,11 +149,33 @@ public class TestDFSUpgradeFromImage ext
     }
   }
   
-  CRC32 overallChecksum = new CRC32();
+  /**
+   * Try to open a file for reading several times.
+   * 
+   * If we fail because lease recovery hasn't completed, retry the open.
+   */
+  private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs,
+      String pathName) throws IOException {
+    IOException exc = null;
+    for (int tries = 0; tries < 10; tries++) {
+      try {
+        return dfs.dfs.open(pathName);
+      } catch (IOException e) {
+        exc = e;
+      }
+      if (!exc.getMessage().contains("Cannot obtain " +
+          "block length for LocatedBlock")) {
+        throw exc;
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException ignored) {}
+    }
+    throw exc;
+  }
   
-  private void verifyDir(DistributedFileSystem dfs, Path dir) 
-                                           throws IOException {
-    
+  private void verifyDir(DistributedFileSystem dfs, Path dir,
+      CRC32 overallChecksum) throws IOException {
     FileStatus[] fileArr = dfs.listStatus(dir);
     TreeMap<Path, Boolean> fileMap = new TreeMap<Path, Boolean>();
     
@@ -149,11 +191,11 @@ public class TestDFSUpgradeFromImage ext
       overallChecksum.update(pathName.getBytes());
       
       if ( isDir ) {
-        verifyDir(dfs, path);
+        verifyDir(dfs, path, overallChecksum);
       } else {
         // this is not a directory. Checksum the file data.
         CRC32 fileCRC = new CRC32();
-        FSInputStream in = dfs.dfs.open(pathName);
+        FSInputStream in = dfsOpenFileWithRetries(dfs, pathName);
         byte[] buf = new byte[4096];
         int nRead = 0;
         while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
@@ -167,7 +209,8 @@ public class TestDFSUpgradeFromImage ext
   
   private void verifyFileSystem(DistributedFileSystem dfs) throws IOException {
   
-    verifyDir(dfs, new Path("/"));
+    CRC32 overallChecksum = new CRC32();
+    verifyDir(dfs, new Path("/"), overallChecksum);
     
     verifyChecksum("overallCRC", overallChecksum.getValue());
     
@@ -182,6 +225,7 @@ public class TestDFSUpgradeFromImage ext
    * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
    * NN, verifying that the correct error message is thrown.
    */
+  @Test
   public void testFailOnPreUpgradeImage() throws IOException {
     Configuration conf = new HdfsConfiguration();
 
@@ -225,15 +269,18 @@ public class TestDFSUpgradeFromImage ext
   /**
    * Test upgrade from 0.22 image
    */
+  @Test
   public void testUpgradeFromRel22Image() throws IOException {
     unpackStorage(HADOOP22_IMAGE);
-    upgradeAndVerify();
+    upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+        numDataNodes(4));
   }
   
   /**
    * Test upgrade from 0.22 image with corrupt md5, make sure it
    * fails to upgrade
    */
+  @Test
   public void testUpgradeFromCorruptRel22Image() throws IOException {
     unpackStorage(HADOOP22_IMAGE);
     
@@ -248,7 +295,8 @@ public class TestDFSUpgradeFromImage ext
     
     // Upgrade should now fail
     try {
-      upgradeAndVerify();
+      upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+          numDataNodes(4));
       fail("Upgrade did not fail with bad MD5");
     } catch (IOException ioe) {
       String msg = StringUtils.stringifyException(ioe);
@@ -257,21 +305,34 @@ public class TestDFSUpgradeFromImage ext
       }
     }
   }
-
-  private void upgradeAndVerify() throws IOException {
+    
+  static void recoverAllLeases(DFSClient dfs, 
+      Path path) throws IOException {
+    String pathStr = path.toString();
+    HdfsFileStatus status = dfs.getFileInfo(pathStr);
+    if (!status.isDir()) {
+      dfs.recoverLease(pathStr);
+      return;
+    }
+    byte prev[] = HdfsFileStatus.EMPTY_NAME;
+    DirectoryListing dirList;
+    do {
+      dirList = dfs.listPaths(pathStr, prev);
+      HdfsFileStatus files[] = dirList.getPartialListing();
+      for (HdfsFileStatus f : files) {
+        recoverAllLeases(dfs, f.getFullPath(path));
+      }
+      prev = dirList.getLastName();
+    } while (dirList.hasMore());
+  }
+  
+  private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+      throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new HdfsConfiguration();
-      if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
-        System.setProperty("test.build.data", "build/test/data");
-      }
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
-      cluster = new MiniDFSCluster.Builder(conf)
-                                  .numDataNodes(numDataNodes)
-                                  .format(false)
-                                  .startupOption(StartupOption.UPGRADE)
-                                  .clusterId("testClusterId")
-                                  .build();
+      bld.format(false).startupOption(StartupOption.UPGRADE)
+        .clusterId("testClusterId");
+      cluster = bld.build();
       cluster.waitActive();
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
       DFSClient dfsClient = dfs.dfs;
@@ -282,12 +343,27 @@ public class TestDFSUpgradeFromImage ext
           Thread.sleep(1000);
         } catch (InterruptedException ignored) {}
       }
-
+      recoverAllLeases(dfsClient, new Path("/"));
       verifyFileSystem(dfs);
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     } 
   }
 
-
+  /**
+   * Test upgrade from a 1.x image with some blocksBeingWritten
+   */
+  @Test
+  public void testUpgradeFromRel1BBWImage() throws IOException {
+    unpackStorage(HADOOP1_BBW_IMAGE);
+    Configuration conf = new Configuration(upgradeConf);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
+        System.getProperty("test.build.data") + File.separator + 
+        "dfs" + File.separator + 
+        "data" + File.separator + 
+        "data1");
+    upgradeAndVerify(new MiniDFSCluster.Builder(conf).
+          numDataNodes(1).enableManagedDfsDirsRedundancy(false).
+          manageDataDfsDirs(false));
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Fri Oct 19 02:25:55 2012
@@ -18,10 +18,22 @@
 
 package org.apache.hadoop.hdfs;
 
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -34,18 +46,18 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import org.junit.Before;
+import org.junit.Test;
 
 public class TestDFSUtil {
   
@@ -608,4 +620,12 @@ public class TestDFSUtil {
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
+  
+  @Test
+  public void testIsValidName() {
+    assertFalse(DFSUtil.isValidName("/foo/../bar"));
+    assertFalse(DFSUtil.isValidName("/foo//bar"));
+    assertTrue(DFSUtil.isValidName("/"));
+    assertTrue(DFSUtil.isValidName("/bar/"));
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java Fri Oct 19 02:25:55 2012
@@ -20,7 +20,10 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.InputStream;
 import java.io.PrintWriter;
@@ -110,7 +113,7 @@ public class TestDataTransferKeepalive {
     
     // Take it out of the cache - reading should
     // give an EOF.
-    Socket s = dfsClient.socketCache.get(dnAddr);
+    Socket s = dfsClient.socketCache.get(dnAddr).sock;
     assertNotNull(s);
     assertEquals(-1, NetUtils.getInputStream(s).read());
   }

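The one-line change above reflects that the client's socket cache now stores a small wrapper pairing each cached Socket with its associated streams, hence the extra .sock dereference in the test. Roughly, with the class shape and stream fields assumed for illustration rather than quoted from the commit:

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.net.Socket;

    // Assumed shape of the cache entry: the test reaches through it
    // via ".sock" to get at the underlying socket.
    class SocketAndStreams {
      final Socket sock;
      final InputStream in;   // reusable wrapped input stream
      final OutputStream out; // reusable wrapped output stream

      SocketAndStreams(Socket sock, InputStream in, OutputStream out) {
        this.sock = sock;
        this.in = in;
        this.out = out;
      }
    }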

