hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1532910 [4/4] - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apac...
Date: Wed, 16 Oct 2013 21:07:38 GMT
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileInputStreamCache.java Wed Oct 16 21:07:28 2013
@@ -17,12 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-
 import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -31,6 +26,10 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.junit.Test;
 
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
 public class TestFileInputStreamCache {
   static final Log LOG = LogFactory.getLog(TestFileInputStreamCache.class);
 
@@ -80,7 +79,7 @@ public class TestFileInputStreamCache {
   public void testAddAndRetrieve() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 1000000);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost", 
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
@@ -94,7 +93,7 @@ public class TestFileInputStreamCache {
   public void testExpiry() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 10);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost", 
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
@@ -109,12 +108,12 @@ public class TestFileInputStreamCache {
   public void testEviction() throws Exception {
     FileInputStreamCache cache = new FileInputStreamCache(1, 10000000);
     DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost", 
-        "xyzzy", 8080, 9090, 7070);
+        "xyzzy", 8080, 9090, 7070, 6060);
     ExtendedBlock block = new ExtendedBlock("poolid", 123);
     TestFileDescriptorPair pair = new TestFileDescriptorPair();
     cache.put(dnId, block, pair.getFileInputStreams());
     DatanodeID dnId2 = new DatanodeID("127.0.0.1", "localhost", 
-        "xyzzy", 8081, 9091, 7071);
+        "xyzzy", 8081, 9091, 7071, 6061);
     TestFileDescriptorPair pair2 = new TestFileDescriptorPair();
     cache.put(dnId2, block, pair2.getFileInputStreams());
     FileInputStream fis[] = cache.get(dnId, block);

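The extra port argument threaded through these DatanodeID constructions (6060 and 6061 above, 103 in the TestPeerCache hunks, and the https default in the NNThroughputBenchmark hunk below) suggests the constructor gained an info-secure (https) port. A compilable sketch of the assumed shape; the class and field names here are illustrative, not copied from the Hadoop source:

    // Sketch: a DatanodeID-like value object with the assumed seven-argument constructor.
    public class DatanodeIdSketch {
        private final String ipAddr, hostName, storageId;
        private final int xferPort;        // data transfer port
        private final int infoPort;        // http info port
        private final int infoSecurePort;  // assumed new https info port
        private final int ipcPort;         // ipc port

        public DatanodeIdSketch(String ipAddr, String hostName, String storageId,
                int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
            this.ipAddr = ipAddr;
            this.hostName = hostName;
            this.storageId = storageId;
            this.xferPort = xferPort;
            this.infoPort = infoPort;
            this.infoSecurePort = infoSecurePort;
            this.ipcPort = ipcPort;
        }

        public static void main(String[] args) {
            // Mirrors the updated call sites in the tests above.
            new DatanodeIdSketch("127.0.0.1", "localhost", "xyzzy",
                8080, 9090, 7070, 6060);
        }
    }
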
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Wed Oct 16 21:07:28 2013
@@ -88,19 +88,21 @@ public class TestHftpDelegationToken {
     URI fsUri = URI.create("hftp://localhost");
     MyHftpFileSystem fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
     assertEquals(httpPort, fs.getCanonicalUri().getPort());
-    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    checkTokenSelection(fs, httpPort, conf);
 
     // test with explicit default port
+    // Make sure it uses the port from the hftp URI.
     fsUri = URI.create("hftp://localhost:"+httpPort);
     fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
     assertEquals(httpPort, fs.getCanonicalUri().getPort());
-    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    checkTokenSelection(fs, httpPort, conf);
     
     // test with non-default port
+    // Make sure it uses the port from the hftp URI.
     fsUri = URI.create("hftp://localhost:"+(httpPort+1));
     fs = (MyHftpFileSystem) FileSystem.newInstance(fsUri, conf);
     assertEquals(httpPort+1, fs.getCanonicalUri().getPort());
-    checkTokenSelection(fs, httpsPort, conf); // should still use secure port
+    checkTokenSelection(fs, httpPort + 1, conf);
     
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
   }
@@ -178,7 +180,7 @@ public class TestHftpDelegationToken {
       }
       assertNotNull(ex);
       assertNotNull(ex.getCause());
-      assertEquals("Unexpected end of file from server",
+      assertEquals("Remote host closed connection during handshake",
                    ex.getCause().getMessage());
     } finally {
       t.interrupt();

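The rewritten assertions expect delegation-token selection to track the http port of the hftp URI instead of a separate secure port. The expected rule, reduced to a standalone sketch (tokenPort is a hypothetical helper, not HftpFileSystem code; 50070 is the usual http default of this era):

    import java.net.URI;

    public class HftpTokenPortSketch {
        // Assumed value of DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT at the time.
        static final int HTTP_DEFAULT = 50070;

        /** Port the token service should use: the explicit URI port, else the http default. */
        static int tokenPort(URI fsUri) {
            return fsUri.getPort() != -1 ? fsUri.getPort() : HTTP_DEFAULT;
        }

        public static void main(String[] args) {
            System.out.println(tokenPort(URI.create("hftp://localhost")));       // 50070
            System.out.println(tokenPort(URI.create("hftp://localhost:50071"))); // 50071
        }
    }
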
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Wed Oct 16 21:07:28 2013
@@ -294,11 +294,13 @@ public class TestHftpFileSystem {
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     assertEquals(uri, fs.getUri());
+
+    // HFTP uses http to get the token so canonical service name should
+    // return the http port.
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
         fs.getCanonicalServiceName()
     );
   }
@@ -307,17 +309,18 @@ public class TestHftpFileSystem {
   public void testHftpCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(123, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     assertEquals(uri, fs.getUri());
+
+    // HFTP uses http to get the token so canonical service name should
+    // return the http port.
     assertEquals(
-        "127.0.0.1:456",
+        "127.0.0.1:123",
         fs.getCanonicalServiceName()
     );
   }
@@ -329,11 +332,10 @@ public class TestHftpFileSystem {
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     assertEquals(uri, fs.getUri());
     assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        "127.0.0.1:123",
         fs.getCanonicalServiceName()
     );
   }
@@ -342,17 +344,15 @@ public class TestHftpFileSystem {
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
 
     URI uri = URI.create("hftp://localhost:789");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(123, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
    
     assertEquals(uri, fs.getUri()); 
     assertEquals(
-        "127.0.0.1:456",
+        "127.0.0.1:789",
         fs.getCanonicalServiceName()
     );
   }
@@ -366,7 +366,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     assertEquals(uri, fs.getUri());
     assertEquals(
@@ -385,7 +384,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(456, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
     
     assertEquals(uri, fs.getUri());
     assertEquals(
@@ -401,7 +399,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
 
     assertEquals(uri, fs.getUri());
     assertEquals(
@@ -420,7 +417,6 @@ public class TestHftpFileSystem {
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
     assertEquals(456, fs.getDefaultPort());
-    assertEquals(456, fs.getDefaultSecurePort());
 
     assertEquals(uri, fs.getUri());
     assertEquals(

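Taken together, these hunks pin down the canonical-service-name rule the tests now encode: hftp falls back to the http default port, hsftp to the https default, and an explicit URI port overrides either. As a self-contained sketch (the default values are assumptions based on contemporary releases, not read from DFSConfigKeys):

    import java.net.URI;

    public class CanonicalServiceSketch {
        static final int HTTP_DEFAULT = 50070;   // assumed http default port
        static final int HTTPS_DEFAULT = 50470;  // assumed https default port

        static String canonicalService(URI uri) {
            int fallback = "hsftp".equals(uri.getScheme()) ? HTTPS_DEFAULT : HTTP_DEFAULT;
            int port = uri.getPort() != -1 ? uri.getPort() : fallback;
            return "127.0.0.1:" + port;  // the tests resolve localhost to 127.0.0.1
        }

        public static void main(String[] args) {
            System.out.println(canonicalService(URI.create("hftp://localhost")));      // 127.0.0.1:50070
            System.out.println(canonicalService(URI.create("hftp://localhost:789")));  // 127.0.0.1:789
            System.out.println(canonicalService(URI.create("hsftp://localhost")));     // 127.0.0.1:50470
        }
    }
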
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java Wed Oct 16 21:07:28 2013
@@ -31,9 +31,9 @@ public class TestNameNodeHttpServer {
     System.setProperty("jetty.ssl.password", "foo");
     System.setProperty("jetty.ssl.keypassword", "bar");
 
-    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0).build();
-    dfsCluster.waitActive();
-    dfsCluster.shutdown();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .build();
+
+    cluster.shutdown();
   }
 }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java Wed Oct 16 21:07:28 2013
@@ -17,26 +17,23 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
+import com.google.common.collect.HashMultiset;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import com.google.common.collect.HashMultiset;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.*;
 
 public class TestPeerCache {
   static final Log LOG = LogFactory.getLog(TestPeerCache.class);
@@ -150,7 +147,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(3, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_datanode_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     FakePeer peer = new FakePeer(dnId, false);
     cache.put(dnId, peer);
     assertTrue(!peer.isClosed());
@@ -170,7 +167,7 @@ public class TestPeerCache {
     for (int i = 0; i < CAPACITY; ++i) {
       dnIds[i] = new DatanodeID("192.168.0.1",
           "fakehostname_" + i, "fake_datanode_id",
-          100, 101, 102);
+          100, 101, 102, 103);
       peers[i] = new FakePeer(dnIds[i], false);
     }
     for (int i = 0; i < CAPACITY; ++i) {
@@ -201,7 +198,7 @@ public class TestPeerCache {
     for (int i = 0; i < dnIds.length; ++i) {
       dnIds[i] = new DatanodeID("192.168.0.1",
           "fakehostname_" + i, "fake_datanode_id_" + i,
-          100, 101, 102);
+          100, 101, 102, 103);
       peers[i] = new FakePeer(dnIds[i], false);
     }
     for (int i = 0; i < CAPACITY; ++i) {
@@ -232,7 +229,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(CAPACITY, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_datanode_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
     for (int i = 0; i < CAPACITY; ++i) {
       FakePeer peer = new FakePeer(dnId, false);
@@ -257,7 +254,7 @@ public class TestPeerCache {
     PeerCache cache = new PeerCache(CAPACITY, 100000);
     DatanodeID dnId = new DatanodeID("192.168.0.1",
           "fakehostname", "fake_datanode_id",
-          100, 101, 102);
+          100, 101, 102, 103);
     HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
     for (int i = 0; i < CAPACITY; ++i) {
       FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Wed Oct 16 21:07:28 2013
@@ -577,4 +577,64 @@ public class TestShortCircuitLocalRead {
     System.out.println("Iteration " + iteration + " took " + (end - start));
     fs.delete(file1, false);
   }
+
+  public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
+    doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
+  }
+
+  /**
+   * Test that file data can be read by reading the block
+   * through a RemoteBlockReader.
+   * @throws IOException
+   */
+  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
+                                                          int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+             .format(true).build();
+    FileSystem fs = cluster.getFileSystem();
+    // check that / exists
+    Path path = new Path("/");
+    URI uri = cluster.getURI();
+    assertTrue("/ should be a directory", fs.getFileStatus(path)
+                .isDirectory() == true);
+
+    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
+    Path file1 = new Path("filelocal.dat");
+    FSDataOutputStream stm = createFile(fs, file1, 1);
+
+    stm.write(fileData);
+    stm.close();
+    try {
+      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, conf, shortCircuitFails);
+      // RemoteBlockReader does not support read(ByteBuffer bf)
+      assertTrue("RemoteBlockReader should reject read(ByteBuffer bf)",
+                    checkUnsupportedMethod(fs, file1, fileData, readOffset));
+    } catch(IOException e) {
+      throw new IOException("doTestShortCircuitReadWithRemoteBlockReader failed", e);
+    } catch(InterruptedException inEx) {
+      throw inEx;
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
+                                           byte[] expected, int readOffset) throws IOException {
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
+    IOUtils.skipFully(stm, readOffset);
+    try {
+      stm.read(actual);
+    } catch(UnsupportedOperationException unex) {
+      return true;
+    }
+    return false;
+  }
+
+
 }

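The new checkUnsupportedMethod helper is a small negative test: invoke the optional ByteBuffer read path and treat UnsupportedOperationException as the expected outcome. The same pattern in isolation (the reader interface below is a stand-in, not the HDFS BlockReader API):

    import java.nio.ByteBuffer;

    public class UnsupportedReadSketch {
        /** Stand-in for a reader that, like the legacy RemoteBlockReader here, rejects ByteBuffer reads. */
        interface BufferReader {
            int read(ByteBuffer buf);
        }

        static boolean readIsUnsupported(BufferReader reader, int len) {
            try {
                reader.read(ByteBuffer.allocateDirect(len));
            } catch (UnsupportedOperationException expected) {
                return true;  // the branch the test asserts on
            }
            return false;
        }

        public static void main(String[] args) {
            BufferReader legacy = buf -> {
                throw new UnsupportedOperationException("read(ByteBuffer) not implemented");
            };
            System.out.println(readIsUnsupported(legacy, 16)); // true
        }
    }
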
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Wed Oct 16 21:07:28 2013
@@ -17,23 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.doAnswer;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.jsp.JspWriter;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-
+import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -43,6 +27,8 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -54,11 +40,27 @@ import org.apache.hadoop.security.token.
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
 
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.jsp.JspWriter;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.IOException;
+import java.io.StringReader;
+import java.net.InetSocketAddress;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
 
 public class TestJspHelper {
 
@@ -446,9 +448,9 @@ public class TestJspHelper {
   @Test
   public void testSortNodeByFields() throws Exception {
     DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "datanode1",
-        1234, 2345, 3456);
+        1234, 2345, 3456, 4567);
     DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
-        1235, 2346, 3457);
+        1235, 2346, 3457, 4568);
     DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
         100, 924, 100, 10, 2);
     DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
@@ -480,5 +482,136 @@ public class TestJspHelper {
     JspHelper.sortNodeList(live, "pcbpused", "DSC");
     Assert.assertEquals(dnDesc1, live.get(0));
     Assert.assertEquals(dnDesc2, live.get(1));
+    
+    // sorting on a nonexistent field falls back to
+    // d1.getHostName().compareTo(d2.getHostName())
+    JspHelper.sortNodeList(live, "unexists", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    
+    JspHelper.sortNodeList(live, "unexists", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));  
+    
+    // test sorting by capacity
+    JspHelper.sortNodeList(live, "capacity", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    
+    JspHelper.sortNodeList(live, "capacity", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+
+    // test sorting by used
+    JspHelper.sortNodeList(live, "used", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    
+    JspHelper.sortNodeList(live, "used", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1)); 
+    
+    // test sorting by nondfsused
+    JspHelper.sortNodeList(live, "nondfsused", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    
+    JspHelper.sortNodeList(live, "nondfsused", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+   
+    // test sorting by remaining
+    JspHelper.sortNodeList(live, "remaining", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    
+    JspHelper.sortNodeList(live, "remaining", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+  }
+  
+  @Test
+  public void testPrintMethods() throws IOException {
+    JspWriter out = mock(JspWriter.class);      
+    HttpServletRequest req = mock(HttpServletRequest.class);
+    
+    final StringBuffer buffer = new StringBuffer();
+    
+    ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
+    doAnswer(new Answer<Object>() {      
+      @Override
+      public Object answer(InvocationOnMock invok) {
+        Object[] args = invok.getArguments();
+        buffer.append((String)args[0]);
+        return null;
+      }
+    }).when(out).print(arg.capture());
+    
+    
+    JspHelper.createTitle(out, req, "testfile.txt");
+    Mockito.verify(out, Mockito.times(1)).print(Mockito.anyString());
+    
+    JspHelper.addTableHeader(out);
+    Mockito.verify(out, Mockito.times(1 + 2)).print(Mockito.anyString());                  
+     
+    JspHelper.addTableRow(out, new String[] {" row11", "row12 "});
+    Mockito.verify(out, Mockito.times(1 + 2 + 4)).print(Mockito.anyString());      
+    
+    JspHelper.addTableRow(out, new String[] {" row11", "row12 "}, 3);
+    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4)).print(Mockito.anyString());
+      
+    JspHelper.addTableRow(out, new String[] {" row21", "row22"});
+    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4)).print(Mockito.anyString());      
+      
+    JspHelper.addTableFooter(out);
+    Mockito.verify(out, Mockito.times(1 + 2 + 4 + 4 + 4 + 1)).print(Mockito.anyString());
+    
+    assertFalse(Strings.isNullOrEmpty(buffer.toString()));               
   }
+  
+  @Test
+  public void testReadWriteReplicaState() {
+    try {
+      DataOutputBuffer out = new DataOutputBuffer();
+      DataInputBuffer in = new DataInputBuffer();
+      for (HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState
+          .values()) {
+        repState.write(out);
+        in.reset(out.getData(), out.getLength());
+        HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState
+            .read(in);
+        assertTrue("testReadWrite error !!!", repState == result);
+        out.reset();
+        in.reset();
+      }
+    } catch (Exception ex) {
+      fail("testReadWrite ex error ReplicaState");
+    }
+  }
+
+  @Test
+  public void testUpgradeStatusReport() {
+    short status = 6;
+    int version = 15;
+    String EXPECTED__NOTF_PATTERN = "Upgrade for version {0} has been completed.\nUpgrade is not finalized.";
+    String EXPECTED_PATTERN = "Upgrade for version {0} is in progress. Status = {1}%";
+
+    UpgradeStatusReport upgradeStatusReport = new UpgradeStatusReport(version,
+        status, true);
+    assertTrue(upgradeStatusReport.getVersion() == version);
+    assertTrue(upgradeStatusReport.getUpgradeStatus() == status);
+    assertTrue(upgradeStatusReport.isFinalized());
+
+    assertEquals(MessageFormat.format(EXPECTED_PATTERN, version, status),
+        upgradeStatusReport.getStatusText(true));
+
+    status += 100;
+    upgradeStatusReport = new UpgradeStatusReport(version, status, false);
+    assertFalse(upgradeStatusReport.isFinalized());
+    assertTrue(upgradeStatusReport.toString().equals(
+        MessageFormat.format(EXPECTED__NOTF_PATTERN, version)));
+    assertTrue(upgradeStatusReport.getStatusText(false).equals(
+        MessageFormat.format(EXPECTED__NOTF_PATTERN, version)));
+    assertTrue(upgradeStatusReport.getStatusText(true).equals(
+        MessageFormat.format(EXPECTED__NOTF_PATTERN, version)));
+  }  
 }

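testPrintMethods leans on a reusable Mockito idiom: stub the writer's print with an Answer that appends each argument to a buffer, then assert on both the cumulative call count and the captured output. The idiom on its own, with a plain interface standing in for JspWriter (a sketch, assuming Mockito on the classpath):

    import static org.mockito.Mockito.*;

    public class PrintCaptureSketch {
        interface Out {
            void print(String s);
        }

        public static void main(String[] args) {
            final StringBuffer buffer = new StringBuffer();
            Out out = mock(Out.class);
            doAnswer(invocation -> {
                buffer.append((String) invocation.getArguments()[0]);
                return null;
            }).when(out).print(anyString());

            out.print("<tr>");
            out.print("</tr>");
            verify(out, times(2)).print(anyString());
            System.out.println(buffer);  // <tr></tr>
        }
    }
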
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Wed Oct 16 21:07:28 2013
@@ -61,9 +61,10 @@ public class TestDatanodeJsp {
     
     InetSocketAddress nnIpcAddress = cluster.getNameNode().getNameNodeAddress();
     InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
-    int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
-    
-    URL url = new URL("http://localhost:" + dnInfoPort + "/"
+    String base = JspHelper.Url.url("http", cluster.getDataNodes().get(0)
+        .getDatanodeId());
+
+    URL url = new URL(base + "/"
         + "browseDirectory.jsp" + JspHelper.getUrlParam("dir", 
             URLEncoder.encode(testPath.toString(), "UTF-8"), true)
         + JspHelper.getUrlParam("namenodeInfoPort", Integer
@@ -86,18 +87,21 @@ public class TestDatanodeJsp {
     // check whether able to 'Go Back to File View' after tailing the file
     regex = "<a.+href=\"(.+?)\">Go\\s*Back\\s*to\\s*File\\s*View\\<\\/a\\>";
     assertFileContents(regex, "Go Back to File View");
+
+    regex = "<a href=\"///localhost:" + nnHttpAddress.getPort() + "/dfshealth.jsp\">Go back to DFS home</a>";
+    assertTrue("page should generate DFS home scheme without explicit scheme", viewFilePage.contains(regex));
   }
   
   private static void assertFileContents(String regex, String text)
       throws IOException {
     Pattern compile = Pattern.compile(regex);
     Matcher matcher = compile.matcher(viewFilePage);
-    URL hyperlink = null;
     if (matcher.find()) {
       // got hyperlink for Tail this file
-      hyperlink = new URL(matcher.group(1));
+      String u = matcher.group(1);
+      String urlString = u.startsWith("///") ? ("http://" + u.substring(3)) : u;
       viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil
-          .urlGet(hyperlink));
+          .urlGet(new URL(urlString)));
       assertTrue("page should show preview of file contents", viewFilePage
           .contains(FILE_DATA));
     } else {
@@ -166,6 +170,7 @@ public class TestDatanodeJsp {
     Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(reqMock).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
+    Mockito.doReturn("http").when(reqMock).getScheme();
   }
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {

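assertFileContents now has to cope with scheme-relative links of the form "///host:port/..." that the page emits, since java.net.URL cannot parse them directly. The rewrite rule in isolation (http is assumed only because the test requests ran over http):

    public class SchemeRelativeUrlSketch {
        /** Prefix a scheme onto "///host:port/..." links so java.net.URL can parse them. */
        static String normalize(String link) {
            return link.startsWith("///") ? "http://" + link.substring(3) : link;
        }

        public static void main(String[] args) {
            System.out.println(normalize("///localhost:50070/dfshealth.jsp"));
            System.out.println(normalize("http://localhost:50075/tail.jsp")); // unchanged
        }
    }
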
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Oct 16 21:07:28 2013
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -33,40 +25,28 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.util.*;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
 /**
  * Main class for a series of name-node benchmarks.
  * 
@@ -839,6 +819,7 @@ public class NNThroughputBenchmark imple
               DNS.getDefaultHost("default", "default"),
               "", getNodePort(dnIdx),
               DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+              DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
           new DataStorage(nsInfo),
           new ExportedBlockKeys(), VersionInfo.getVersion());
@@ -1305,7 +1286,7 @@ public class NNThroughputBenchmark imple
 
   /**
    * Main method of the benchmark.
-   * @param args command line parameters
+   * @param aArgs command line parameters
    */
   @Override // Tool
   public int run(String[] aArgs) throws Exception {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Wed Oct 16 21:07:28 2013
@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -37,6 +28,13 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
 
+import java.io.IOException;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.*;
+
 /**
  * Verify that TestCommitBlockSynchronization is idempotent.
  */
@@ -177,7 +175,7 @@ public class TestCommitBlockSynchronizat
     Block block = new Block(blockId, length, genStamp);
     FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
     DatanodeID[] newTargets = new DatanodeID[]{
-        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0)};
+        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0)};
 
     ExtendedBlock lastBlock = new ExtendedBlock();
     namesystemSpy.commitBlockSynchronization(

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Wed Oct 16 21:07:28 2013
@@ -32,6 +32,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -930,7 +932,9 @@ public class TestINodeFile {
       dirNodeFromNode = fsdir.getInode(dirNode.getId());
       assertSame(dirNode, dirNodeFromNode);
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
   
@@ -960,7 +964,55 @@ public class TestINodeFile {
       assertTrue(parentId == status.getFileId());
       
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @Test
+  public void testFilesInGetListingOps() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+
+      hdfs.mkdirs(new Path("/tmp"));
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
+
+      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
+          HdfsFileStatus.EMPTY_NAME, false);
+      assertTrue(dl.getPartialListing().length == 3);
+
+      String f2 = "f2";
+      dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
+      assertTrue(dl.getPartialListing().length == 1);
+
+      INode f2INode = fsdir.getINode("/tmp/f2");
+      String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
+      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
+          false);
+      assertTrue(dl.getPartialListing().length == 1);
+
+      // Test the deleted startAfter file
+      hdfs.delete(new Path("/tmp/f2"), false);
+      try {
+        dl = cluster.getNameNodeRpc().getListing("/tmp",
+            f2InodePath.getBytes(), false);
+        fail("Didn't get exception for the deleted startAfter token.");
+      } catch (IOException e) {
+        assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 }

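testFilesInGetListingOps exercises the startAfter cursor of getListing, including resuming from a /.reserved/.inodes/<id> path and expecting DirectoryListingStartAfterNotFoundException once the cursor file is deleted. The cursor contract, modelled on a plain sorted set (NoSuchElementException stands in for the HDFS exception; this is an analogy, not the FSDirectory code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.NoSuchElementException;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class StartAfterListingSketch {
        /** Entries strictly after the cursor; an empty cursor means "from the beginning". */
        static List<String> listAfter(SortedSet<String> dir, String startAfter) {
            if (!startAfter.isEmpty() && !dir.contains(startAfter)) {
                throw new NoSuchElementException("startAfter not found: " + startAfter);
            }
            return new ArrayList<>(
                startAfter.isEmpty() ? dir : dir.tailSet(startAfter + "\0"));
        }

        public static void main(String[] args) {
            SortedSet<String> tmp = new TreeSet<>(Arrays.asList("f1", "f2", "f3"));
            System.out.println(listAfter(tmp, "").size());   // 3, as in the first assertion
            System.out.println(listAfter(tmp, "f2").size()); // 1, as in the second
            tmp.remove("f2");
            try {
                listAfter(tmp, "f2");
            } catch (NoSuchElementException e) {
                System.out.println("deleted cursor rejected: " + e.getMessage());
            }
        }
    }
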
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java Wed Oct 16 21:07:28 2013
@@ -17,49 +17,71 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_EDITS;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_FSIMAGE;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAFEMODE;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAVING_CHECKPOINT;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.znerd.xmlenc.XMLOutputter;
 
-public class TestNameNodeJspHelper {
+import com.google.common.collect.ImmutableSet;
 
-  private MiniDFSCluster cluster = null;
-  Configuration conf = null;
+public class TestNameNodeJspHelper {
 
-  @Before
-  public void setUp() throws Exception {
+  private static final int DATA_NODES_AMOUNT = 2;
+  
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static final String NAMENODE_ATTRIBUTE_KEY = "name.node";  
+  
+  @BeforeClass
+  public static void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    cluster  = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(DATA_NODES_AMOUNT).build();
+    cluster.waitClusterUp();
   }
 
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if (cluster != null)
       cluster.shutdown();
   }
@@ -71,23 +93,23 @@ public class TestNameNodeJspHelper {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
     String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
         conf, ugi);
-    //tokenString returned must be null because security is disabled
+    // tokenString returned must be null because security is disabled
     Assert.assertEquals(null, tokenString);
   }
-  
+
   @Test
-  public void  tesSecurityModeText() {
+  public void testSecurityModeText() {
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be ON", 
+    Assert.assertTrue("security mode doesn't match. Should be ON",
         securityOnOff.contains("ON"));
-    //Security is enabled
+    // Security is enabled
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
     UserGroupInformation.setConfiguration(conf);
-    
+
     securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be OFF", 
+    Assert.assertTrue("security mode doesn't match. Should be OFF",
         securityOnOff.contains("OFF"));
   }
 
@@ -118,8 +140,77 @@ public class TestNameNodeJspHelper {
   }
 
   /**
+   * Tests for non-null, non-empty NameNode label.
+   */
+  @Test
+  public void testGetNameNodeLabel() {
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(
+      cluster.getNameNode());
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests for non-null, non-empty NameNode label when called before
+   * initialization of the NameNode RPC server.
+   */
+  @Test
+  public void testGetNameNodeLabelNullRpcServer() {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getRpcServer()).thenReturn(null);
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(
+      cluster.getNameNode());
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests that passing a null FSNamesystem to generateSnapshotReport does not
+   * throw NullPointerException.
+   */
+  @Test
+  public void testGenerateSnapshotReportNullNamesystem() throws Exception {
+    NamenodeJspHelper.generateSnapshotReport(mock(JspWriter.class), null);
+  }
+
+  /**
+   * Tests that redirectToRandomDataNode does not throw NullPointerException if
+   * it finds a null FSNamesystem.
+   */
+  @Test(expected=IOException.class)
+  public void testRedirectToRandomDataNodeNullNamesystem() throws Exception {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getNamesystem()).thenReturn(null);
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nn);
+    NamenodeJspHelper.redirectToRandomDataNode(context,
+      mock(HttpServletRequest.class), mock(HttpServletResponse.class));
+  }
+
+  /**
+   * Tests that XMLBlockInfo does not throw NullPointerException if it finds a
+   * null FSNamesystem.
+   */
+  @Test
+  public void testXMLBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLBlockInfo(null, 1L).toXML(doc);
+  }
+
+  /**
+   * Tests that XMLCorruptBlockInfo does not throw NullPointerException if it
+   * finds a null FSNamesystem.
+   */
+  @Test
+  public void testXMLCorruptBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLCorruptBlockInfo(null, mock(Configuration.class),
+      10, 1L).toXML(doc);
+  }
+
+  /**
    * Checks if the list contains any string that partially matches the regex.
-   * 
+   *
    * @param list List<String> containing strings to check
    * @param regex String regex to check
    * @return boolean true if some string in list partially matches regex
@@ -133,4 +224,149 @@ public class TestNameNodeJspHelper {
     }
     return false;
   }
+
+  @Test(timeout = 15000)
+  public void testGetRandomDatanode() {
+    ImmutableSet<String> set = ImmutableSet.of();
+    NameNode nameNode = cluster.getNameNode();
+    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+    for (DataNode dataNode : cluster.getDataNodes()) {
+      builder.add(dataNode.getDisplayName());
+    }
+    set = builder.build();
+
+    for (int i = 0; i < 10; i++) {
+      DatanodeDescriptor dnDescriptor = NamenodeJspHelper
+          .getRandomDatanode(nameNode);
+      assertTrue("testGetRandomDatanode error",
+          set.contains(dnDescriptor.toString()));
+    }
+  }
+      
+  @Test(timeout = 15000)
+  public void testNamenodeJspHelperRedirectToRandomDataNode() throws IOException, InterruptedException {
+    final String urlPart = "browseDirectory.jsp?namenodeInfoPort=";                     
+    
+    ServletContext context = mock(ServletContext.class);
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    HttpServletResponse resp = mock(HttpServletResponse.class);          
+    
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter(UserParam.NAME)).thenReturn("localuser");
+    when(context.getAttribute(NAMENODE_ATTRIBUTE_KEY)).thenReturn(
+        cluster.getNameNode());
+    when(context.getAttribute(JspHelper.CURRENT_CONF)).thenReturn(conf);    
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    doAnswer(new Answer<String>() {
+      @Override
+     public String answer(InvocationOnMock invocation) throws Throwable {
+        return null;
+        }
+    }).when(resp).sendRedirect(captor.capture());
+
+    NamenodeJspHelper.redirectToRandomDataNode(context, request, resp);    
+    assertTrue(captor.getValue().contains(urlPart));    
+  }
+  
+  private enum DataNodeStatus {
+    LIVE("[Live Datanodes(| +):(| +)]\\d"), 
+    DEAD("[Dead Datanodes(| +):(| +)]\\d");
+
+    private Pattern pattern;
+
+    public Pattern getPattern() {
+      return pattern;
+    }
+
+    DataNodeStatus(String line) {
+      this.pattern = Pattern.compile(line);
+    }
+  }
+
+  private void checkDeadLiveNodes(NameNode nameNode, int deadCount,
+      int lifeCount) {
+    FSNamesystem ns = nameNode.getNamesystem();
+    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    dm.fetchDatanodes(live, dead, true);
+    assertTrue("checkDeadLiveNodes error !!!", (live.size() == lifeCount)
+        && dead.size() == deadCount);
+  }
+
+  @Test(timeout = 15000)
+  public void testNodeListJspGenerateNodesList() throws IOException {
+    String output;
+    NameNode nameNode = cluster.getNameNode();
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nameNode);
+    when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
+        .thenReturn(cluster.getNameNode().getHttpAddress());    
+    checkDeadLiveNodes(nameNode, 0, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.LIVE);
+    assertCounts(DataNodeStatus.LIVE, output, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.DEAD);
+    assertCounts(DataNodeStatus.DEAD, output, 0);    
+  }
+
+  private void assertCounts(DataNodeStatus dataNodeStatus, String output,
+      int expectedCount) {
+    Matcher matcher = DataNodeStatus.LIVE.getPattern().matcher(output);
+    if (matcher.find()) {
+      String digitLine = output.substring(matcher.start(), matcher.end())
+          .trim();
+      assertTrue("assertCounts error. actual != expected",
+          Integer.valueOf(digitLine) == expectedCount);
+    } else {
+      fail("assertCount matcher error");
+    }
+  }
+
+  private String getOutputFromGeneratedNodesList(ServletContext context,
+      DataNodeStatus dnStatus) throws IOException {
+    JspWriter out = mock(JspWriter.class);
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    NamenodeJspHelper.NodeListJsp nodelistjsp = new NamenodeJspHelper.NodeListJsp();
+    final StringBuffer buffer = new StringBuffer();
+    doAnswer(new Answer<String>() {
+      @Override
+      public String answer(InvocationOnMock invok) {
+        Object[] args = invok.getArguments();
+        buffer.append((String) args[0]);
+        return null;
+      }
+    }).when(out).print(captor.capture());
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter("whatNodes")).thenReturn(dnStatus.name());
+    nodelistjsp.generateNodesList(context, out, request);
+    return buffer.toString();
+  }
+
+  @Test(timeout = 15000)
+  public void testGetInodeLimitText() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = 
+        ImmutableSet.of("files and directories", "Heap Memory used", "Non Heap Memory used");        
+    String line = NamenodeJspHelper.getInodeLimitText(fsn);    
+    for(String pattern: patterns) {
+      assertTrue("testInodeLimitText error " + pattern,
+          line.contains(pattern));
+    }    
+  }
+  
+  @Test(timeout = 15000)
+  public void testGetVersionTable() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = ImmutableSet.of(VersionInfo.getVersion(), 
+        VersionInfo.getRevision(), VersionInfo.getUser(), VersionInfo.getBranch(),
+        fsn.getClusterId(), fsn.getBlockPoolId());
+    String line = NamenodeJspHelper.getVersionTable(fsn);
+    for(String pattern: patterns) {
+       assertTrue("testGetVersionTable error " + pattern,
+           line.contains(pattern));
+    }
+  }  
 }

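The setup here trades @Before/@After for @BeforeClass/@AfterClass, so one MiniDFSCluster now serves every test method in the class instead of being rebuilt per test. The lifecycle difference in miniature (JUnit 4; the Object field stands in for the expensive cluster):

    import org.junit.AfterClass;
    import org.junit.Assert;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class SharedFixtureSketch {
        private static Object cluster;  // stand-in for MiniDFSCluster

        @BeforeClass
        public static void startOnce() {
            cluster = new Object();  // built once for the whole class
        }

        @AfterClass
        public static void stopOnce() {
            cluster = null;  // torn down after the last test
        }

        @Test
        public void one() {
            Assert.assertNotNull(cluster);
        }

        @Test
        public void two() {
            Assert.assertNotNull(cluster);  // same instance as in one()
        }
    }
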
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Oct 16 21:07:28 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -35,11 +36,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.ClientId;
@@ -47,7 +52,9 @@ import org.apache.hadoop.ipc.RPC.RpcKind
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.LightWeightCache;
 import org.junit.After;
 import org.junit.Assert;
@@ -75,12 +82,13 @@ public class TestNamenodeRetryCache {
       "TestNamenodeRetryCache", null, FsPermission.getDefault());
   private static DistributedFileSystem filesystem;
   private static int callId = 100;
-  private static Configuration conf = new HdfsConfiguration();
+  private static Configuration conf;
   private static final int BlockSize = 512;
   
   /** Start a cluster */
   @Before
   public void setup() throws Exception {
+    conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -294,6 +302,41 @@ public class TestNamenodeRetryCache {
   }
   
   /**
+   * Make sure a retry call does not hang because of the exception thrown in the
+   * first call.
+   */
+  @Test(timeout = 60000)
+  public void testUpdatePipelineWithFailOver() throws Exception {
+    cluster.shutdown();
+    namesystem = null;
+    filesystem = null;
+    cluster = new MiniDFSCluster.Builder(conf).nnTopology(
+        MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
+    FSNamesystem ns0 = cluster.getNamesystem(0);
+    ExtendedBlock oldBlock = new ExtendedBlock();
+    ExtendedBlock newBlock = new ExtendedBlock();
+    DatanodeID[] newNodes = new DatanodeID[2];
+    String[] newStorages = new String[2];
+    
+    newCall();
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
+      fail("Expect StandbyException from the updatePipeline call");
+    } catch (StandbyException e) {
+      // expected, since both NNs are initially in standby state
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+    
+    cluster.transitionToActive(0);
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
+    } catch (IOException e) {
+      // ignored; the retried call should not hang
+    }
+  }
+  
+  /**
   * Test for createSnapshot
    */
   @Test

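The new testUpdatePipelineWithFailOver case leans on the retry cache the class is named for: the NameNode remembers the outcome of each (clientId, callId) pair so a retried RPC replays the recorded result instead of re-executing. A hedged sketch of that bookkeeping, with illustrative names rather than the HDFS implementation:

    import java.util.HashMap;
    import java.util.Map;

    public class RetryCacheSketch {
      /** Cache key: a client plus its per-call sequence number. */
      static final class Key {
        final String clientId;
        final int callId;
        Key(String clientId, int callId) {
          this.clientId = clientId;
          this.callId = callId;
        }
        @Override public boolean equals(Object o) {
          if (!(o instanceof Key)) return false;
          Key k = (Key) o;
          return callId == k.callId && clientId.equals(k.clientId);
        }
        @Override public int hashCode() {
          return 31 * clientId.hashCode() + callId;
        }
      }

      private final Map<Key, Object> cache = new HashMap<Key, Object>();

      /** Returns the recorded result for a retry, or null on first attempt. */
      Object lookup(String clientId, int callId) {
        return cache.get(new Key(clientId, callId));
      }

      /** Records the outcome so later retries can replay it. */
      void record(String clientId, int callId, Object result) {
        cache.put(new Key(clientId, callId), result);
      }
    }
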
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java Wed Oct 16 21:07:28 2013
@@ -73,24 +73,28 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -111,11 +115,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -124,6 +130,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "RUNNING")
           .put("percentComplete", 0.5f)
           .put("steps", Collections.<Object>singletonList(
@@ -138,12 +145,14 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -164,11 +173,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -177,6 +188,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
@@ -191,11 +203,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 300L)
               .put("total", 300L)
               .put("percentComplete", 1.0f)
@@ -204,11 +218,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "AwaitingReportedBlocks")
+              .put("desc", "awaiting reported blocks")
               .put("count", 400L)
               .put("total", 400L)
               .put("percentComplete", 1.0f)

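Each phase and step in the servlet's JSON now carries a human-readable desc alongside its machine-friendly name. As a rough illustration of what a consumer sees, the following sketch parses an abbreviated phase entry; it assumes Jackson 1.x (org.codehaus.jackson) on the classpath, and the JSON literal is trimmed, not the servlet's full output.

    import java.util.List;
    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;

    public class DescFieldSketch {
      public static void main(String[] args) throws Exception {
        String json =
            "{\"phases\":[{\"name\":\"LoadingFsImage\","
            + "\"desc\":\"Loading fsimage\",\"status\":\"PENDING\","
            + "\"percentComplete\":0.0,\"steps\":[]}]}";
        ObjectMapper mapper = new ObjectMapper();
        Map<?, ?> root = mapper.readValue(json, Map.class);
        List<?> phases = (List<?>) root.get("phases");
        Map<?, ?> first = (Map<?, ?>) phases.get(0);
        // The machine-friendly name and the new display description:
        System.out.println(first.get("name") + " -> " + first.get("desc"));
      }
    }
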
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Wed Oct 16 21:07:28 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -47,19 +48,22 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.base.Joiner;
 
@@ -78,8 +82,12 @@ public class TestDelegationTokensWithHA 
   private static DelegationTokenSecretManager dtSecretManager;
   private static DistributedFileSystem dfs;
 
-  @BeforeClass
-  public static void setupCluster() throws Exception {
+  private volatile boolean catchup = false;
+  
+  @Before
+  public void setupCluster() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+    
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
@@ -101,18 +109,12 @@ public class TestDelegationTokensWithHA 
         nn0.getNamesystem());
   }
 
-  @AfterClass
-  public static void shutdownCluster() throws IOException {
+  @After
+  public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
-
-
-  @Before
-  public void prepTest() {
-    SecurityUtilTestHelper.setTokenServiceUseIp(true);
-  }
   
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
@@ -155,6 +157,96 @@ public class TestDelegationTokensWithHA 
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
   
+  private class EditLogTailerForTest extends EditLogTailer {
+    public EditLogTailerForTest(FSNamesystem namesystem, Configuration conf) {
+      super(namesystem, conf);
+    }
+    
+    public void catchupDuringFailover() throws IOException {
+      synchronized (TestDelegationTokensWithHA.this) {
+        while (!catchup) {
+          try {
+            LOG.info("The edit log tailer is waiting to catch up...");
+            TestDelegationTokensWithHA.this.wait();
+          } catch (InterruptedException e) {
+            // Ignored; keep waiting until the test signals catchup.
+          }
+        }
+      }
+      super.catchupDuringFailover();
+    }
+  }
+  
+  /**
+   * Test that the correct exception (StandbyException or RetriableException)
+   * is thrown during NN failover.
+   */
+  @Test
+  public void testDelegationTokenDuringNNFailover() throws Exception {
+    EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
+    // stop the editLogTailer of nn1
+    editLogTailer.stop();
+    Configuration conf = (Configuration) Whitebox.getInternalState(
+        editLogTailer, "conf");
+    nn1.getNamesystem().setEditLogTailerForTests(
+        new EditLogTailerForTest(nn1.getNamesystem(), conf));
+    
+    // create token
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(
+             new ByteArrayInputStream(tokenId)));
+
+    // Ensure that it's present in the nn0 secret manager and can
+    // be renewed directly from there.
+    LOG.info("A valid token should have non-null password, " +
+        "and should be renewed successfully");
+    assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+    
+    // transition nn0 to standby
+    cluster.transitionToStandby(0);
+    
+    try {
+      cluster.getNameNodeRpc(0).renewDelegationToken(token);
+      fail("StandbyException is expected since nn0 is in standby state");
+    } catch (StandbyException e) {
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+    
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          cluster.transitionToActive(1);
+        } catch (Exception e) {
+          LOG.error("Transition nn1 to active failed", e);
+        }    
+      }
+    }.start();
+    
+    Thread.sleep(1000);
+    try {
+      nn1.getNamesystem().verifyToken(token.decodeIdentifier(),
+          token.getPassword());
+      fail("RetriableException/StandbyException is expected since nn1 is in transition");
+    } catch (IOException e) {
+      assertTrue(e instanceof StandbyException
+          || e instanceof RetriableException);
+      LOG.info("Got expected exception", e);
+    }
+    
+    catchup = true;
+    synchronized (this) {
+      this.notifyAll();
+    }
+    
+    Configuration clientConf = dfs.getConf();
+    doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
+    doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
+  }
+  
   @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {

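EditLogTailerForTest above stalls catchupDuringFailover until the test flips the catchup flag and calls notifyAll(). Stripped of the HA machinery, the gating idiom looks like this (a generic sketch, not HDFS code):

    public class GateSketch {
      private volatile boolean catchup = false;

      synchronized void awaitCatchup() throws InterruptedException {
        while (!catchup) {   // loop guards against spurious wakeups
          wait();
        }
      }

      synchronized void signalCatchup() {
        catchup = true;
        notifyAll();
      }

      public static void main(String[] args) throws Exception {
        final GateSketch gate = new GateSketch();
        Thread waiter = new Thread() {
          @Override public void run() {
            try {
              gate.awaitCatchup();
              System.out.println("released");
            } catch (InterruptedException ignored) {
              // sketch only; nothing to clean up
            }
          }
        };
        waiter.start();
        Thread.sleep(100);   // crude ordering, acceptable in a sketch
        gate.signalCatchup();
        waiter.join();
      }
    }
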
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Wed Oct 16 21:07:28 2013
@@ -17,12 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -101,6 +107,50 @@ public class TestHASafeMode {
     }
   }
   
+  /**
+   * Make sure the client retries when the active NN is in safemode.
+   */
+  @Test (timeout=300000)
+  public void testClientRetrySafeMode() throws Exception {
+    final Map<Path, Boolean> results = Collections
+        .synchronizedMap(new HashMap<Path, Boolean>());
+    final Path test = new Path("/test");
+    // let nn0 enter safemode
+    NameNodeAdapter.enterSafeMode(nn0, false);
+    LOG.info("enter safemode");
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          boolean mkdir = fs.mkdirs(test);
+          LOG.info("mkdir finished, result is " + mkdir);
+          synchronized (TestHASafeMode.this) {
+            results.put(test, mkdir);
+            TestHASafeMode.this.notifyAll();
+          }
+        } catch (Exception e) {
+          LOG.info("Got Exception while calling mkdir", e);
+        }
+      }
+    }.start();
+    
+    // the mkdir should not have taken effect yet, since nn0 is in safemode
+    assertFalse("The directory should not be created while NN is in safemode",
+        fs.exists(test));
+    
+    Thread.sleep(1000);
+    // let nn0 leave safemode
+    NameNodeAdapter.leaveSafeMode(nn0);
+    LOG.info("leave safemode");
+    
+    synchronized (this) {
+      while (!results.containsKey(test)) {
+        this.wait();
+      }
+      assertTrue(results.get(test));
+    }
+  }
+  
   private void restartStandby() throws IOException {
     cluster.shutdownNameNode(1);
     // Set the safemode extension to be lengthy, so that the tests

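testClientRetrySafeMode hands the background mkdir result back to the test thread through a synchronized map plus wait/notifyAll. The handoff in isolation (illustrative names, not the test itself):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    public class HandoffSketch {
      public static void main(String[] args) throws Exception {
        final Map<String, Boolean> results =
            Collections.synchronizedMap(new HashMap<String, Boolean>());
        final Object lock = new Object();

        new Thread() {
          @Override public void run() {
            boolean ok = true;          // stands in for fs.mkdirs(test)
            synchronized (lock) {
              results.put("/test", ok); // publish, then wake the waiter
              lock.notifyAll();
            }
          }
        }.start();

        synchronized (lock) {
          while (!results.containsKey("/test")) {
            lock.wait();                // loop handles spurious wakeups
          }
        }
        System.out.println("mkdir result: " + results.get("/test"));
      }
    }
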
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Oct 16 21:07:28 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
@@ -646,10 +647,14 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       Path linkPath = new Path(link);
-      FileStatus linkStatus = dfs.getFileLinkStatus(linkPath);
+      FileStatus linkStatus = null;
       for (int i = 0; i < CHECKTIMES && linkStatus == null; i++) {
-        Thread.sleep(1000);
-        linkStatus = dfs.getFileLinkStatus(linkPath);
+        try {
+          linkStatus = dfs.getFileLinkStatus(linkPath);
+        } catch (FileNotFoundException fnf) {
+          // Ignored; the link may legitimately not exist yet, so retry.
+          Thread.sleep(1000);
+        }
       }
       return linkStatus != null;
     }
@@ -858,4 +863,4 @@ public class TestRetryCacheWithHA {
           + results.get(op.name));
     }
   }
-}
\ No newline at end of file
+}

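The hunk above moves the first getFileLinkStatus probe inside the retry loop, so a FileNotFoundException on the first attempt no longer aborts the check. The corrected shape, as a generic bounded-polling sketch (names are illustrative):

    import java.io.FileNotFoundException;
    import java.util.concurrent.Callable;

    public class PollSketch {
      /** Probes until a result appears or the attempts run out. */
      static <T> T pollUntilFound(Callable<T> probe, int attempts, long sleepMs)
          throws Exception {
        T result = null;
        for (int i = 0; i < attempts && result == null; i++) {
          try {
            result = probe.call();
          } catch (FileNotFoundException fnf) {
            // Not visible yet; legitimate mid-failover. Pause and retry.
            Thread.sleep(sleepMs);
          }
        }
        return result;  // null means the object never appeared in time
      }
    }
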
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed Oct 16 21:07:28 2013
@@ -192,17 +192,10 @@ public class TestNameNodeMetrics {
     assertCounter("CreateFileOps", 1L, rb);
     assertCounter("FilesCreated", (long)file.depth(), rb);
 
-    // Blocks are stored in a hashmap. Compute its capacity, which
-    // doubles every time the number of entries reach the threshold.
-    int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
-    while (threshold < blockCount) {
-      blockCapacity <<= 1;
-    }
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", blockCount, rb);
-    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
 

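For context on the deleted BlockCapacity assertion: it predicted the block map's capacity, which doubles whenever the entry count crosses capacity times the load factor. A standalone version of that arithmetic, recomputing the threshold on each pass (values are illustrative):

    public class CapacitySketch {
      public static void main(String[] args) {
        float loadFactor = 0.75f;  // assumed map load factor
        int capacity = 16;
        int entries = 100;
        while ((int) (capacity * loadFactor) < entries) {
          capacity <<= 1;          // double once the threshold is crossed
        }
        System.out.println("capacity for " + entries + " entries: " + capacity);
        // prints: capacity for 100 entries: 256
      }
    }
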
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1532910&r1=1532909&r2=1532910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Wed Oct 16 21:07:28 2013
@@ -25,10 +25,12 @@ import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -45,7 +47,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -777,7 +781,40 @@ public class TestSnapshotDeletion {
     assertEquals("user1", statusOfS1.getOwner());
     assertEquals("group1", statusOfS1.getGroup());
   }
-  
+
+  @Test
+  public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
+    cluster.shutdown();
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+
+    final Path path = new Path("/dir");
+    hdfs.mkdirs(path);
+    hdfs.allowSnapshot(path);
+    hdfs.mkdirs(new Path(path, "/test"));
+    hdfs.createSnapshot(path, "s1");
+    UserGroupInformation anotherUser = UserGroupInformation
+        .createRemoteUser("anotheruser");
+    anotherUser.doAs(new PrivilegedAction<Object>() {
+      @Override
+      public Object run() {
+        DistributedFileSystem anotherUserFS = null;
+        try {
+          anotherUserFS = cluster.getFileSystem();
+          anotherUserFS.deleteSnapshot(path, "s1");
+        } catch (IOException e) {
+          fail("Failed to delete snapshot : " + e.getLocalizedMessage());
+        } finally {
+          IOUtils.closeStream(anotherUserFS);
+        }
+        return null;
+      }
+    });
+  }
+
   /** 
    * A test covering the case where the snapshot diff to be deleted is renamed 
    * to its previous snapshot. 

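testDeleteSnapshotWithPermissionsDisabled exercises the snapshot delete as a second principal via UserGroupInformation.doAs. The pattern on its own, assuming hadoop-common on the classpath (the user name is illustrative):

    import java.io.IOException;
    import java.security.PrivilegedAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) {
        UserGroupInformation anotherUser =
            UserGroupInformation.createRemoteUser("anotheruser");
        String who = anotherUser.doAs(new PrivilegedAction<String>() {
          @Override
          public String run() {
            // Everything here executes with "anotheruser" as the caller.
            try {
              return UserGroupInformation.getCurrentUser().getUserName();
            } catch (IOException e) {
              return "unknown";
            }
          }
        });
        System.out.println("ran as: " + who);  // prints: ran as: anotheruser
      }
    }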

