hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1563042 [2/2] - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/qjournal/server/ src/main/java/org/apache/hadoop/hdfs/server...
Date: Fri, 31 Jan 2014 03:24:28 GMT
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1563042&r1=1563041&r2=1563042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Fri Jan 31 03:24:24 2014
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -528,77 +529,111 @@ public class TestCacheDirectives {
 
   @Test(timeout=60000)
   public void testCacheManagerRestart() throws Exception {
-    // Create and validate a pool
-    final String pool = "poolparty";
-    String groupName = "partygroup";
-    FsPermission mode = new FsPermission((short)0777);
-    long limit = 747;
-    dfs.addCachePool(new CachePoolInfo(pool)
-        .setGroupName(groupName)
-        .setMode(mode)
-        .setLimit(limit));
-    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    CachePoolInfo info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
-  
-    // Create some cache entries
-    int numEntries = 10;
-    String entryPrefix = "/party-";
-    long prevId = -1;
-    final Date expiry = new Date();
-    for (int i=0; i<numEntries; i++) {
-      prevId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).
-            setExpiration(
-                CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
-            build());
-    }
-    RemoteIterator<CacheDirectiveEntry> dit
-        = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-    }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-  
-    // Restart namenode
-    cluster.restartNameNode();
+    SecondaryNameNode secondary = null;
+    try {
+      // Start a secondary namenode
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          "0.0.0.0:0");
+      secondary = new SecondaryNameNode(conf);
   
-    // Check that state came back up
-    pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      // Create and validate a pool
+      final String pool = "poolparty";
+      String groupName = "partygroup";
+      FsPermission mode = new FsPermission((short)0777);
+      long limit = 747;
+      dfs.addCachePool(new CachePoolInfo(pool)
+          .setGroupName(groupName)
+          .setMode(mode)
+          .setLimit(limit));
+      RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      CachePoolInfo info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      // Create some cache entries
+      int numEntries = 10;
+      String entryPrefix = "/party-";
+      long prevId = -1;
+      final Date expiry = new Date();
+      for (int i=0; i<numEntries; i++) {
+        prevId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path(entryPrefix + i)).setPool(pool).
+              setExpiration(
+                  CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
+              build());
+      }
+      RemoteIterator<CacheDirectiveEntry> dit
+          = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      
+      // Checkpoint once to set some cache pools and directives on 2NN side
+      secondary.doCheckpoint();
+      
+      // Add some more CacheManager state
+      final String imagePool = "imagePool";
+      dfs.addCachePool(new CachePoolInfo(imagePool));
+      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+        .setPath(new Path("/image")).setPool(imagePool).build());
+
+      // Save a new image to force a fresh fsimage download
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.saveNamespace();
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      // Checkpoint again forcing a reload of FSN state
+      boolean fetchImage = secondary.doCheckpoint();
+      assertTrue("Secondary should have fetched a new fsimage from NameNode",
+          fetchImage);
+
+      // Remove temp pool and directive
+      dfs.removeCachePool(imagePool);
+
+      // Restart namenode
+      cluster.restartNameNode();
+    
+      // Check that state came back up
+      pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      dit = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
   
-    dit = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      long nextId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path("/foobar")).setPool(pool).build());
+      assertEquals(prevId + 1, nextId);
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
     }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-
-    long nextId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path("/foobar")).setPool(pool).build());
-    assertEquals(prevId + 1, nextId);
   }
 
   /**

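[Editor's note] For readers skimming the hunk above: the rewritten testCacheManagerRestart now routes CacheManager state through a SecondaryNameNode checkpoint before the restart, so persistence is exercised via the fsimage rather than only the edit log. A condensed sketch of that flow, using the same Hadoop test APIs that appear in the hunk; the cluster/dfs/conf values are assumed to come from the test class's setup, as in TestCacheDirectives:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
    import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;

    void checkpointThenRestart(MiniDFSCluster cluster,
        DistributedFileSystem dfs, Configuration conf) throws Exception {
      // Bind the 2NN HTTP server to an ephemeral port so parallel tests
      // do not collide on a fixed address.
      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
          "0.0.0.0:0");
      SecondaryNameNode secondary = new SecondaryNameNode(conf);
      try {
        dfs.addCachePool(new CachePoolInfo("pool"));
        // First checkpoint: the 2NN picks up the pool by replaying edits.
        secondary.doCheckpoint();
        // Saving the namespace rolls a fresh fsimage on the NN, so the next
        // checkpoint must download a new image instead of replaying edits.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        boolean fetchedImage = secondary.doCheckpoint();
        assert fetchedImage : "2NN should have fetched the new fsimage";
        // Restart the NN: cache pools and directives must come back from
        // the saved image, which is what the test's assertions verify.
        cluster.restartNameNode();
      } finally {
        secondary.shutdown();
      }
    }
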
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1563042&r1=1563041&r2=1563042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Jan 31 03:24:24 2014
@@ -1635,7 +1635,7 @@ public class TestCheckpoint {
    * Test that the secondary namenode correctly deletes temporary edits
    * on startup.
    */
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testDeleteTemporaryEditsOnStartup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     SecondaryNameNode secondary = null;

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java?rev=1563042&r1=1563041&r2=1563042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java Fri Jan 31 03:24:24 2014
@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -66,7 +66,7 @@ public class TestGetImageServlet {
     AccessControlList acls = Mockito.mock(AccessControlList.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
     ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
     
     // Make sure that NN2 is considered a valid fsimage/edits requestor.
     assertTrue(GetImageServlet.isValidRequestor(context,

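[Editor's note] The HttpServer -> HttpServer2 substitutions here and in the two files below track the rename of Hadoop's embedded Jetty wrapper (org.apache.hadoop.http.HttpServer became HttpServer2, normally constructed through its Builder). For orientation, a minimal sketch of standing up an HttpServer2 under that builder API; the server name and ephemeral localhost endpoint are illustrative, not taken from this commit:

    import java.net.URI;
    import org.apache.hadoop.http.HttpServer2;

    HttpServer2 server = new HttpServer2.Builder()
        .setName("hdfs")                                // webapp name
        .addEndpoint(URI.create("http://localhost:0"))  // ephemeral port
        .setFindPort(true)                              // probe for a free port
        .build();
    server.start();
    // ... add servlets and issue requests, as TestTransferFsImage does ...
    server.stop();
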
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1563042&r1=1563041&r2=1563042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Fri Jan 31 03:24:24 2014
@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -119,7 +119,7 @@ public class TestTransferFsImage {
    */
   @Test(timeout = 5000)
   public void testImageTransferTimeout() throws Exception {
-    HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
     try {
       testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
       testServer.start();

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1563042&r1=1563041&r2=1563042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java Fri Jan 31 03:24:24 2014
@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -89,7 +89,7 @@ public class SnapshotTestHelper {
     setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
     
     setLevel2OFF(DataBlockScanner.LOG);
-    setLevel2OFF(HttpServer.LOG);
+    setLevel2OFF(HttpServer2.LOG);
     setLevel2OFF(DataNode.LOG);
     setLevel2OFF(BlockPoolSliceStorage.LOG);
     setLevel2OFF(LeaseManager.LOG);

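[Editor's note] setLevel2OFF is SnapshotTestHelper's local helper for silencing noisy loggers during snapshot tests; its body sits outside this hunk. A sketch of what such a helper amounts to, assuming the commons-logging-over-log4j setup these tests use (the Log4JLogger cast is the assumption to verify against the branch):

    import org.apache.commons.logging.impl.Log4JLogger;
    import org.apache.log4j.Level;

    // Silence a commons-logging Log backed by log4j, e.g. HttpServer2.LOG.
    static void setLevel2OFF(Object log) {
      ((Log4JLogger) log).getLogger().setLevel(Level.OFF);
    }
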

