hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1339410 [2/3] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ s...
Date: Wed, 16 May 2012 22:47:41 GMT
Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Wed May 16 22:47:37 2012
@@ -21,10 +21,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -43,9 +39,13 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /** This class implements some of tests posted in HADOOP-2658. */
-public class TestFileAppend3 extends junit.framework.TestCase {
+public class TestFileAppend3  {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -64,29 +64,28 @@ public class TestFileAppend3 extends jun
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
 
-  public static Test suite() {
-    return new TestSetup(new TestSuite(TestFileAppend3.class)) {
-      protected void setUp() throws java.lang.Exception {
-        AppendTestUtil.LOG.info("setUp()");
-        conf = new HdfsConfiguration();
-        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
-        buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
-        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
-        fs = (DistributedFileSystem)cluster.getFileSystem();
-      }
-    
-      protected void tearDown() throws Exception {
-        AppendTestUtil.LOG.info("tearDown()");
-        if(fs != null) fs.close();
-        if(cluster != null) cluster.shutdown();
-      }
-    };  
+  @BeforeClass
+  public static void setUp() throws java.lang.Exception {
+    AppendTestUtil.LOG.info("setUp()");
+    conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+    buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
+    fs = (DistributedFileSystem)cluster.getFileSystem();
+  }
+   
+  @AfterClass
+  public static void tearDown() throws Exception {
+    AppendTestUtil.LOG.info("tearDown()");
+    if(fs != null) fs.close();
+    if(cluster != null) cluster.shutdown();
   }
 
   /**
    * TC1: Append on block boundary.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC1() throws Exception {
     final Path p = new Path("/TC1/foo");
     System.out.println("p=" + p);
@@ -115,6 +114,7 @@ public class TestFileAppend3 extends jun
    * TC2: Append on non-block boundary.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC2() throws Exception {
     final Path p = new Path("/TC2/foo");
     System.out.println("p=" + p);
@@ -145,6 +145,7 @@ public class TestFileAppend3 extends jun
    * TC5: Only one simultaneous append.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC5() throws Exception {
     final Path p = new Path("/TC5/foo");
     System.out.println("p=" + p);
@@ -175,6 +176,7 @@ public class TestFileAppend3 extends jun
    * TC7: Corrupted replicas are present.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC7() throws Exception {
     final short repl = 2;
     final Path p = new Path("/TC7/foo");
@@ -224,6 +226,7 @@ public class TestFileAppend3 extends jun
    * TC11: Racing rename
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC11() throws Exception {
     final Path p = new Path("/TC11/foo");
     System.out.println("p=" + p);
@@ -282,6 +285,7 @@ public class TestFileAppend3 extends jun
    * TC12: Append to partial CRC chunk
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testTC12() throws Exception {
     final Path p = new Path("/TC12/foo");
     System.out.println("p=" + p);
@@ -313,6 +317,7 @@ public class TestFileAppend3 extends jun
    * *
    * @throws IOException
    */
+  @Test
   public void testAppendToPartialChunk() throws IOException {
     final Path p = new Path("/partialChunk/foo");
     final int fileLen = 513;
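
The hunks above convert TestFileAppend3 from a JUnit 3 TestCase with a TestSetup-wrapped suite to JUnit 4: the one-time fixture moves into static @BeforeClass/@AfterClass methods and every test method gains @Test. A minimal, self-contained sketch of that lifecycle pattern follows; the class, field, and values below are illustrative only, not part of this commit.

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;
    import static org.junit.Assert.assertEquals;

    public class StaticFixtureSketch {
      // Shared fixture, initialized once for the whole class
      // (in the commit above this role is played by conf, cluster and fs).
      private static StringBuilder fixture;

      @BeforeClass
      public static void setUp() {
        fixture = new StringBuilder("ready");
      }

      @AfterClass
      public static void tearDown() {
        fixture = null;   // release the fixture after all tests have run
      }

      @Test
      public void testFixtureIsInitialized() {
        assertEquals("ready", fixture.toString());
      }
    }

Compared with the old TestSetup wrapper, the annotated lifecycle methods must be public static, and a method without @Test is silently skipped by JUnit 4, which is why each testTC* method above gains the annotation.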

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java Wed May 16 22:47:37 2012
@@ -31,11 +31,13 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
 
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests client lease recovery.
  */
-public class TestFileCreationClient extends junit.framework.TestCase {
+public class TestFileCreationClient {
   static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
 
   {
@@ -46,6 +48,7 @@ public class TestFileCreationClient exte
   }
 
   /** Test lease recovery Triggered by DFSClient. */
+  @Test
   public void testClientTriggeredLeaseRecovery() throws Exception {
     final int REPLICATION = 3;
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed May 16 22:47:37 2012
@@ -101,18 +101,18 @@ public class TestGetBlocks extends TestC
       BlockWithLocations[] locs;
       locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
       assertEquals(locs.length, 2);
-      assertEquals(locs[0].getDatanodes().length, 2);
-      assertEquals(locs[1].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
+      assertEquals(locs[1].getStorageIDs().length, 2);
 
       // get blocks of size BlockSize from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 1 from dataNodes[0]
       locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
       assertEquals(locs.length, 1);
-      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[0].getStorageIDs().length, 2);
 
       // get blocks of size 0 from dataNodes[0]
       getBlocksWithException(namenode, dataNodes[0], 0);     

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Wed May 16 22:47:37 2012
@@ -38,8 +38,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 /**
- * This class tests that a file need not be closed before its
- * data can be read by another client.
+ * This class tests that data nodes are correctly replaced on failure.
  */
 public class TestReplaceDatanodeOnFailure {
   static final Log LOG = AppendTestUtil.LOG;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Wed May 16 22:47:37 2012
@@ -161,7 +161,7 @@ public class TestPBHelper {
 
   private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
     assertEquals(locs1.getBlock(), locs2.getBlock());
-    assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
+    assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
   }
 
   @Test

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Wed May 16 22:47:37 2012
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -115,7 +116,7 @@ public class TestBPOfferService {
             0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
     
-    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
+    Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed May 16 22:47:37 2012
@@ -35,10 +35,12 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -765,6 +767,7 @@ public class NNThroughputBenchmark {
     ArrayList<Block> blocks;
     int nrBlocks; // actual number of blocks
     long[] blockReportList;
+    int dnIdx;
 
     /**
      * Return a a 6 digit integer port.
@@ -780,11 +783,7 @@ public class NNThroughputBenchmark {
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      String ipAddr = DNS.getDefaultIP("default");
-      String hostName = DNS.getDefaultHost("default", "default");
-      dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
-      dnRegistration.setHostName(hostName);
-      dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
+      this.dnIdx = dnIdx;
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }
@@ -800,7 +799,14 @@ public class NNThroughputBenchmark {
     void register() throws IOException {
       // get versions from the namenode
       nsInfo = nameNodeProto.versionRequest();
-      dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
+      dnRegistration = new DatanodeRegistration(
+          new DatanodeID(DNS.getDefaultIP("default"),
+              DNS.getDefaultHost("default", "default"),
+              "", getNodePort(dnIdx),
+              DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+              DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
+          new DataStorage(nsInfo, ""),
+          new ExportedBlockKeys(), VersionInfo.getVersion());
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
@@ -896,12 +902,9 @@ public class NNThroughputBenchmark {
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
-          receivedDNReg =
-            new DatanodeRegistration(dnInfo.getIpAddr(), dnInfo.getXferPort());
-          receivedDNReg.setStorageInfo(
-            new DataStorage(nsInfo, dnInfo.getStorageID()));
-          receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-          receivedDNReg.setIpcPort(dnInfo.getIpcPort());
+          receivedDNReg = new DatanodeRegistration(dnInfo,
+            new DataStorage(nsInfo, dnInfo.getStorageID()),
+            new ExportedBlockKeys(), VersionInfo.getVersion());
           ReceivedDeletedBlockInfo[] rdBlocks = {
             new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed May 16 22:47:37 2012
@@ -527,7 +527,7 @@ public class TestEditLog extends TestCas
     } catch (IOException e) {
       // expected
       assertEquals("Cause of exception should be ChecksumException",
-          e.getCause().getClass(), ChecksumException.class);
+          ChecksumException.class, e.getCause().getClass());
     }
   }
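
The only change in this hunk is the argument order: JUnit's assertEquals(message, expected, actual) takes the expected value before the actual one, so a mismatch is reported as "expected:<ChecksumException> but was:<...>" rather than the reverse. A tiny illustration of the convention (the values are made up):

    import org.junit.Test;
    import static org.junit.Assert.assertEquals;

    public class AssertOrderSketch {
      @Test
      public void expectedComesFirst() {
        Exception wrapper = new RuntimeException(new IllegalStateException("boom"));
        // Expected class first, actual class second; swapping them does not
        // change pass/fail, it only garbles the failure message.
        assertEquals(IllegalStateException.class, wrapper.getCause().getClass());
      }
    }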
 

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Wed May 16 22:47:37 2012
@@ -68,7 +68,7 @@ public class TestEditLogFileOutputStream
     assertEquals(1, validation.getNumTransactions());
     assertEquals("Edit log should have 1MB pre-allocated, plus 4 bytes " +
         "for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     
 
     cluster.getFileSystem().mkdirs(new Path("/tmp"),
@@ -82,7 +82,7 @@ public class TestEditLogFileOutputStream
     assertEquals(2, validation.getNumTransactions());
 
     assertEquals("Edit log should be 1MB long, plus 4 bytes for the version number",
-        PREALLOCATION_LENGTH, editLog.length());
+        EditLogFileOutputStream.PREALLOCATION_LENGTH + 4, editLog.length());
     // 256 blocks for the 1MB of preallocation space
     assertTrue("Edit log disk space used should be at least 257 blocks",
         256 * 4096 <= new DU(editLog, conf).getUsed());

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Wed May 16 22:47:37 2012
@@ -25,6 +25,8 @@ import java.util.HashSet;
 import java.util.Set;
 
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +39,6 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -213,15 +214,129 @@ public class TestNameNodeRecovery {
   public void testSkipEdit() throws IOException {
     runEditLogTest(new EltsTestGarbageInEditLog());
   }
-  
-  /** Test that we can successfully recover from a situation where the last
-   * entry in the edit log has been truncated. */
-  @Test(timeout=180000)
-  public void testRecoverTruncatedEditLog() throws IOException {
+
+  /**
+   * An algorithm for corrupting an edit log.
+   */
+  static interface Corruptor {
+    /*
+     * Corrupt an edit log file.
+     *
+     * @param editFile   The edit log file
+     */
+    public void corrupt(File editFile) throws IOException;
+
+    /*
+     * Explain whether we need to read the log in recovery mode
+     *
+     * @param finalized  True if the edit log in question is finalized.
+     *                   We're a little more lax about reading unfinalized
+     *                   logs.  We will allow a small amount of garbage at
+     *                   the end.  In a finalized log, every byte must be
+     *                   perfect.
+     *
+     * @return           Whether we need to read the log in recovery mode
+     */
+    public boolean needRecovery(boolean finalized);
+
+    /*
+     * Get the name of this corruptor
+     *
+     * @return           The Corruptor name
+     */
+    public String getName();
+  }
+
+  static class TruncatingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Corrupt the last edit
+      long fileLen = editFile.length();
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.setLength(fileLen - 1);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return finalized;
+    }
+
+    @Override
+    public String getName() {
+      return "truncated";
+    }
+  }
+
+  static class PaddingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      for (int i = 0; i < 129; i++) {
+        rwf.write((byte)0);
+      }
+      rwf.write(0xd);
+      rwf.write(0xe);
+      rwf.write(0xa);
+      rwf.write(0xd);
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      // With finalized edit logs, we ignore what's at the end as long as we
+      // can make it to the correct transaction ID.
+      // With unfinalized edit logs, the finalization process ignores garbage
+      // at the end.
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "padFatal";
+    }
+  }
+
+  static class SafePaddingCorruptor implements Corruptor {
+    private byte padByte;
+
+    public SafePaddingCorruptor(byte padByte) {
+      this.padByte = padByte;
+      assert ((this.padByte == 0) || (this.padByte == -1));
+    }
+
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      rwf.write((byte)-1);
+      for (int i = 0; i < 1024; i++) {
+        rwf.write(padByte);
+      }
+      rwf.close();
+    }
+
+    @Override
+    public boolean needRecovery(boolean finalized) {
+      return false;
+    }
+
+    @Override
+    public String getName() {
+      return "pad" + ((int)padByte);
+    }
+  }
+
+  static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
+      throws IOException {
     final String TEST_PATH = "/test/path/dir";
     final int NUM_TEST_MKDIRS = 10;
-    
-    // start a cluster 
+    final boolean needRecovery = corruptor.needRecovery(finalize);
+
+    // start a cluster
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     FileSystem fileSys = null;
@@ -230,6 +345,15 @@ public class TestNameNodeRecovery {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .build();
       cluster.waitActive();
+      if (!finalize) {
+        // Normally, the in-progress edit log would be finalized by
+        // FSEditLog#endCurrentLogSegment.  For testing purposes, we
+        // disable that here.
+        FSEditLog spyLog =
+            spy(cluster.getNameNode().getFSImage().getEditLog());
+        doNothing().when(spyLog).endCurrentLogSegment(true);
+        cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
+      }
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
       FSImage fsimage = namesystem.getFSImage();
@@ -246,13 +370,11 @@ public class TestNameNodeRecovery {
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
     assertTrue("Should exist: " + editFile, editFile.exists());
 
-    // Corrupt the last edit
-    long fileLen = editFile.length();
-    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
-    rwf.setLength(fileLen - 1);
-    rwf.close();
-    
-    // Make sure that we can't start the cluster normally before recovery
+    // Corrupt the edit log
+    corruptor.corrupt(editFile);
+
+    // If needRecovery == true, make sure that we can't start the
+    // cluster normally before recovery
     cluster = null;
     try {
       LOG.debug("trying to start normally (this should fail)...");
@@ -260,16 +382,24 @@ public class TestNameNodeRecovery {
           .format(false).build();
       cluster.waitActive();
       cluster.shutdown();
-      fail("expected the truncated edit log to prevent normal startup");
+      if (needRecovery) {
+        fail("expected the corrupted edit log to prevent normal startup");
+      }
     } catch (IOException e) {
-      // success
+      if (!needRecovery) {
+        LOG.error("Got unexpected failure with " + corruptor.getName() +
+            corruptor, e);
+        fail("got unexpected exception " + e.getMessage());
+      }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
-    // Perform recovery
+
+    // Perform NameNode recovery.
+    // Even if there was nothing wrong previously (needRecovery == false),
+    // this should still work fine.
     cluster = null;
     try {
       LOG.debug("running recovery...");
@@ -277,22 +407,22 @@ public class TestNameNodeRecovery {
           .format(false).startupOption(recoverStartOpt).build();
     } catch (IOException e) {
       fail("caught IOException while trying to recover. " +
-          "message was " + e.getMessage() + 
+          "message was " + e.getMessage() +
           "\nstack trace\n" + StringUtils.stringifyException(e));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
     }
-    
+
     // Make sure that we can start the cluster normally after recovery
     cluster = null;
     try {
       LOG.debug("starting cluster normally after recovery...");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .format(false).build();
-      LOG.debug("testRecoverTruncatedEditLog: successfully recovered the " +
-          "truncated edit log");
+      LOG.debug("successfully recovered the " + corruptor.getName() +
+          " corrupted edit log");
       assertTrue(cluster.getFileSystem().exists(new Path(TEST_PATH)));
     } catch (IOException e) {
       fail("failed to recover.  Error message: " + e.getMessage());
@@ -302,4 +432,36 @@ public class TestNameNodeRecovery {
       }
     }
   }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been truncated. */
+  @Test(timeout=180000)
+  public void testRecoverTruncatedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), true);
+    testNameNodeRecoveryImpl(new TruncatingCorruptor(), false);
+  }
+
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been padded with garbage. */
+  @Test(timeout=180000)
+  public void testRecoverPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), true);
+    testNameNodeRecoveryImpl(new PaddingCorruptor(), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with 0. */
+  @Test(timeout=180000)
+  public void testRecoverZeroPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), false);
+  }
+
+  /** Test that we don't need to recover from a situation where the last
+   * entry in the edit log has been padded with 0xff bytes. */
+  @Test(timeout=180000)
+  public void testRecoverNegativeOnePaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), true);
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), false);
+  }
 }
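
The rewrite above generalizes the old truncated-log test into testNameNodeRecoveryImpl(Corruptor, boolean), so each way of damaging an edit log is a small Corruptor implementation plus a pair of @Test methods. As a sketch of how a further case could be added inside this class, reusing the interface and driver shown above (the class name, the bytes written, and the needRecovery behaviour are hypothetical, not part of this commit):

    static class OverwriteTailCorruptor implements Corruptor {
      @Override
      public void corrupt(File editFile) throws IOException {
        // Hypothetical: damage the last few bytes in place instead of
        // truncating or appending, simulating a partially rewritten tail.
        RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
        try {
          rwf.seek(Math.max(0, editFile.length() - 4));
          rwf.write(new byte[] { 0x7f, 0x7f, 0x7f, 0x7f });
        } finally {
          rwf.close();
        }
      }

      @Override
      public boolean needRecovery(boolean finalized) {
        // Assumed to behave like TruncatingCorruptor: only a finalized log,
        // where every byte must be valid, should force recovery mode.
        return finalized;
      }

      @Override
      public String getName() {
        return "overwriteTail";
      }
    }

    @Test(timeout=180000)
    public void testRecoverOverwrittenTailEditLog() throws IOException {
      testNameNodeRecoveryImpl(new OverwriteTailCorruptor(), true);
      testNameNodeRecoveryImpl(new OverwriteTailCorruptor(), false);
    }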

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java Wed May 16 22:47:37 2012
@@ -256,16 +256,21 @@ public class TestFailureToReadEdits {
     // Shutdown the active NN.
     cluster.shutdownNameNode(0);
     
+    Runtime mockRuntime = mock(Runtime.class);
+    cluster.getNameNode(1).setRuntimeForTesting(mockRuntime);
+    verify(mockRuntime, times(0)).exit(anyInt());
     try {
       // Transition the standby to active.
       cluster.transitionToActive(1);
       fail("Standby transitioned to active, but should not have been able to");
     } catch (ServiceFailedException sfe) {
-      LOG.info("got expected exception: " + sfe.toString(), sfe);
+      Throwable sfeCause = sfe.getCause();
+      LOG.info("got expected exception: " + sfeCause.toString(), sfeCause);
       assertTrue("Standby failed to catch up for some reason other than "
-          + "failure to read logs", sfe.toString().contains(
+          + "failure to read logs", sfeCause.getCause().toString().contains(
               EditLogInputException.class.getName()));
     }
+    verify(mockRuntime, times(1)).exit(anyInt());
   }
   
   private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
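
The hunk above injects a Mockito mock in place of the real Runtime and then uses verify(..., times(1)).exit(anyInt()) to check that the standby would have aborted, without actually killing the test JVM. A standalone sketch of that mock-and-verify idiom (the FatalHandler class is illustrative; only the Mockito calls mirror the test above):

    import org.junit.Test;
    import static org.mockito.Mockito.anyInt;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    public class RuntimeExitSketch {
      /** Illustrative component that asks Runtime to exit on a fatal error. */
      static class FatalHandler {
        private final Runtime runtime;
        FatalHandler(Runtime runtime) { this.runtime = runtime; }
        void fatalError() { runtime.exit(1); }
      }

      @Test
      public void exitIsRequestedExactlyOnce() {
        Runtime mockRuntime = mock(Runtime.class);
        FatalHandler handler = new FatalHandler(mockRuntime);

        verify(mockRuntime, times(0)).exit(anyInt());  // nothing has exited yet
        handler.fatalError();
        verify(mockRuntime, times(1)).exit(anyInt());  // exactly one exit request
      }
    }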

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java Wed May 16 22:47:37 2012
@@ -85,12 +85,52 @@ public class TestPipelinesFailover {
   
   private static final int STRESS_NUM_THREADS = 25;
   private static final int STRESS_RUNTIME = 40000;
+  
+  enum TestScenario {
+    GRACEFUL_FAILOVER {
+      void run(MiniDFSCluster cluster) throws IOException {
+        cluster.transitionToStandby(0);
+        cluster.transitionToActive(1);
+      }
+    },
+    ORIGINAL_ACTIVE_CRASHED {
+      void run(MiniDFSCluster cluster) throws IOException {
+        cluster.restartNameNode(0);
+        cluster.transitionToActive(1);
+      }
+    };
+
+    abstract void run(MiniDFSCluster cluster) throws IOException;
+  }
+  
+  enum MethodToTestIdempotence {
+    ALLOCATE_BLOCK,
+    COMPLETE_FILE;
+  }
 
   /**
    * Tests continuing a write pipeline over a failover.
    */
   @Test(timeout=30000)
-  public void testWriteOverFailover() throws Exception {
+  public void testWriteOverGracefulFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.GRACEFUL_FAILOVER,
+        MethodToTestIdempotence.ALLOCATE_BLOCK);
+  }
+  
+  @Test(timeout=30000)
+  public void testAllocateBlockAfterCrashFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.ORIGINAL_ACTIVE_CRASHED,
+        MethodToTestIdempotence.ALLOCATE_BLOCK);
+  }
+
+  @Test(timeout=30000)
+  public void testCompleteFileAfterCrashFailover() throws Exception {
+    doWriteOverFailoverTest(TestScenario.ORIGINAL_ACTIVE_CRASHED,
+        MethodToTestIdempotence.COMPLETE_FILE);
+  }
+  
+  private void doWriteOverFailoverTest(TestScenario scenario,
+      MethodToTestIdempotence methodToTest) throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     // Don't check replication periodically.
@@ -102,6 +142,8 @@ public class TestPipelinesFailover {
       .numDataNodes(3)
       .build();
     try {
+      int sizeWritten = 0;
+      
       cluster.waitActive();
       cluster.transitionToActive(0);
       Thread.sleep(500);
@@ -112,28 +154,39 @@ public class TestPipelinesFailover {
       
       // write a block and a half
       AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
+      sizeWritten += BLOCK_AND_A_HALF;
       
       // Make sure all of the blocks are written out before failover.
       stm.hflush();
 
       LOG.info("Failing over to NN 1");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      scenario.run(cluster);
 
-      assertTrue(fs.exists(TEST_PATH));
+      // NOTE: explicitly do *not* make any further metadata calls
+      // to the NN here. The next IPC call should be to allocate the next
+      // block. Any other call would notice the failover and not test
+      // idempotence of the operation (HDFS-3031)
+      
       FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
       BlockManagerTestUtil.updateState(ns1.getBlockManager());
       assertEquals(0, ns1.getPendingReplicationBlocks());
       assertEquals(0, ns1.getCorruptReplicaBlocks());
       assertEquals(0, ns1.getMissingBlocksCount());
 
-      // write another block and a half
-      AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
-
+      // If we're testing allocateBlock()'s idempotence, write another
+      // block and a half, so we have to allocate a new block.
+      // Otherise, don't write anything, so our next RPC will be
+      // completeFile() if we're testing idempotence of that operation.
+      if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
+        // write another block and a half
+        AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
+        sizeWritten += BLOCK_AND_A_HALF;
+      }
+      
       stm.close();
       stm = null;
       
-      AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE * 3);
+      AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
     } finally {
       IOUtils.closeStream(stm);
       cluster.shutdown();
@@ -146,7 +199,18 @@ public class TestPipelinesFailover {
    * even when the pipeline was constructed on a different NN.
    */
   @Test(timeout=30000)
-  public void testWriteOverFailoverWithDnFail() throws Exception {
+  public void testWriteOverGracefulFailoverWithDnFail() throws Exception {
+    doTestWriteOverFailoverWithDnFail(TestScenario.GRACEFUL_FAILOVER);
+  }
+  
+  @Test(timeout=30000)
+  public void testWriteOverCrashFailoverWithDnFail() throws Exception {
+    doTestWriteOverFailoverWithDnFail(TestScenario.ORIGINAL_ACTIVE_CRASHED);
+  }
+
+  
+  private void doTestWriteOverFailoverWithDnFail(TestScenario scenario)
+      throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     
@@ -171,8 +235,7 @@ public class TestPipelinesFailover {
       stm.hflush();
 
       LOG.info("Failing over to NN 1");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      scenario.run(cluster);
 
       assertTrue(fs.exists(TEST_PATH));
       
@@ -183,8 +246,8 @@ public class TestPipelinesFailover {
       stm.hflush();
       
       LOG.info("Failing back to NN 0");
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
+      cluster.transitionToStandby(1);
+      cluster.transitionToActive(0);
       
       cluster.stopDataNode(1);
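
The TestScenario enum introduced above uses constant-specific method bodies as a compact strategy pattern: each constant overrides the abstract run(MiniDFSCluster), so one test body can be driven through either a graceful failover or a crash-and-restart of the original active NameNode. A small generic sketch of that enum-with-abstract-method idiom (the names and the StringBuilder stand-in are illustrative):

    public class EnumStrategySketch {
      enum Scenario {
        GRACEFUL {
          @Override
          void run(StringBuilder steps) { steps.append("transitionToStandby(0); transitionToActive(1)"); }
        },
        CRASHED {
          @Override
          void run(StringBuilder steps) { steps.append("restartNameNode(0); transitionToActive(1)"); }
        };

        /** Each constant supplies its own failover behaviour. */
        abstract void run(StringBuilder steps);
      }

      public static void main(String[] args) {
        for (Scenario scenario : Scenario.values()) {
          StringBuilder steps = new StringBuilder(scenario + ": ");
          scenario.run(steps);               // dispatches to the constant-specific body
          System.out.println(steps);
        }
      }
    }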
       

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java?rev=1339410&r1=1339409&r2=1339410&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java Wed May 16 22:47:37 2012
@@ -67,8 +67,8 @@ public class TestStateTransitionFailure 
         fail("Transitioned to active but should not have been able to.");
       } catch (ServiceFailedException sfe) {
         assertExceptionContains("Error encountered requiring NN shutdown. " +
-            "Shutting down immediately.", sfe);
-        LOG.info("got expected exception", sfe);
+            "Shutting down immediately.", sfe.getCause());
+        LOG.info("got expected exception", sfe.getCause());
       }
       verify(mockRuntime, times(1)).exit(anyInt());
     } finally {


