hadoop-hdfs-commits mailing list archives

From: c..@apache.org
Subject: svn commit: r888084 - in /hadoop/hdfs/trunk: CHANGES.txt src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
Date: Mon, 07 Dec 2009 19:00:05 GMT
Author: cos
Date: Mon Dec  7 19:00:04 2009
New Revision: 888084

URL: http://svn.apache.org/viewvc?rev=888084&view=rev
Log:
HDFS-804. New unit tests for concurrent lease recovery. Contributed by Konstantin Boudnik
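
The new tests exercise FSNamesystem's lease recovery and commitBlockSynchronization paths
against mocked NameNode internals rather than a running mini-cluster. Below is a minimal
sketch of the Mockito stubbing pattern the patch builds on; the class name
LeaseRecoveryMockSketch and the helper mockTwoBlocks are hypothetical, and the snippet
assumes it lives in the org.apache.hadoop.hdfs.server.namenode package so the
package-private block classes from the diff are visible.

package org.apache.hadoop.hdfs.server.namenode;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hdfs.server.common.HdfsConstants;

// Sketch only: stub the per-block under-construction states that
// FSNamesystem.internalReleaseLease() inspects during lease recovery.
public class LeaseRecoveryMockSketch {
  static BlockInfo[] mockTwoBlocks() {
    BlockInfo penultimate = mock(BlockInfo.class);
    BlockInfoUnderConstruction last = mock(BlockInfoUnderConstruction.class);
    // Penultimate block already committed; last block still under
    // construction -- the state that lease recovery has to resolve.
    when(penultimate.getBlockUCState())
      .thenReturn(HdfsConstants.BlockUCState.COMMITTED);
    when(last.getBlockUCState())
      .thenReturn(HdfsConstants.BlockUCState.UNDER_CONSTRUCTION);
    return new BlockInfo[] { penultimate, last };
  }
}

The mockFileBlocks() helper in the diff below generalizes this pattern, parameterizing the
number of blocks (0, 1, or 2) and whether the last block is registered in the blocksMap.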

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=888084&r1=888083&r2=888084&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Dec  7 19:00:04 2009
@@ -34,6 +34,8 @@
 
     HDFS-519. Create new tests for lease recovery (cos)
 
+    HDFS-804. New unit tests for concurrent lease recovery (cos)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=888084&r1=888083&r2=888084&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Mon Dec  7 19:00:04 2009
@@ -19,15 +19,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static junit.framework.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -39,13 +30,20 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
+import static org.junit.Assert.assertFalse;
 import org.junit.Before;
 import org.junit.Test;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+import java.io.IOException;
 
 public class TestNNLeaseRecovery {
   public static final Log LOG = LogFactory.getLog(TestNNLeaseRecovery.class);
@@ -128,8 +126,8 @@
     PermissionStatus ps =
       new PermissionStatus("test", "test", new FsPermission((short)0777));
     
-    mockFileBlocks(null, 
-      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps);
+    mockFileBlocks(2, null, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
     
     fsn.internalReleaseLease(lm, file.toString(), null);
     assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -152,8 +150,31 @@
     PermissionStatus ps =
       new PermissionStatus("test", "test", new FsPermission((short)0777));
 
-    mockFileBlocks(HdfsConstants.BlockUCState.COMMITTED, 
-      HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps);
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
+
+    fsn.internalReleaseLease(lm, file.toString(), null);
+    assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
+      "AlreadyBeingCreatedException here", false);
+  }
+  
+  /**
+   * Mocks FSNamesystem instance, adds an empty file with 1 block
+   * and invokes lease recovery method. 
+   * AlreadyBeingCreatedException is expected.
+   * @throws AlreadyBeingCreatedException as the result
+   */
+  @Test(expected=AlreadyBeingCreatedException.class)
+  public void testInternalReleaseLease_1blocks () throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    LeaseManager.Lease lm = mock(LeaseManager.Lease.class);
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+
+    mockFileBlocks(1, null, HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
 
     fsn.internalReleaseLease(lm, file.toString(), null);
     assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
@@ -176,25 +197,169 @@
     PermissionStatus ps =
       new PermissionStatus("test", "test", new FsPermission((short)0777));
     
-    mockFileBlocks(HdfsConstants.BlockUCState.COMMITTED, 
-      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps);
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
         
     assertFalse("False is expected in return in this case",
       fsn.internalReleaseLease(lm, file.toString(), null));
   }
 
-  private void mockFileBlocks(HdfsConstants.BlockUCState penUltState,
-                           HdfsConstants.BlockUCState lastState,
-                           Path file, DatanodeDescriptor dnd, 
-                           PermissionStatus ps) throws IOException {
+  @Test
+  public void testCommitBlockSynchronization_BlockNotFound () 
+    throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    long recoveryId = 2002;
+    long newSize = 273487234;
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+    
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
+    
+    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); 
+    try {
+      fsn.commitBlockSynchronization(lastBlock,
+        recoveryId, newSize, true, false, new DatanodeID[1]);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().startsWith("Block (="));
+    }
+  }
+  
+  @Test
+  public void testCommitBlockSynchronization_notUR () 
+    throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    long recoveryId = 2002;
+    long newSize = 273487234;
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+    
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.COMPLETE, file, dnd, ps, true);
+    
+    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+    when(lastBlock.isComplete()).thenReturn(true);
+    
+    try {
+      fsn.commitBlockSynchronization(lastBlock,
+        recoveryId, newSize, true, false, new DatanodeID[1]);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().startsWith("Unexpected block (="));
+    }
+  }
+  
+  @Test
+  public void testCommitBlockSynchronization_WrongGreaterRecoveryID() 
+    throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    long recoveryId = 2002;
+    long newSize = 273487234;
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+    
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+    
+    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100);
+    
+    try {
+      fsn.commitBlockSynchronization(lastBlock,
+        recoveryId, newSize, true, false, new DatanodeID[1]);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not
match current recovery id " + (recoveryId-100)));
+    }
+  }  
+  
+  @Test
+  public void testCommitBlockSynchronization_WrongLesserRecoveryID() 
+    throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    long recoveryId = 2002;
+    long newSize = 273487234;
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+    
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+    
+    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100);
+    
+    try {           
+      fsn.commitBlockSynchronization(lastBlock,
+        recoveryId, newSize, true, false, new DatanodeID[1]);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not
match current recovery id " + (recoveryId+100)));
+    }
+  }
+
+  @Test
+  public void testCommitBlockSynchronization_EqualRecoveryID() 
+    throws IOException {
+    LOG.debug("Running " + GenericTestUtils.getMethodName());
+    long recoveryId = 2002;
+    long newSize = 273487234;
+    Path file = 
+      spy(new Path("/" + GenericTestUtils.getMethodName() + "_test.dat"));
+    DatanodeDescriptor dnd = mock(DatanodeDescriptor.class);
+    PermissionStatus ps =
+      new PermissionStatus("test", "test", new FsPermission((short)0777));
+    
+    mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
+      HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, true);
+    
+    BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock();
+    when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId);
+    
+    boolean recoveryChecked = false;
+    try {
+      fsn.commitBlockSynchronization(lastBlock,
+        recoveryId, newSize, true, false, new DatanodeID[1]);
+    } catch (NullPointerException ioe) {
+      // It is fine to get NPE here because the datanodes array is empty
+      recoveryChecked = true;
+    }
+    assertTrue("commitBlockSynchronization had to throw NPE here", recoveryChecked);
+  }
+
+  private void mockFileBlocks(int fileBlocksNumber,
+                              HdfsConstants.BlockUCState penUltState,
+                              HdfsConstants.BlockUCState lastState,
+                              Path file, DatanodeDescriptor dnd,
+                              PermissionStatus ps,
+                              boolean setStoredBlock) throws IOException {
     BlockInfo b = mock(BlockInfo.class);
     BlockInfoUnderConstruction b1 = mock(BlockInfoUnderConstruction.class);
     when(b.getBlockUCState()).thenReturn(penUltState);
     when(b1.getBlockUCState()).thenReturn(lastState);
-    BlockInfo[] blocks = new BlockInfo[]{b, b1};
+    BlockInfo[] blocks;
+    switch (fileBlocksNumber) {
+      case 0:
+        blocks = new BlockInfo[0];
+        break;
+      case 1:
+        blocks = new BlockInfo[]{b1};
+        break;
+      default:
+        blocks = new BlockInfo[]{b, b1};
+    }
 
     FSDirectory fsDir = mock(FSDirectory.class);
     INodeFileUnderConstruction iNFmock = mock(INodeFileUnderConstruction.class);
+
     fsn.dir = fsDir;
     FSImage fsImage = mock(FSImage.class);
     FSEditLog editLog = mock(FSEditLog.class);
@@ -211,6 +376,11 @@
     fsDir.addFile(file.toString(), ps, (short)3, 1l, "test", 
       "test-machine", dnd, 1001l);
 
+    if (setStoredBlock) {
+      when(b1.getINode()).thenReturn(iNFmock);
+      fsn.blockManager.blocksMap.addINode(b1, iNFmock);
+    }
+
     when(fsDir.getFileINode(anyString())).thenReturn(iNFmock);
   }
 


