hadoop-hdfs-commits mailing list archives

From: kih...@apache.org
Subject: svn commit: r1440201 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/java/org/apache/hadoop/hdfs/server/protocol/ src/test/java/org/apache/hadoop/hdfs/se...
Date: Tue, 29 Jan 2013 22:54:36 GMT
Author: kihwal
Date: Tue Jan 29 22:54:36 2013
New Revision: 1440201

URL: http://svn.apache.org/viewvc?rev=1440201&view=rev
Log:
HDFS-4288. NN accepts incremental BR as IBR in safemode. Contributed by Daryn Sharp.
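
In short: processReport() previously used node.numBlocks() > 0 to decide whether a block report received during startup safemode was a non-initial report to be discarded. Since incremental block reports can add blocks to a node before its first full report arrives, the real initial report could be discarded as well. The patch instead tracks a per-DatanodeDescriptor flag that is set once a full block report has been processed and cleared when the node re-registers. Below is a minimal, self-contained sketch of that logic; DatanodeDescriptor, receivedBlockReport(), isFirstBlockReport(), updateRegInfo() and numBlocks() come from the diff below, while the class name, processReportInStartupSafeMode() and addBlockFromIncrementalReport() are illustrative stand-ins with simplified signatures.

// Simplified model of the behavior introduced by this patch (not the actual
// Hadoop classes): a per-datanode flag records whether a full block report
// has been processed, instead of inferring it from the node's block count.
public class SafeModeBlockReportSketch {

  /** Stand-in for DatanodeDescriptor: carries the new processedBlockReport flag. */
  static class DatanodeDescriptor {
    private boolean processedBlockReport = false; // reset when the node re-registers
    private int numBlocks = 0;                    // may be > 0 from incremental reports

    void receivedBlockReport() { processedBlockReport = true; }
    boolean isFirstBlockReport() { return !processedBlockReport; }
    void updateRegInfo() { processedBlockReport = false; } // DN restarted / re-registered
    int numBlocks() { return numBlocks; }
    void addBlockFromIncrementalReport() { numBlocks++; }
  }

  /** Stand-in for the guard in BlockManager.processReport() during startup safemode. */
  static void processReportInStartupSafeMode(DatanodeDescriptor node) {
    // Old (buggy) guard: node.numBlocks() > 0 -- an incremental block report
    // received before the first full report makes this true, so the initial
    // full report would be discarded.
    // New guard: discard only if a full report was already processed.
    if (!node.isFirstBlockReport()) {
      System.out.println("discarded non-initial block report (startup phase)");
      return;
    }
    // ... process the report ...
    node.receivedBlockReport();
    System.out.println("processed initial block report");
  }

  public static void main(String[] args) {
    DatanodeDescriptor node = new DatanodeDescriptor();

    // An incremental report arrives first: the node now "has" blocks,
    // but the initial full report must still be accepted.
    node.addBlockFromIncrementalReport();
    processReportInStartupSafeMode(node);  // processed
    processReportInStartupSafeMode(node);  // discarded (second report)

    node.updateRegInfo();                  // DN re-registers
    processReportInStartupSafeMode(node);  // processed again
  }
}
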

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jan 29 22:54:36 2013
@@ -11,6 +11,7 @@ Release 0.23.7 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
+    HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal)
 
 Release 0.23.6 - UNRELEASED
 

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Tue Jan 29 22:54:36 2013
@@ -77,11 +77,7 @@ public class BlockInfo extends Block imp
   DatanodeDescriptor getDatanode(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    DatanodeDescriptor node = (DatanodeDescriptor)triplets[index*3];
-    assert node == null || 
-        DatanodeDescriptor.class.getName().equals(node.getClass().getName()) : 
-              "DatanodeDescriptor is expected at " + index*3;
-    return node;
+    return (DatanodeDescriptor)triplets[index*3];
   }
 
   BlockInfo getPrevious(int index) {

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Jan 29 22:54:36 2013
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -1394,7 +1395,7 @@ public class BlockManager {
 
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
-      if (namesystem.isInStartupSafeMode() && node.numBlocks() > 0) {
+      if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
         blockLog.info("BLOCK* processReport: "
             + "discarded non-initial block report from " + nodeID.getName()
             + " because namenode still in startup phase");
@@ -1408,13 +1409,17 @@ public class BlockManager {
       } else {
         processReport(node, newReport);
       }
+      node.receivedBlockReport();
     } finally {
       endTime = Util.now();
       namesystem.writeUnlock();
     }
 
     // Log the block report processing stats from Namenode perspective
-    NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
+    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
+    if (metrics != null) {
+      metrics.addBlockReport((int) (endTime - startTime));
+    }
     blockLog.info("BLOCK* processReport: from "
         + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Tue Jan 29 22:54:36 2013
@@ -132,6 +132,13 @@ public class DatanodeDescriptor extends 
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   private int volumeFailures = 0;
+  
+  /* Set to true after processing first block report.  Will be reset to false
+   * if the node re-registers.  This enables a NN in safe-mode to reprocess
+   * the first block report in case the DN is now reporting different blocks
+   */
+  private boolean processedBlockReport = false;
+  
   /** 
    * When set to true, the node is not in include list and is not allowed
    * to communicate with the namenode
@@ -575,6 +582,7 @@ public class DatanodeDescriptor extends 
    * @param nodeReg DatanodeID to update registration for.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
+    processedBlockReport = false; // must re-process IBR after re-registration
     super.updateRegInfo(nodeReg);
   }
 
@@ -592,5 +600,11 @@ public class DatanodeDescriptor extends 
     this.bandwidth = bandwidth;
   }
 
+  public void receivedBlockReport() {
+    processedBlockReport = true;
+  }
 
+  boolean isFirstBlockReport() {
+    return !processedBlockReport;
+  }
 }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Tue Jan 29 22:54:36 2013
@@ -318,7 +318,7 @@ public class DatanodeManager {
   }
 
   /** Add a datanode. */
-  private void addDatanode(final DatanodeDescriptor node) {
+  void addDatanode(final DatanodeDescriptor node) {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove  from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Tue Jan 29 22:54:36 2013
@@ -59,6 +59,10 @@ implements Writable, NodeRegistration {
     this("");
   }
   
+  public DatanodeRegistration(DatanodeID node) {
+    super(node);
+  }
+  
   /**
    * Create DatanodeRegistration
    */

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1440201&r1=1440200&r2=1440201&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Tue Jan 29 22:54:36 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -36,10 +37,12 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableList;
@@ -442,4 +445,69 @@ public class TestBlockManager {
             new NumberReplicas(),
             UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
   }
+
+  @Test
+  public void testSafeModeIBR() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg = new DatanodeRegistration(node);
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+    
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy    
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report, should be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+    // send block report again, should NOT be processed
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node, never()).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+
+    // re-register as if node restarted, should update existing node
+    bm.getDatanodeManager().removeDatanode(node);
+    reset(node);
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    verify(node).updateRegInfo(nodeReg);
+    assertTrue(node.isFirstBlockReport()); // ready for report again
+    // send block report, should be processed after restart
+    reset(node);
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
+  
+  @Test
+  public void testSafeModeIBRAfterIncremental() throws Exception {
+    DatanodeDescriptor node = spy(nodes.get(0));
+    node.setStorageID("dummy-storage");
+    node.isAlive = true;
+
+    DatanodeRegistration nodeReg = new DatanodeRegistration(node);
+    BlockListAsLongs blockReport = new BlockListAsLongs(null, null);
+
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node); // swap in spy    
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertTrue(node.isFirstBlockReport());
+    // send block report while pretending to already have blocks
+    reset(node);
+    doReturn(1).when(node).numBlocks();
+    bm.processReport(node, "pool", new BlockListAsLongs(null, null));
+    verify(node).receivedBlockReport();
+    assertFalse(node.isFirstBlockReport());
+  }
 }


