hadoop-hdfs-commits mailing list archives

From kih...@apache.org
Subject svn commit: r1547179 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/
Date Mon, 02 Dec 2013 20:27:37 GMT
Author: kihwal
Date: Mon Dec  2 20:27:37 2013
New Revision: 1547179

URL: http://svn.apache.org/r1547179
Log:
svn merge -c 1547173 merging from trunk to branch-2 to fix: HDFS-5557. Write pipeline recovery
for the last packet in the block may cause rejection of valid replicas.
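
In short, as the hunks below show: setGenerationStampAndVerifyReplicas() previously returned
early when no replicas were recorded, leaving the block's generation stamp unset;
commitBlock() did not re-verify replicas against the committed stamp; and replicas were
recorded from the NameNode's stored block rather than the block the datanode actually
reported. Together these could leave the NameNode rejecting valid replicas after a
pipeline recovery of the last packet in a block.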

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1547179&r1=1547178&r2=1547179&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Dec  2 20:27:37 2013
@@ -3695,6 +3695,9 @@ Release 0.23.10 - UNRELEASED
 
     HDFS-5526. Datanode cannot roll back to previous layout version (kihwal)
 
+    HDFS-5557. Write pipeline recovery for the last packet in the block may
+    cause rejection of valid replicas. (kihwal)
+
 Release 0.23.9 - 2013-07-08
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1547179&r1=1547178&r2=1547179&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Dec  2 20:27:37 2013
@@ -840,7 +840,6 @@ public class DFSOutputStream extends FSO
           // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
           // a client waiting on close() will be aware that the flush finished.
           synchronized (dataQueue) {
-            assert dataQueue.size() == 1;
             Packet endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
             assert endOfBlockPacket.lastPacketInBlock;
             assert lastAckedSeqno == endOfBlockPacket.seqno - 1;
@@ -1044,7 +1043,7 @@ public class DFSOutputStream extends FSO
         
         // set up the pipeline again with the remaining nodes
         if (failPacket) { // for testing
-          success = createBlockOutputStream(nodes, newGS-1, isRecovery);
+          success = createBlockOutputStream(nodes, newGS, isRecovery);
           failPacket = false;
           try {
             // Give DNs time to send in bad reports. In real situations,

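The first hunk drops an assertion on the queue size that the fixed recovery path can no
longer rely on; the second fixes the fault-injection path so the test reconnects with the
new generation stamp (newGS) rather than the stale newGS-1. A minimal, self-contained
sketch of the invariant being exercised, using hypothetical names (AcceptingDatanode,
PipelineRecoverySketch) that are not HDFS API:

// Hypothetical, self-contained sketch (not HDFS API): a datanode taking part
// in a recovered pipeline expects the generation stamp agreed on during
// recovery; reconnecting with the stale stamp would be refused.
class AcceptingDatanode {
    private final long expectedGenStamp;

    AcceptingDatanode(long expectedGenStamp) {
        this.expectedGenStamp = expectedGenStamp;
    }

    boolean acceptWriteStream(long genStamp) {
        return genStamp == expectedGenStamp;
    }
}

class PipelineRecoverySketch {
    public static void main(String[] args) {
        long newGS = 1002L; // stamp issued for the recovered pipeline
        AcceptingDatanode dn = new AcceptingDatanode(newGS);
        System.out.println(dn.acceptWriteStream(newGS));     // true: valid reconnect
        System.out.println(dn.acceptWriteStream(newGS - 1)); // false: stale stamp
    }
}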
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java?rev=1547179&r1=1547178&r2=1547179&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java Mon Dec  2 20:27:37 2013
@@ -235,6 +235,8 @@ public class BlockInfoUnderConstruction 
    * @param genStamp  The final generation stamp for the block.
    */
   public void setGenerationStampAndVerifyReplicas(long genStamp) {
+    // Set the generation stamp for the block.
+    setGenerationStamp(genStamp);
     if (replicas == null)
       return;
 
@@ -244,12 +246,9 @@ public class BlockInfoUnderConstruction 
       if (genStamp != r.getGenerationStamp()) {
         r.getExpectedLocation().removeBlock(this);
         NameNode.blockStateChangeLog.info("BLOCK* Removing stale replica "
-            + "from location: " + r);
+            + "from location: " + r.getExpectedLocation());
       }
     }
-
-    // Set the generation stamp for the block.
-    setGenerationStamp(genStamp);
   }
 
   /**
@@ -264,6 +263,8 @@ public class BlockInfoUnderConstruction 
           + block.getBlockId() + ", expected id = " + getBlockId());
     blockUCState = BlockUCState.COMMITTED;
     this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+    // Sort out invalid replicas.
+    setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 
   /**

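Two things change here: setGenerationStamp(genStamp) now runs before the replica scan, so
the block records its final stamp even when replicas == null (the old code returned early
and never set it), and commitBlock() now calls setGenerationStampAndVerifyReplicas() so
stale replicas are sorted out at commit time. A standalone model of the corrected flow,
not the real HDFS classes:

import java.util.ArrayList;
import java.util.List;

// Standalone model (not the HDFS classes) of the reordered logic: the final
// generation stamp is recorded first, then replicas reported under a
// different stamp are dropped as stale.
class UnderConstructionBlockModel {
    long generationStamp;
    final List<Long> replicaStamps = new ArrayList<>();

    void setGenerationStampAndVerifyReplicas(long genStamp) {
        // Set the stamp up front, mirroring the fix: the block carries the
        // final stamp even when no replicas have been recorded yet.
        this.generationStamp = genStamp;
        replicaStamps.removeIf(rs -> rs != genStamp);
    }

    void commitBlock(long committedGenStamp) {
        // After the fix, committing also sorts out invalid replicas instead
        // of leaving stale ones around to be rejected later.
        setGenerationStampAndVerifyReplicas(committedGenStamp);
    }
}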
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1547179&r1=1547178&r2=1547179&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Mon Dec  2 20:27:37 2013
@@ -1556,13 +1556,15 @@ public class BlockManager {
    * Besides the block in question, it provides the ReplicaState
    * reported by the datanode in the block report. 
    */
-  private static class StatefulBlockInfo {
+  static class StatefulBlockInfo {
     final BlockInfoUnderConstruction storedBlock;
+    final Block reportedBlock;
     final ReplicaState reportedState;
     
     StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, 
-        ReplicaState reportedState) {
+        Block reportedBlock, ReplicaState reportedState) {
       this.storedBlock = storedBlock;
+      this.reportedBlock = reportedBlock;
       this.reportedState = reportedState;
     }
   }
@@ -1715,7 +1717,7 @@ public class BlockManager {
 
     // Process the blocks on each queue
     for (StatefulBlockInfo b : toUC) { 
-      addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState);
+      addStoredBlockUnderConstruction(b, node);
     }
     for (Block b : toRemove) {
       removeStoredBlock(b, node);
@@ -1939,7 +1941,7 @@ assert storedBlock.findDatanode(dn) < 0 
 
     if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
       toUC.add(new StatefulBlockInfo(
-          (BlockInfoUnderConstruction)storedBlock, reportedState));
+          (BlockInfoUnderConstruction)storedBlock, block, reportedState));
       return storedBlock;
     }
 
@@ -2110,13 +2112,11 @@ assert storedBlock.findDatanode(dn) < 0 
     }
   }
   
-  void addStoredBlockUnderConstruction(
-      BlockInfoUnderConstruction block, 
-      DatanodeDescriptor node, 
-      ReplicaState reportedState) 
-  throws IOException {
-    block.addReplicaIfNotPresent(node, block, reportedState);
-    if (reportedState == ReplicaState.FINALIZED && block.findDatanode(node) < 0) {
+  void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
+      DatanodeDescriptor node) throws IOException {
+    BlockInfoUnderConstruction block = ucBlock.storedBlock;
+    block.addReplicaIfNotPresent(node, ucBlock.reportedBlock, ucBlock.reportedState);
+    if (ucBlock.reportedState == ReplicaState.FINALIZED && block.findDatanode(node) < 0) {
       addStoredBlock(block, node, null, true);
     }
   }
@@ -2683,7 +2683,7 @@ assert storedBlock.findDatanode(dn) < 0 
       : "The block should be only in one of the lists.";
 
     for (StatefulBlockInfo b : toUC) { 
-      addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState);
+      addStoredBlockUnderConstruction(b, node);
     }
     long numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {

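StatefulBlockInfo gains a reportedBlock field so that addStoredBlockUnderConstruction()
can record the replica using the block exactly as the datanode reported it (with the
reported generation stamp), instead of reusing the NameNode's stored block as both
arguments. A compact illustration with simplified stand-in types, not the real
BlockManager classes:

// Simplified stand-in types; the real classes live in
// org.apache.hadoop.hdfs.server.blockmanagement.
class BlockStamp {
    final long id;
    final long genStamp;
    BlockStamp(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
}

class StatefulBlockInfoSketch {
    final BlockStamp storedBlock;   // the NameNode's view of the block
    final BlockStamp reportedBlock; // what the datanode actually reported
    final String reportedState;

    StatefulBlockInfoSketch(BlockStamp storedBlock, BlockStamp reportedBlock,
                            String reportedState) {
        this.storedBlock = storedBlock;
        this.reportedBlock = reportedBlock;
        this.reportedState = reportedState;
    }

    void addStoredBlockUnderConstruction() {
        // The pre-fix code passed storedBlock here, discarding the stamp the
        // datanode reported; the fix threads reportedBlock through instead.
        System.out.println("record replica with reported stamp "
            + reportedBlock.genStamp);
    }

    public static void main(String[] args) {
        BlockStamp stored = new BlockStamp(7L, 1001L);   // NN still holds the old stamp
        BlockStamp reported = new BlockStamp(7L, 1002L); // DN reports the recovered one
        new StatefulBlockInfoSketch(stored, reported, "FINALIZED")
            .addStoredBlockUnderConstruction();
    }
}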
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=1547179&r1=1547178&r2=1547179&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Mon Dec  2 20:27:37 2013
@@ -139,16 +139,10 @@ public class TestClientProtocolForPipeli
 
       Path file = new Path("dataprotocol1.dat");
       Mockito.when(faultInjector.failPacket()).thenReturn(true);
-      try {
-        DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
-      } catch (IOException e) {
-        // completeFile() should fail.
-        Assert.assertTrue(e.getMessage().startsWith("Unable to close file"));
-        return;
-      }
+      DFSTestUtil.createFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);
 
-      // At this point, NN let data corruption to happen. 
-      // Before failing test, try reading the file. It should fail.
+      // At this point, NN should have accepted only valid replicas.
+      // Read should succeed.
       FSDataInputStream in = fileSys.open(file);
       try {
         int c = in.read();
@@ -158,8 +152,6 @@ public class TestClientProtocolForPipeli
         Assert.fail("Block is missing because the file was closed with"
             + " corrupt replicas.");
       }
-      Assert.fail("The file was closed with corrupt replicas, but read still"
-          + " works!");
     } finally {
       DFSClientFaultInjector.instance = oldInjector;
       if (cluster != null) {


