hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From e..@apache.org
Subject svn commit: r1037109 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
Date Sat, 20 Nov 2010 03:48:49 GMT
Author: eli
Date: Sat Nov 20 03:48:48 2010
New Revision: 1037109

URL: http://svn.apache.org/viewvc?rev=1037109&view=rev
Log:
HDFS-1467. Append pipeline never succeeds with more than one replica. Contributed by Todd Lipcon

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1037109&r1=1037108&r2=1037109&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sat Nov 20 03:48:48 2010
@@ -396,6 +396,9 @@ Release 0.22.0 - Unreleased
     HDFS-1167. New property for local conf directory in system-test-hdfs.xml
     file. (Vinay Thota via cos)
 
+    HDFS-1467. Append pipeline never succeeds with more than one replica.
+    (Todd Lipcon via eli)
+
 Release 0.21.1 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1037109&r1=1037108&r2=1037109&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Sat Nov 20 03:48:48 2010
@@ -248,6 +248,11 @@ class DataXceiver extends DataTransferPr
                 " tcp no delay " + s.getTcpNoDelay());
     }
 
+    // We later mutate block's generation stamp and length, but we need to
+    // forward the original version of the block to downstream mirrors, so
+    // make a copy here.
+    final Block originalBlock = new Block(block);
+
     block.setNumBytes(dataXceiverServer.estimateBlockSize);
     LOG.info("Receiving block " + block + 
              " src: " + remoteAddress +
@@ -322,7 +327,7 @@ class DataXceiver extends DataTransferPr
           mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
 
           // Write header: Copied from DFSClient.java!
-          DataTransferProtocol.Sender.opWriteBlock(mirrorOut, block,
+          DataTransferProtocol.Sender.opWriteBlock(mirrorOut, originalBlock,
               pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client,
               srcDataNode, targets, blockToken);
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java?rev=1037109&r1=1037108&r2=1037109&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java Sat Nov 20 03:48:48 2010
@@ -103,12 +103,16 @@ public class TestPipelines {
     List<LocatedBlock> lb = cluster.getNameNode().getBlockLocations(
       filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
 
-    Replica r = DataNodeAdapter.fetchReplicaInfo(cluster.getDataNodes().get(0),
-      lb.get(0).getBlock().getBlockId());
-    assertTrue("Replica shouldn'e be null", r != null);
-    assertEquals(
-      "Should be RBW replica after sequence of calls append()/write()/hflush()",
-      HdfsConstants.ReplicaState.RBW, r.getState());
+    for (DataNode dn : cluster.getDataNodes()) {
+      Replica r = DataNodeAdapter.fetchReplicaInfo(
+        dn, lb.get(0).getBlock().getBlockId());
+
+      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
+      assertEquals(
+        "Should be RBW replica on " + dn + " after sequence of calls " +
+        "append()/write()/hflush()",
+        HdfsConstants.ReplicaState.RBW, r.getState());
+    }
     ofs.close();
   }
 



Mime
View raw message