hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hair...@apache.org
Subject svn commit: r727214 - in /hadoop/core/branches/branch-0.19: ./ CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
Date Tue, 16 Dec 2008 23:26:31 GMT
Author: hairong
Date: Tue Dec 16 15:26:30 2008
New Revision: 727214

URL: http://svn.apache.org/viewvc?rev=727214&view=rev
Log:
Merge -r 727211:727212 from trunk to move the change of HADOOP-4810 into branch 0.19.

Modified:
    hadoop/core/branches/branch-0.19/   (props changed)
    hadoop/core/branches/branch-0.19/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

Propchange: hadoop/core/branches/branch-0.19/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 16 15:26:30 2008
@@ -1 +1 @@
-/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881,713888,720602,723013,723460,723831,723918,724883
+/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881,713888,720602,723013,723460,723831,723918,724883,727212

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=727214&r1=727213&r2=727214&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Tue Dec 16 15:26:30 2008
@@ -1093,6 +1093,8 @@
     HADOOP-4857. Fixes TestUlimit to have exactly 1 map in the jobs spawned.
     (Ravi Gummadi via ddas)
 
+    HADOOP-4810. Data lost at cluster startup time. (hairong)
+
 Release 0.18.2 - 2008-11-03
 
   BUG FIXES

Propchange: hadoop/core/branches/branch-0.19/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Dec 16 15:26:30 2008
@@ -1 +1 @@
-/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883
+/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727212

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=727214&r1=727213&r2=727214&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Dec 16 15:26:30 2008
@@ -1566,13 +1566,13 @@
     } else {
       INodeFile inode = blocksMap.getINode(blk);
      assert inode!=null : (blk + " in blocksMap must belong to a file.");
+      // Add this replica to corruptReplicas Map 
+      corruptReplicas.addToCorruptReplicasMap(blk, node);
       if (countNodes(blk).liveReplicas()>inode.getReplication()) {
         // the block is over-replicated so invalidate the replicas immediately
         invalidateBlock(blk, node);
       } else {
-        // Add this replica to corruptReplicas Map and 
         // add the block to neededReplication 
-        corruptReplicas.addToCorruptReplicasMap(blk, node);
         updateNeededReplications(blk, -1, 0);
       }
     }
@@ -1586,9 +1586,6 @@
     NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: " 
                                  + blk + " on " 
                                  + dn.getName());
-    if (isInSafeMode()) {
-      throw new SafeModeException("Cannot invalidate block " + blk, safeMode);
-    }
     DatanodeDescriptor node = getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate block " + blk +
@@ -2832,7 +2829,7 @@
     assert storedBlock != null : "Block must be stored by now";
 
     if (block != storedBlock) {
-      if (block.getNumBytes() > 0) {
+      if (block.getNumBytes() >= 0) {
         long cursize = storedBlock.getNumBytes();
         if (cursize == 0) {
           storedBlock.setNumBytes(block.getNumBytes());
@@ -2844,12 +2841,13 @@
           try {
             if (cursize > block.getNumBytes()) {
               // new replica is smaller in size than existing block.
-              // Delete new replica.
-              LOG.warn("Deleting block " + block + " from " + node.getName());
-              invalidateBlock(block, node);
+              // Mark the new replica as corrupt.
+              LOG.warn("Mark new replica " + block + " from " + node.getName() + 
+                  " as corrupt because its length is shorter than existing ones");
+              markBlockAsCorrupt(block, node);
             } else {
               // new replica is larger in size than existing block.
-              // Delete pre-existing replicas.
+              // Mark pre-existing replicas as corrupt.
               int numNodes = blocksMap.numNodes(block);
               int count = 0;
               DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
@@ -2861,9 +2859,9 @@
                 }
               }
               for (int j = 0; j < count; j++) {
-                LOG.warn("Deleting block " + block + " from " + 
-                         nodes[j].getName());
-                invalidateBlock(block, nodes[j]);
+                LOG.warn("Mark existing replica " + block + " from " + nodes[j].getName() + 
+                " as corrupt because its length is shorter than the new one");
+                markBlockAsCorrupt(block, nodes[j]);
               }
               //
               // change the size of block in blocksMap

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=727214&r1=727213&r2=727214&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Tue Dec 16 15:26:30 2008
@@ -385,4 +385,67 @@
     assertTrue(blocks.get(0).isCorrupt() == false);
     cluster.shutdown();
   }
+  
+  /** Test if NameNode handles truncated blocks in block report */
+  public void testTruncatedBlockReport() throws Exception {
+    final Configuration conf = new Configuration();
+    final short REPLICATION_FACTOR = (short)2;
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      final Path fileName = new Path("/file1");
+      DFSTestUtil.createFile(fs, fileName, 1, REPLICATION_FACTOR, 0);
+      DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
+
+      String block = DFSTestUtil.getFirstBlock(fs, fileName).getBlockName();
+
+      // Truncate replica of block
+      truncateReplica(block, 0);
+
+      cluster.shutdown();
+
+      // restart the cluster
+      cluster = new MiniDFSCluster(
+          0, conf, REPLICATION_FACTOR, false, true, null, null, null);
+      cluster.startDataNodes(conf, 1, true, null, null);
+      cluster.waitActive();  // now we have 3 datanodes
+
+      // wait for truncated block be detected and the block to be replicated
+      DFSTestUtil.waitReplication(
+          cluster.getFileSystem(), fileName, REPLICATION_FACTOR);
+      
+      // Make sure that truncated block will be deleted
+      waitForBlockDeleted(block, 0);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  private void truncateReplica(String blockName, int dnIndex) throws IOException {
+    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
+      File blockFile = new File(baseDir, "data" + (i+1)+ "/current/" + 
+                               blockName);
+      if (blockFile.exists()) {
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        raFile.setLength(raFile.length()-1);
+        raFile.close();
+        break;
+      }
+    }
+  }
+  
+  private void waitForBlockDeleted(String blockName, int dnIndex) 
+  throws IOException, InterruptedException {
+    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1)+ "/current/" + 
+        blockName);
+    File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2)+ "/current/" + 
+        blockName);
+    while (blockFile1.exists() || blockFile2.exists()) {
+      Thread.sleep(100);
+    }
+  }
 }



Mime
View raw message