hadoop-common-commits mailing list archives

From y...@apache.org
Subject hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
Date Fri, 20 Mar 2015 00:24:57 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cabe676d6 -> 90164ffd8


HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90164ffd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90164ffd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90164ffd

Branch: refs/heads/branch-2
Commit: 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae
Parents: cabe676
Author: yliu <yliu@apache.org>
Authored: Thu Mar 19 23:24:55 2015 +0800
Committer: yliu <yliu@apache.org>
Committed: Thu Mar 19 23:24:55 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../server/blockmanagement/BlockManager.java    | 40 ++++++++++++++++++++
 .../hdfs/server/namenode/FSNamesystem.java      |  8 +++-
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 +----
 4 files changed, 50 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ead8912..44fbfcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -915,6 +915,8 @@ Release 2.7.0 - UNRELEASED
 
    HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. (kihwal)
 
+    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f22558..73c1425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1955,6 +1955,46 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt, except those on the storages in the
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+      long oldGenerationStamp, long oldNumBytes, 
+      DatanodeStorageInfo[] newStorages) throws IOException {
+    assert namesystem.hasWriteLock();
+    BlockToMarkCorrupt b = null;
+    if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+    } else {
+      return;
+    }
+
+    for (DatanodeStorageInfo storage : getStorages(block)) {
+      boolean isCorrupt = true;
+      if (newStorages != null) {
+        for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+            isCorrupt = false;
+            break;
+          }
+        }
+      }
+      if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            " {} on {} as corrupt because the dn is not in the new committed " +
+            "storage list.", b, storage.getDatanodeDescriptor());
+        markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+      }
+    }
+  }
+
+  /**
    * processFirstBlockReport is intended only for processing "initial" block
    * reports, the first block report received from a DN after it registers.
    * It just adds all the valid replicas to the datanode, without calculating 
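
For illustration, a minimal, self-contained sketch of the decision logic the new
markBlockReplicasAsCorrupt() method applies. Block and Storage below are hypothetical
stand-ins for BlockInfoContiguous and DatanodeStorageInfo, not the real HDFS types;
the point is the two-step test: the replica set is suspect only when the stored
block's genstamp or length differs from the pre-recovery values, and an individual
replica is spared only when its storage appears in the committed (new) storage list.

import java.util.Arrays;
import java.util.List;

public class CorruptReplicaSketch {
  record Block(long genStamp, long numBytes) {}  // stand-in for BlockInfoContiguous
  record Storage(String id) {}                   // stand-in for DatanodeStorageInfo

  /** Returns the storages whose replicas should be marked corrupt. */
  static List<Storage> staleReplicas(Block block, long oldGenStamp, long oldNumBytes,
      List<Storage> allStorages, List<Storage> newStorages) {
    // No mismatch means recovery did not change the block: nothing is stale.
    if (block.genStamp() == oldGenStamp && block.numBytes() == oldNumBytes) {
      return List.of();
    }
    // Otherwise every replica outside the committed storage list is stale.
    return allStorages.stream()
        .filter(s -> newStorages == null || !newStorages.contains(s))
        .toList();
  }

  public static void main(String[] args) {
    Block recovered = new Block(1002L, 512L);  // genstamp bumped by block recovery
    List<Storage> all = Arrays.asList(
        new Storage("dn0"), new Storage("dn1"), new Storage("dn2"));
    List<Storage> committed = Arrays.asList(new Storage("dn1"), new Storage("dn2"));
    // dn0 did not take part in the synchronization, so its replica is reported.
    System.out.println(staleReplicas(recovered, 1001L, 512L, all, committed));
    // -> [Storage[id=dn0]]
  }
}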

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a8072a9..ad17e8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4221,6 +4221,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           throw new IOException("Block (=" + oldBlock + ") not found");
         }
       }
+      final long oldGenerationStamp = storedBlock.getGenerationStamp();
+      final long oldNumBytes = storedBlock.getNumBytes();
       //
       // The implementation of delete operation (see @deleteInternal method)
       // first removes the file paths from namespace, and delays the removal
@@ -4281,8 +4283,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         }
 
         // find the DatanodeDescriptor objects
-        // There should be no locations in the blockManager till now because the
-        // file is underConstruction
         ArrayList<DatanodeDescriptor> trimmedTargets =
             new ArrayList<DatanodeDescriptor>(newtargets.length);
         ArrayList<String> trimmedStorages =
@@ -4326,6 +4326,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           iFile.setLastBlock(truncatedBlock, trimmedStorageInfos);
         } else {
           iFile.setLastBlock(storedBlock, trimmedStorageInfos);
+          if (closeFile) {
+            blockManager.markBlockReplicasAsCorrupt(storedBlock,
+                oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+          }
         }
       }
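
A note on ordering, with a minimal runnable sketch (MutableBlock is a hypothetical
stand-in, not FSNamesystem code): the old genstamp and length are captured at the top
of commitBlockSynchronization() because block recovery mutates the stored block in
place; comparing against the already-updated block would always report "no change",
and no stale replica would ever be marked.

public class CaptureBeforeUpdateSketch {
  static final class MutableBlock {
    long genStamp, numBytes;
    MutableBlock(long g, long n) { genStamp = g; numBytes = n; }
  }

  public static void main(String[] args) {
    MutableBlock stored = new MutableBlock(1001L, 1024L);

    // 1. Save the pre-recovery values up front, as the patch does.
    final long oldGenerationStamp = stored.genStamp;
    final long oldNumBytes = stored.numBytes;

    // 2. Block recovery bumps the genstamp and may shrink the length.
    stored.genStamp = 1002L;
    stored.numBytes = 512L;

    // 3. Only now can stale replicas be detected against the saved baseline.
    boolean changed = stored.genStamp != oldGenerationStamp
        || stored.numBytes != oldNumBytes;
    System.out.println("replicas outside the committed list are stale: " + changed);
    // -> true
  }
}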
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90164ffd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 8d447ee..fbcc73f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -688,11 +688,7 @@ public class TestFileTruncate {
     /*
      * For non copy-on-truncate, the truncated block id is the same, but the 
      * GS should increase.
-     * We trigger block report for dn0 after it restarts, since the GS 
-     * of replica for the last block on it is old, so the reported last block
-     * from dn0 should be marked corrupt on nn and the replicas of last block 
-     * on nn should decrease 1, then the truncated block will be replicated 
-     * to dn0.
+     * The truncated block will be replicated to dn0 after dn0 restarts.
      */
     assertEquals(newBlock.getBlock().getBlockId(), 
         oldBlock.getBlock().getBlockId());
@@ -748,8 +744,7 @@ public class TestFileTruncate {
     LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
     /*
      * For copy-on-truncate, new block is made with new block id and new GS.
-     * We trigger block report for dn1 after it restarts. The replicas of 
-     * the new block is 2, and then it will be replicated to dn1.
+     * The new block has 2 replicas, so it will be replicated to dn1.
      */
     assertNotEquals(newBlock.getBlock().getBlockId(), 
         oldBlock.getBlock().getBlockId());
@@ -802,7 +797,6 @@ public class TestFileTruncate {
     cluster.restartDataNode(dn1, true, true);
     cluster.waitActive();
     checkBlockRecovery(p);
-    cluster.triggerBlockReports();
 
     LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
     /*

