From: sjlee@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Fri, 14 Aug 2015 07:18:11 -0000
Subject: [25/43] hadoop git commit: HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)

(cherry picked from commit 90164ffd84f6ef56e9f8f99dcc7424a8d115dbae)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c9a7461
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c9a7461
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c9a7461

Branch: refs/heads/sjlee/hdfs-merge
Commit: 2c9a7461ec2ceba5885e95bc79f8dcbfd198df60
Parents: 0379841
Author: yliu
Authored: Thu Mar 19 23:24:55 2015 +0800
Committer: Sangjin Lee
Committed: Thu Aug 13 09:58:07 2015 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java   | 41 ++++++++++++++++++++
 .../hdfs/server/namenode/FSNamesystem.java     |  8 +++-
 2 files changed, 47 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c9a7461/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d26cc52..5a38351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1931,6 +1931,47 @@ public class BlockManager {
   }
 
   /**
+   * Mark block replicas as corrupt except those on the storages in
+   * newStorages list.
+   */
+  public void markBlockReplicasAsCorrupt(BlockInfo block,
+      long oldGenerationStamp, long oldNumBytes,
+      DatanodeStorageInfo[] newStorages) throws IOException {
+    assert namesystem.hasWriteLock();
+    BlockToMarkCorrupt b = null;
+    if (block.getGenerationStamp() != oldGenerationStamp) {
+      b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+          "genstamp does not match " + oldGenerationStamp
+          + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+    } else if (block.getNumBytes() != oldNumBytes) {
+      b = new BlockToMarkCorrupt(block,
+          "length does not match " + oldNumBytes
+          + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
+    } else {
+      return;
+    }
+
+    for (DatanodeStorageInfo storage : getStorages(block)) {
+      boolean isCorrupt = true;
+      if (newStorages != null) {
+        for (DatanodeStorageInfo newStorage : newStorages) {
+          if (newStorage != null && storage.equals(newStorage)) {
+            isCorrupt = false;
+            break;
+          }
+        }
+      }
+      if (isCorrupt) {
+        blockLog.info("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
+            b + " on " + storage.getDatanodeDescriptor() +
+            " as corrupt because the dn is not in the new committed " +
+            "storage list.");
+        markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
+      }
+    }
+  }
+
+  /**
    * processFirstBlockReport is intended only for processing "initial" block
    * reports, the first block report received from a DN after it registers.
    * It just adds all the valid replicas to the datanode, without calculating

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c9a7461/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c92b431..fa52981 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4791,6 +4791,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new IOException("Block (=" + lastblock + ") not found");
       }
     }
+    final long oldGenerationStamp = storedBlock.getGenerationStamp();
+    final long oldNumBytes = storedBlock.getNumBytes();
     //
     // The implementation of delete operation (see @deleteInternal method)
     // first removes the file paths from namespace, and delays the removal
@@ -4845,8 +4847,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       storedBlock.setNumBytes(newlength);
 
       // find the DatanodeDescriptor objects
-      // There should be no locations in the blockManager till now because the
-      // file is underConstruction
       ArrayList<DatanodeDescriptor> trimmedTargets =
           new ArrayList<DatanodeDescriptor>(newtargets.length);
       ArrayList<String> trimmedStorages =
@@ -4883,6 +4883,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]),
           trimmedStorages.toArray(new String[trimmedStorages.size()]));
       iFile.setLastBlock(storedBlock, trimmedStorageInfos);
+      if (closeFile) {
+        blockManager.markBlockReplicasAsCorrupt(storedBlock,
+            oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+      }
     }
 
     if (closeFile) {
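
----------------------------------------------------------------------
For readers who want the gist of the patch without walking the diff: when
commitBlockSynchronization() closes the file, the new
BlockManager#markBlockReplicasAsCorrupt() keeps only the replicas reported in
the committed storage list from block recovery; if the block's generation
stamp or length changed, every other known location is marked corrupt so the
namenode no longer counts it as a valid replica. Below is a minimal,
self-contained sketch of that selection rule. It is illustrative only: the
Storage class, method names, and sample values are invented stand-ins for
this note, not Hadoop APIs.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Standalone sketch (not Hadoop code) of the replica-selection rule applied
 * by the patch. "Storage" is a simplified stand-in for DatanodeStorageInfo.
 */
public class StaleReplicaSelection {

  /** Hypothetical, simplified replica location. */
  static final class Storage {
    final String id;
    Storage(String id) { this.id = id; }
    @Override public boolean equals(Object o) {
      return o instanceof Storage && ((Storage) o).id.equals(id);
    }
    @Override public int hashCode() { return id.hashCode(); }
    @Override public String toString() { return id; }
  }

  /**
   * Returns the locations that should be marked corrupt: every currently
   * known location that is absent from the new committed storage list.
   * If neither the generation stamp nor the length changed, nothing is
   * stale and the result is empty (mirrors the early return in the patch).
   */
  static List<Storage> replicasToMarkCorrupt(
      long oldGenStamp, long newGenStamp,
      long oldNumBytes, long newNumBytes,
      List<Storage> currentLocations, List<Storage> newCommittedStorages) {
    List<Storage> corrupt = new ArrayList<>();
    if (newGenStamp == oldGenStamp && newNumBytes == oldNumBytes) {
      return corrupt;  // block unchanged; no replica is stale
    }
    for (Storage s : currentLocations) {
      if (!newCommittedStorages.contains(s)) {
        corrupt.add(s);  // this dn missed the recovery; its copy is stale
      }
    }
    return corrupt;
  }

  public static void main(String[] args) {
    List<Storage> current = Arrays.asList(
        new Storage("dn1"), new Storage("dn2"), new Storage("dn3"));
    List<Storage> committed = Arrays.asList(
        new Storage("dn1"), new Storage("dn2"));
    // Generation stamp was bumped by block recovery, so dn3's copy is stale.
    System.out.println(replicasToMarkCorrupt(1001L, 1002L, 512L, 512L,
        current, committed));  // prints [dn3]
  }
}

Run standalone, main() prints [dn3]: dn3 held a replica before recovery but
does not appear in the committed storage list, so it is the location the
actual patch would hand to markBlockAsCorrupt() for eventual removal.
----------------------------------------------------------------------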