Return-Path: X-Original-To: apmail-hadoop-common-commits-archive@www.apache.org Delivered-To: apmail-hadoop-common-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id E572F172A5 for ; Fri, 13 Mar 2015 20:19:15 +0000 (UTC) Received: (qmail 58180 invoked by uid 500); 13 Mar 2015 20:19:15 -0000 Delivered-To: apmail-hadoop-common-commits-archive@hadoop.apache.org Received: (qmail 58107 invoked by uid 500); 13 Mar 2015 20:19:15 -0000 Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: common-dev@hadoop.apache.org Delivered-To: mailing list common-commits@hadoop.apache.org Received: (qmail 58098 invoked by uid 99); 13 Mar 2015 20:19:15 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 13 Mar 2015 20:19:15 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 3A25AE1828; Fri, 13 Mar 2015 20:19:15 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: shv@apache.org To: common-commits@hadoop.apache.org Message-Id: X-Mailer: ASF-Git Admin Mailer Subject: hadoop git commit: HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov. Date: Fri, 13 Mar 2015 20:19:15 +0000 (UTC) Repository: hadoop Updated Branches: refs/heads/trunk d324164a5 -> 6acb7f211 HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov. 
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6acb7f21 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6acb7f21 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6acb7f21 Branch: refs/heads/trunk Commit: 6acb7f2110897264241df44d564db2f85260348f Parents: d324164 Author: Konstantin V Shvachko Authored: Fri Mar 13 12:39:01 2015 -0700 Committer: Konstantin V Shvachko Committed: Fri Mar 13 13:12:51 2015 -0700 ---------------------------------------------------------------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../server/namenode/snapshot/FileDiffList.java | 19 +++++++++++-- .../hdfs/server/namenode/TestFileTruncate.java | 30 ++++++++++++++++++++ 3 files changed, 49 insertions(+), 3 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ac7e096..a149f18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1148,6 +1148,9 @@ Release 2.7.0 - UNRELEASED HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not idempotent (Tsz Wo Nicholas Sze via brandonli) + HDFS-7903. Cannot recover block after truncate and delete snapshot. + (Plamen Jeliazkov via shv) + BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS HDFS-7720. 
Quota by Storage Type API, tools and ClientNameNode http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java index 0c94554..5c9e121 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java @@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import java.util.Collections; import java.util.List; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile; @@ -125,9 +128,19 @@ public class FileDiffList extends continue; break; } - // Collect the remaining blocks of the file - while(i < removedBlocks.length) { - collectedBlocks.addDeleteBlock(removedBlocks[i++]); + // Check if last block is part of truncate recovery + BlockInfoContiguous lastBlock = file.getLastBlock(); + Block dontRemoveBlock = null; + if(lastBlock != null && lastBlock.getBlockUCState().equals( + HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) { + dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) 
lastBlock) + .getTruncateBlock(); + } + // Collect the remaining blocks of the file, ignoring truncate block + for(;i < removedBlocks.length; i++) { + if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) { + collectedBlocks.addDeleteBlock(removedBlocks[i]); + } } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 260d8bb..3b6e107 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -178,6 +178,36 @@ public class TestFileTruncate { fs.delete(dir, true); } + /** Truncate a snapshotted file, delete the snapshot, and verify the truncated block is still recovered. 
*/ + @Test + public void testSnapshotTruncateThenDeleteSnapshot() throws IOException { + Path dir = new Path("/testSnapshotTruncateThenDeleteSnapshot"); + fs.mkdirs(dir); + fs.allowSnapshot(dir); + final Path p = new Path(dir, "file"); + final byte[] data = new byte[BLOCK_SIZE]; + DFSUtil.getRandom().nextBytes(data); + writeContents(data, data.length, p); + final String snapshot = "s0"; + fs.createSnapshot(dir, snapshot); + Block lastBlock = getLocatedBlocks(p).getLastLocatedBlock() + .getBlock().getLocalBlock(); + final int newLength = data.length - 1; + assert newLength % BLOCK_SIZE != 0 : + " newLength must not be multiple of BLOCK_SIZE"; + final boolean isReady = fs.truncate(p, newLength); + LOG.info("newLength=" + newLength + ", isReady=" + isReady); + assertEquals("File must be closed for truncating at the block boundary", + isReady, newLength % BLOCK_SIZE == 0); + fs.deleteSnapshot(dir, snapshot); + if (!isReady) { + checkBlockRecovery(p); + } + checkFullFile(p, newLength, data); + assertBlockNotPresent(lastBlock); + fs.delete(dir, true); + } + /** * Truncate files and then run other operations such as * rename, set replication, set permission, etc.