From: jing9@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Message-Id: <0945d00592cc4b568056da82329c30be@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: hadoop git commit: HDFS-7943. Append cannot handle the last block
 with length greater than the preferred block size. Contributed by Jing Zhao.
Date: Thu, 19 Mar 2015 01:44:31 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 8234fd0e1 -> bee5a6a64

HDFS-7943. Append cannot handle the last block with length greater than the
preferred block size. Contributed by Jing Zhao.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bee5a6a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bee5a6a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bee5a6a6

Branch: refs/heads/trunk
Commit: bee5a6a64a1c037308fa4d52249be39c82791590
Parents: 8234fd0
Author: Jing Zhao <jing9@apache.org>
Authored: Wed Mar 18 18:40:59 2015 -0700
Committer: Jing Zhao <jing9@apache.org>
Committed: Wed Mar 18 18:40:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../hdfs/server/namenode/FSDirConcatOp.java     | 26 ++++++++++++++++++--
 .../hdfs/server/namenode/TestHDFSConcat.java    | 17 +++++++++++++
 3 files changed, 44 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 385c39b..d9d9e1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1199,6 +1199,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)
 
+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
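Background for the patch below: concat moves the source files' blocks onto
the end of the target file as-is, so a source written with a larger
preferred block size than the target can hand the target a last block longer
than the target's preferred block size, and a later append to that file then
fails; that is the HDFS-7943 symptom. A minimal sketch of the trigger,
assuming a running MiniDFSCluster and a DistributedFileSystem handle dfs
(the path and size constants are illustrative, mirroring the regression test
at the end of this patch, and are not part of the patch itself):

    // Sketch only -- not part of the patch. Assumes imports of
    // org.apache.hadoop.fs.Path and org.apache.hadoop.hdfs.DFSTestUtil.
    final Path parentDir = new Path("/test");   // hypothetical directory
    final long fileLen = 512;                   // illustrative file length
    final short REPL_FACTOR = 2;                // illustrative replication

    final Path trg = new Path(parentDir, "trg");
    DFSTestUtil.createFile(dfs, trg, fileLen, REPL_FACTOR, 0L);

    // A source created with twice the target's preferred block size.
    final Path src = new Path(parentDir, "src");
    DFSTestUtil.createFile(dfs, src, 1024, fileLen,
        dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);

    // Pre-patch, this concat succeeded and could leave trg's last block
    // longer than trg's preferred block size, breaking a later append.
    // With the patch, the concat itself throws
    // HadoopIllegalArgumentException.
    dfs.concat(trg, new Path[] { src });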
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 5ccd3ea..31a6af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -34,6 +34,16 @@ import java.util.List;
 
 import static org.apache.hadoop.util.Time.now;
 
+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src file and the target file are in the same dir
+ * 2. all the source files are not in snapshot
+ * 3. any source file cannot be the same with the target file
+ * 4. source files cannot be under construction or empty
+ * 5. source file's preferred block size cannot be greater than the target file
+ * </pre>
+ */
 class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ class FSDirConcatOp {
       throw new SnapshotException("Concat: the source file " + src
           + " is referred by some other reference in some snapshot.");
     }
+    // source file cannot be the same with the target file
     if (srcINode == targetINode) {
       throw new HadoopIllegalArgumentException("concat: the src file " + src
           + " is the same with the target file " + targetIIP.getPath());
     }
+    // source file cannot be under construction or empty
     if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
       throw new HadoopIllegalArgumentException("concat: source file " + src
           + " is invalid or empty or underConstruction");
     }
+    // source file's preferred block size cannot be greater than the target
+    // file
+    if (srcINodeFile.getPreferredBlockSize() >
+        targetINode.getPreferredBlockSize()) {
+      throw new HadoopIllegalArgumentException("concat: source file " + src
+          + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+          + " which is greater than the target file's preferred block size "
+          + targetINode.getPreferredBlockSize());
+    }
     si.add(srcINodeFile);
   }
@@ -143,9 +164,10 @@ class FSDirConcatOp {
     return si.toArray(new INodeFile[si.size()]);
   }
 
-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
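The guard added above (restriction 5 in the new javadoc) deliberately
compares preferred block sizes rather than measuring actual block lengths:
any block of a source file may legally be as long as that source's preferred
block size, and concat moves blocks onto the target unchanged. A worked
example of the failure the check rules out, with hypothetical sizes (the
names and numbers below are illustrative, not taken from the patch):

    // Illustrative arithmetic only; all constants are hypothetical.
    final long targetPreferred = 128L << 20;  // target preferred block size: 128 MB
    final long srcPreferred    = 256L << 20;  // source preferred block size: 256 MB
    final long srcLastBlockLen = 200L << 20;  // a 200 MB block is legal in the source

    // After concat, the source's 200 MB block becomes the target's last block.
    final long roomInLastBlock = targetPreferred - srcLastBlockLen;  // -72 MB
    // Roughly speaking, append expects the last block to have non-negative
    // room left (preferred size minus current length); a negative value here
    // is the HDFS-7943 failure mode that the new precondition rejects up
    // front.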
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index ddf5a3e..e1c3c0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -388,6 +389,22 @@ public class TestHDFSConcat {
     } catch (Exception e) {
       // expected
     }
+
+    // the source file's preferred block size cannot be greater than the target
+    {
+      final Path src1 = new Path(parentDir, "src1");
+      DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
+      final Path src2 = new Path(parentDir, "src2");
+      // create a file whose preferred block size is greater than the target
+      DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
+          dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
+      try {
+        dfs.concat(trg, new Path[] {src1, src2});
+        fail("didn't fail for src with greater preferred block size");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("preferred block size", e);
+      }
+    }
   }
 
   /**
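From a client, the new rejection surfaces from DistributedFileSystem#concat.
A hedged handling sketch (setup of dfs, trg, and src is assumed as in the
earlier sketch; the test above catches a broad Exception because the
server-side HadoopIllegalArgumentException may reach the client wrapped in a
RemoteException, so this sketch does the same):

    // Illustrative client-side handling; not part of the patch.
    try {
      dfs.concat(trg, new Path[] { src });
    } catch (Exception e) {
      // With this patch the message names the offending source file and both
      // preferred block sizes, hence the "preferred block size" assertion in
      // the test above.
      System.err.println("concat rejected: " + e.getMessage());
    }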