From: kihwal@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-8581. ContentSummary on / skips further counts on yielding lock (contributed by J.Andreina)
Date: Tue, 14 Jun 2016 13:31:34 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 a2d960b6d -> 867b29be9


HDFS-8581. ContentSummary on / skips further counts on yielding lock (contributed by J.Andreina)

(cherry picked from commit 4014ce5990bff9b0ecb3d38a633d40eaf6cf07a7)
(cherry picked from commit 8854cdd9eefd05c10d0518528a3bff6a7348f37e)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/867b29be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/867b29be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/867b29be

Branch: refs/heads/branch-2.6
Commit: 867b29be9c5a2f01bfef3b4c361e5ed85d438660
Parents: a2d960b
Author: Kihwal Lee
Authored: Tue Jun 14 08:31:00 2016 -0500
Committer: Kihwal Lee
Committed: Tue Jun 14 08:31:00 2016 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/INodeDirectory.java    |  2 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 31 ++++++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
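For readers following the diff below: while computing a ContentSummary, the NameNode periodically yields and reacquires its lock, and on re-entry the INodeDirectory code treats a null parent as "this directory was deleted" and stops counting. The root inode's parent is always null, though, so a summary of / stopped early whenever the yield threshold was crossed; the one-line change exempts the root from that check. The following is a minimal, self-contained sketch of that pattern, not the HDFS implementation; the class and member names in it (ContentCountSketch, Node, countFiles) are invented for illustration.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Minimal sketch of the yield-and-recheck pattern fixed by HDFS-8581.
public class ContentCountSketch {

  static class Node {
    final String name;
    Node parent;                              // null for the root and for deleted nodes
    final List<Node> children = new ArrayList<>();

    Node(String name, Node parent) {
      this.name = name;
      this.parent = parent;
      if (parent != null) {
        parent.children.add(this);
      }
    }

    boolean isRoot() {
      return name.isEmpty();                  // by convention here, the root is named ""
    }

    // Count leaf nodes under this directory, pretending to yield a lock
    // every 'limit' processed items.
    long countFiles(int limit) {
      long count = 0;
      int sinceYield = 0;
      Deque<Node> pending = new ArrayDeque<>(children);
      while (!pending.isEmpty()) {
        Node n = pending.pop();
        if (n.children.isEmpty()) {
          count++;
        } else {
          pending.addAll(n.children);
        }
        if (++sinceYield >= limit) {
          sinceYield = 0;
          // Imagine the lock being released and reacquired here.
          // Buggy check:  if (parent == null) break;   (this also trips on the root)
          // Fixed check, same shape as the HDFS-8581 change:
          if (!isRoot() && parent == null) {
            break;                            // this directory was deleted while unlocked
          }
        }
      }
      return count;
    }
  }

  public static void main(String[] args) {
    Node root = new Node("", null);
    Node folder1 = new Node("Folder1", root);
    for (int i = 1; i <= 5; i++) {
      new Node("file" + i, folder1);
    }
    new Node("file6", new Node("Folder2", root));
    // With the root exempted, all 6 files are counted even though the
    // yield threshold of 4 is crossed mid-traversal; prints 6.
    System.out.println(root.countFiles(4));
  }
}

In this sketch, reverting to the buggy form of the guard makes countFiles stop at 2 instead of 6, which is the shape of the under-count the new TestQuota#testHugeFileCount below guards against.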
http://git-wip-us.apache.org/repos/asf/hadoop/blob/867b29be/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 84bdbf8..cc27d77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -27,6 +27,9 @@ Release 2.6.5 - UNRELEASED
     HDFS-10271. Extra bytes are getting released from reservedSpace for
     append (Brahma Reddy Battula via vinayakumarb)
 
+    HDFS-8581. ContentSummary on / skips further counts on yielding lock
+    (J.Andreina via vinayakumarb)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/867b29be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index a753230..44e8f6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -638,7 +638,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
         continue;
       }
       // The locks were released and reacquired. Check parent first.
-      if (getParent() == null) {
+      if (!isRoot() && getParent() == null) {
         // Stop further counting and return whatever we have so far.
         break;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/867b29be/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 6e93a91..6fab668 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -21,11 +21,13 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -921,4 +923,33 @@ public class TestQuota {
       cluster.shutdown();
     }
   }
+
+  /**
+   * File count on root , should return total value of files in Filesystem
+   * when one folder contains files more than "dfs.content-summary.limit".
+   */
+  @Test
+  public void testHugeFileCount() throws IOException {
+    MiniDFSCluster cluster = null;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.content-summary.limit", 4);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      for (int i = 1; i <= 5; i++) {
+        FSDataOutputStream out =
+            dfs.create(new Path("/Folder1/" + "file" + i),(short)1);
+        out.close();
+      }
+      FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"),(short)1);
+      out.close();
+      ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
+      assertEquals(6, contentSummary.getFileCount());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
 }
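As a follow-up to the new test above, here is a hedged client-side sketch of the call path it exercises, FileSystem#getContentSummary on /. The NameNode URI is a placeholder, and dfs.content-summary.limit is a NameNode-side setting (it caps how many items are counted per lock hold), so setting it on a client has no effect.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RootSummaryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; point this at a real NameNode.
    conf.set("fs.defaultFS", "hdfs://namenode.example.com:8020");
    try (FileSystem fs = FileSystem.get(conf)) {
      ContentSummary summary = fs.getContentSummary(new Path("/"));
      // Before this fix, a summary of "/" could under-report these totals
      // whenever the NameNode yielded its lock mid-count.
      System.out.println("files       = " + summary.getFileCount());
      System.out.println("directories = " + summary.getDirectoryCount());
      System.out.println("bytes       = " + summary.getLength());
    }
  }
}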