From: zjshen@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Sat, 09 May 2015 00:42:38 -0000
Subject: [44/50] hadoop git commit: HDFS-8097. TestFileTruncate is failing intermittently. (Contributed by Rakesh R)

HDFS-8097. TestFileTruncate is failing intermittently. (Contributed by Rakesh R)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4245a6d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4245a6d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4245a6d7

Branch: refs/heads/YARN-2928
Commit: 4245a6d7538a20ef6567784617ecd9fe141ec839
Parents: ca2c225
Author: Arpit Agarwal
Authored: Fri May 8 16:27:52 2015 -0700
Committer: Zhijie Shen
Committed: Fri May 8 17:40:24 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../hdfs/server/namenode/TestFileTruncate.java  | 22 ++++++++------------
 2 files changed, 12 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4245a6d7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b766e26..0b721ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -723,6 +723,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8326. Documentation about when checkpoints are run is out of date.
     (Misty Stanley-Jones via xyao)
 
+    HDFS-8097. TestFileTruncate is failing intermittently. (Rakesh R via
+    Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4245a6d7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index bd19a5b..1f65f78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -87,6 +88,8 @@ public class TestFileTruncate {
   static MiniDFSCluster cluster;
   static DistributedFileSystem fs;
 
+  private Path parent;
+
   @BeforeClass
   public static void startUp() throws IOException {
     conf = new HdfsConfiguration();
@@ -110,6 +113,12 @@ public class TestFileTruncate {
     if(cluster != null) cluster.shutdown();
   }
 
+  @Before
+  public void setup() throws IOException {
+    parent = new Path("/test");
+    fs.delete(parent, true);
+  }
+
   /**
    * Truncate files of different sizes byte by byte.
    */
@@ -117,7 +126,6 @@ public class TestFileTruncate {
   public void testBasicTruncate() throws IOException {
     int startingFileSize = 3 * BLOCK_SIZE;
 
-    Path parent = new Path("/test");
     fs.mkdirs(parent);
     fs.setQuota(parent, 100, 1000);
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
@@ -257,7 +265,6 @@ public class TestFileTruncate {
    */
   void testSnapshotWithAppendTruncate(int ... deleteOrder) throws IOException {
     FSDirectory fsDir = cluster.getNamesystem().getFSDirectory();
-    Path parent = new Path("/test");
     fs.mkdirs(parent);
     fs.setQuota(parent, 100, 1000);
     fs.allowSnapshot(parent);
@@ -421,7 +428,6 @@ public class TestFileTruncate {
   }
 
   void testSnapshotWithTruncates(int ... deleteOrder) throws IOException {
-    Path parent = new Path("/test");
     fs.mkdirs(parent);
     fs.setQuota(parent, 100, 1000);
     fs.allowSnapshot(parent);
@@ -664,7 +670,6 @@ public class TestFileTruncate {
   public void testTruncateWithDataNodesRestart() throws Exception {
     int startingFileSize = 3 * BLOCK_SIZE;
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-    final Path parent = new Path("/test");
     final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
 
     writeContents(contents, startingFileSize, p);
@@ -719,7 +724,6 @@ public class TestFileTruncate {
   public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
     int startingFileSize = 3 * BLOCK_SIZE;
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-    final Path parent = new Path("/test");
     final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");
 
     writeContents(contents, startingFileSize, p);
@@ -779,7 +783,6 @@ public class TestFileTruncate {
   public void testTruncateWithDataNodesRestartImmediately() throws Exception {
     int startingFileSize = 3 * BLOCK_SIZE;
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-    final Path parent = new Path("/test");
     final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");
 
     writeContents(contents, startingFileSize, p);
@@ -839,7 +842,6 @@ public class TestFileTruncate {
   public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
     int startingFileSize = 3 * BLOCK_SIZE;
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-    final Path parent = new Path("/test");
     final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");
 
     writeContents(contents, startingFileSize, p);
@@ -905,7 +907,6 @@ public class TestFileTruncate {
    */
   @Test
   public void testUpgradeAndRestart() throws IOException {
-    Path parent = new Path("/test");
     fs.mkdirs(parent);
     fs.setQuota(parent, 100, 1000);
     fs.allowSnapshot(parent);
@@ -994,7 +995,6 @@ public class TestFileTruncate {
     FSNamesystem fsn = cluster.getNamesystem();
     String client = "client";
     String clientMachine = "clientMachine";
-    Path parent = new Path("/test");
     String src = "/test/testTruncateRecovery";
     Path srcPath = new Path(src);
 
@@ -1067,7 +1067,6 @@ public class TestFileTruncate {
 
   @Test
   public void testTruncateShellCommand() throws Exception {
-    final Path parent = new Path("/test");
     final Path src = new Path("/test/testTruncateShellCommand");
     final int oldLength = 2*BLOCK_SIZE + 1;
     final int newLength = BLOCK_SIZE + 1;
@@ -1084,7 +1083,6 @@ public class TestFileTruncate {
 
   @Test
   public void testTruncateShellCommandOnBlockBoundary() throws Exception {
-    final Path parent = new Path("/test");
     final Path src = new Path("/test/testTruncateShellCommandOnBoundary");
     final int oldLength = 2 * BLOCK_SIZE;
     final int newLength = BLOCK_SIZE;
@@ -1100,7 +1098,6 @@ public class TestFileTruncate {
 
   @Test
   public void testTruncateShellCommandWithWaitOption() throws Exception {
-    final Path parent = new Path("/test");
     final Path src = new Path("/test/testTruncateShellCommandWithWaitOption");
     final int oldLength = 2 * BLOCK_SIZE + 1;
     final int newLength = BLOCK_SIZE + 1;
@@ -1136,7 +1133,6 @@ public class TestFileTruncate {
   public void testTruncate4Symlink() throws IOException {
     final int fileLength = 3 * BLOCK_SIZE;
 
-    final Path parent = new Path("/test");
     fs.mkdirs(parent);
     final byte[] contents = AppendTestUtil.initBuffer(fileLength);
     final Path file = new Path(parent, "testTruncate4Symlink");
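
----------------------------------------------------------------------

Note for readers skimming the diff: the change boils down to one pattern. The shared "/test" parent directory, previously re-declared inside each test case, becomes a field that is wiped in a JUnit @Before method, so files left behind by one @Test method can no longer cause a later one to fail intermittently. The stand-alone sketch below illustrates that pattern; the class name, the single-DataNode MiniDFSCluster wiring, and the setup details are simplified assumptions for illustration and are not part of the actual commit (the real change is in TestFileTruncate, shown in the diff above).

// Illustrative sketch only; class name and cluster wiring are assumptions.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

public class SharedTestDirCleanupExample {
  static MiniDFSCluster cluster;
  static DistributedFileSystem fs;

  // The directory all test cases write under; hoisted to a field so the
  // @Before method below can clean it up between tests.
  private Path parent;

  @BeforeClass
  public static void startUp() throws IOException {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  @AfterClass
  public static void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Before
  public void setup() throws IOException {
    parent = new Path("/test");
    // Recursive delete; returns false when the path does not exist yet,
    // which is harmless. Each test then starts from an empty directory.
    fs.delete(parent, true);
  }
}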