Subject: svn commit: r1362753 [3/7] - in /hadoop/common/trunk/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/java/org/apache/...
Date: Wed, 18 Jul 2012 03:46:36 -0000
To: hdfs-commits@hadoop.apache.org
From: atm@apache.org
Message-Id: <20120718034649.9099E23889E2@eris.apache.org>

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestHarIndexParser.java Wed Jul 18 03:46:28 2012
@@ -17,25 +17,30 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStreamWriter;
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.Charset;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;

-public class TestHarIndexParser extends TestCase {
+public class TestHarIndexParser {
   final static Log LOG = LogFactory.getLog(TestHarIndexParser.class);
   File indexFile = null;

-  protected void setUp() throws FileNotFoundException, IOException {
+  @Before
+  public void setUp() throws FileNotFoundException, IOException {
     LOG.info("TestHarIndexParser.setUp()");
     indexFile = File.createTempFile("harindex", ".tmp");
     indexFile.deleteOnExit();
@@ -51,12 +56,14 @@ public class TestHarIndexParser extends
     out.close();
   }

-  protected void tearDown() {
+  @After
+  public void tearDown() {
     LOG.info("TestHarIndexParser.tearDown()");
     if (indexFile != null)
       indexFile.delete();
   }

+  @Test
   public void testHarIndexParser()
       throws UnsupportedEncodingException, IOException {
     LOG.info("testHarIndexParser started.");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidFilter.java Wed Jul 18 03:46:28 2012
@@ -17,25 +17,25 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.List;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.raid.protocol.PolicyInfo;
 import org.apache.hadoop.util.Time;
+import org.junit.Test;

-public class TestRaidFilter extends TestCase {
+public class TestRaidFilter {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
       "target/test-data")).getAbsolutePath();
   final static Log LOG =
@@ -59,6 +59,7 @@ public class TestRaidFilter extends Test
     if (dfs != null) { dfs.shutdown(); }
   }

+  @Test
   public void testLayeredPolicies() throws Exception {
     mySetup();
     Path src1 = new Path("/user/foo");
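The two conversions above follow the pattern repeated throughout this commit: drop "extends TestCase", annotate fixtures and tests, and statically import the assertions. A minimal sketch of the resulting shape (the class and fixture here are hypothetical, not part of the commit):

    import static org.junit.Assert.assertEquals;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // JUnit 4 style: no "extends TestCase"; the runner finds tests by
    // annotation instead of by inheritance and method-name prefixes.
    public class ExampleMigratedTest {
      private StringBuilder fixture;

      @Before                // replaces JUnit 3's protected void setUp()
      public void setUp() {
        fixture = new StringBuilder("abc");
      }

      @After                 // replaces JUnit 3's protected void tearDown()
      public void tearDown() {
        fixture = null;
      }

      @Test                  // replaces the mandatory "test" name prefix
      public void testLength() {
        assertEquals(3, fixture.length());  // static import, not inherited
      }
    }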
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidHar.java Wed Jul 18 03:46:28 2012
@@ -17,31 +17,32 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.FileNotFoundException;
+import java.io.FileWriter;
 import java.util.Random;

-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
-
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
+import org.junit.Test;

 /**
  * If a file gets deleted, then verify that the parity file gets deleted too.
  */
-public class TestRaidHar extends TestCase {
+public class TestRaidHar {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
       "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR,
@@ -182,6 +183,7 @@ public class TestRaidHar extends TestCas
    * Test that parity files that do not have an associated master file
    * get deleted.
    */
+  @Test
   public void testRaidHar() throws Exception {
     LOG.info("Test testRaidHar started.");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidNode.java Wed Jul 18 03:46:28 2012
@@ -17,26 +17,26 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.FileNotFoundException;
+import java.io.FileWriter;
 import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 import java.util.zip.CRC32;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
@@ -45,14 +45,16 @@ import org.apache.hadoop.mapreduce.serve
 import org.apache.hadoop.raid.protocol.PolicyInfo;
 import org.apache.hadoop.raid.protocol.PolicyList;
 import org.apache.hadoop.util.JarFinder;
-import org.apache.hadoop.raid.protocol.PolicyInfo.ErasureCodeType;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;

 /**
  * Test the generation of parity blocks for files with different block
  * sizes. Also test that a data block can be regenerated from a raid stripe
  * using the parity block
  */
-public class TestRaidNode extends TestCase {
+public class TestRaidNode {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
       "target/test-data")).getAbsolutePath();
   public static final String DistRaid_JAR = JarFinder.getJar(DistRaid.class);
@@ -258,6 +260,7 @@ public class TestRaidNode extends TestCa
   /**
    * Test to run a filter
    */
+  @Test
   public void testPathFilter() throws Exception {
     LOG.info("Test testPathFilter started.");

@@ -513,6 +516,7 @@ public class TestRaidNode extends TestCa
   /**
    * Test dist Raid
    */
+  @Test
   public void testDistRaid() throws Exception {
     LOG.info("Test testDistRaid started.");
     long targetReplication = 2;
@@ -664,6 +668,7 @@ public class TestRaidNode extends TestCa
     }
   }

+  @Test
   public void testSuspendTraversal() throws Exception {
     LOG.info("Test testSuspendTraversal started.");
     long targetReplication = 2;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidPurge.java Wed Jul 18 03:46:28 2012
@@ -17,48 +17,37 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.GregorianCalendar;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Properties;
+import java.io.FileWriter;
 import java.util.Random;
-import java.util.zip.CRC32;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.raid.protocol.PolicyInfo;
-import org.apache.hadoop.raid.protocol.PolicyList;
 import org.apache.hadoop.hdfs.TestRaidDfs;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.raid.protocol.PolicyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.junit.Test;

 /**
  * If a file gets deleted, then verify that the parity file gets deleted too.
  */
-public class TestRaidPurge extends TestCase {
+public class TestRaidPurge {
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
       "target/test-data")).getAbsolutePath();
   final static String CONFIG_FILE = new File(TEST_DIR,
@@ -206,6 +195,7 @@ public class TestRaidPurge extends TestC
    * Test that parity files that do not have an associated master file
    * get deleted.
    */
+  @Test
   public void testPurge() throws Exception {
     LOG.info("Test testPurge started.");

@@ -312,6 +302,7 @@ public class TestRaidPurge extends TestC
    * Create a file, wait for parity file to get HARed. Then modify the file,
    * wait for the HAR to get purged.
    */
+  @Test
   public void testPurgeHar() throws Exception {
     LOG.info("testPurgeHar started");
     int harDelay = 0;
@@ -381,6 +372,7 @@ public class TestRaidPurge extends TestC
    * Create parity file, delete original file's directory and then validate that
    * parity directory is automatically deleted.
    */
+  @Test
   public void testPurgeDirectory() throws Exception {
     long stripeLength = 5;
     long blockSize = 8192;
@@ -433,6 +425,7 @@ public class TestRaidPurge extends TestC
   /**
    * Test that an XOR parity file is removed when a RS parity file is detected.
    */
+  @Test
   public void testPurgePreference() throws Exception {
     createClusters(true);
     Path dir = new Path("/user/test/raidtest/");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShell.java Wed Jul 18 03:46:28 2012
@@ -17,34 +17,35 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.util.Random;
 import java.util.zip.CRC32;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.RaidDFSUtil;
+import org.apache.hadoop.hdfs.TestRaidDfs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.TestRaidDfs;
-import org.apache.hadoop.hdfs.RaidDFSUtil;
-import org.apache.hadoop.raid.RaidNode;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;

-public class TestRaidShell extends TestCase {
+public class TestRaidShell {
   final static Log LOG = LogFactory.getLog(
       "org.apache.hadoop.raid.TestRaidShell");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
@@ -65,6 +66,7 @@ public class TestRaidShell extends TestC
    * Create a file with three stripes, corrupt a block each in two stripes,
    * and wait for the the file to be fixed.
    */
+  @Test
   public void testBlockFix() throws Exception {
     LOG.info("Test testBlockFix started.");
     long blockSize = 8192L;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestRaidShellFsck.java Wed Jul 18 03:46:28 2012
@@ -17,34 +17,31 @@
  */
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
-import java.io.FileWriter;
 import java.io.FileNotFoundException;
+import java.io.FileWriter;
 import java.io.IOException;
 import java.util.Random;

-import org.junit.Test;
-import org.junit.After;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.RaidDFSUtil;
+import org.apache.hadoop.hdfs.TestRaidDfs;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.TestRaidDfs;
-import org.apache.hadoop.hdfs.RaidDFSUtil;
-import org.apache.hadoop.raid.RaidNode;
-import org.apache.hadoop.raid.HarIndex;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Test;

 public class TestRaidShellFsck {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonDecoder.java Wed Jul 18 03:46:28 2012
@@ -18,27 +18,29 @@
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RaidDFSUtil;
 import org.apache.hadoop.hdfs.TestRaidDfs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.mapred.Reporter;
+import org.junit.Test;

-public class TestReedSolomonDecoder extends TestCase {
+public class TestReedSolomonDecoder {
   final static Log LOG = LogFactory.getLog(
       "org.apache.hadoop.raid.TestReedSolomonDecoder");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
@@ -49,6 +51,7 @@ public class TestReedSolomonDecoder exte
   MiniDFSCluster dfs = null;
   FileSystem fileSys = null;

+  @Test
   public void testDecoder() throws Exception {
     mySetup();
     int stripeSize = 10;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs-raid/src/test/java/org/apache/hadoop/raid/TestReedSolomonEncoder.java Wed Jul 18 03:46:28 2012
@@ -18,34 +18,23 @@
 package org.apache.hadoop.raid;

+import static org.junit.Assert.assertEquals;
+
 import java.io.File;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.DistributedRaidFileSystem;
 import org.apache.hadoop.hdfs.TestRaidDfs;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.raid.RaidNode;
+import org.junit.Test;

-public class TestReedSolomonEncoder extends TestCase {
+public class TestReedSolomonEncoder {
   final static Log LOG = LogFactory.getLog(
       "org.apache.hadoop.raid.TestReedSolomonEncoder");
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
@@ -57,6 +46,7 @@ public class TestReedSolomonEncoder exte
   MiniDFSCluster dfs = null;
   FileSystem fileSys = null;

+  @Test
   public void testEncoder() throws Exception {
     mySetup();
     int stripeSize = 10;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Jul 18 03:46:28 2012
@@ -109,6 +109,8 @@ Trunk (unreleased changes)

     HDFS-3630 Modify TestPersistBlocks to use both flush and hflush  (sanjay)

+    HDFS-3583. Convert remaining tests to Junit4. (Andrew Wang via atm)
+
   OPTIMIZATIONS

   BUG FIXES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java Wed Jul 18 03:46:28 2012
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.cli;

-import org.apache.hadoop.cli.util.*;
+import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
+import org.apache.hadoop.cli.util.CLICommandTypes;
+import org.apache.hadoop.cli.util.CLITestCmd;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.util.FSCmdExecutor;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;

 public class CLITestCmdDFS extends CLITestCmd {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java Wed Jul 18 03:46:28 2012
@@ -18,6 +18,8 @@

 package org.apache.hadoop.cli;

+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
@@ -27,7 +29,6 @@ import org.apache.hadoop.hdfs.HDFSPolicy
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.junit.After;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
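With "extends TestCase" removed, the classes touched by HDFS-3583 run under the JUnit 4 runner, which discovers @Test methods by annotation rather than by the "test" name prefix. A hypothetical snippet (not part of the commit; it reuses the ExampleMigratedTest sketch from above) running a converted class programmatically:

    import org.junit.runner.JUnitCore;
    import org.junit.runner.Result;
    import org.junit.runner.notification.Failure;

    public class RunConverted {
      public static void main(String[] args) {
        // JUnitCore is JUnit 4's entry point; it collects every method
        // annotated with @Test in the given classes and runs it.
        Result result = JUnitCore.runClasses(ExampleMigratedTest.class);
        for (Failure failure : result.getFailures()) {
          System.out.println(failure.toString());
        }
        System.out.println("Success: " + result.wasSuccessful());
      }
    }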
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Wed Jul 18 03:46:28 2012
@@ -17,19 +17,23 @@
  */
 package org.apache.hadoop.fs;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.regex.Pattern;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;

-import junit.framework.TestCase;
+public class TestGlobPaths {

-public class TestGlobPaths extends TestCase {
-
   static class RegexPathFilter implements PathFilter {
-
+
     private final String regex;

     public RegexPathFilter(String regex) {
       this.regex = regex;
@@ -41,15 +45,15 @@ public class TestGlobPaths extends TestC
     }
   }
-
+
   static private MiniDFSCluster dfsCluster;
   static private FileSystem fs;
   static final private int NUM_OF_PATHS = 4;
   static final String USER_DIR = "/user/"+System.getProperty("user.name");
   private Path[] path = new Path[NUM_OF_PATHS];
-
-  @Override
-  protected void setUp() throws Exception {
+
+  @Before
+  public void setUp() throws Exception {
     try {
       Configuration conf = new HdfsConfiguration();
       dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -59,13 +63,14 @@ public class TestGlobPaths extends TestC
     }
   }

-  @Override
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
     }
   }

+  @Test
   public void testPathFilter() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
@@ -78,6 +83,7 @@ public class TestGlobPaths extends TestC
     }
   }

+  @Test
   public void testPathFilterWithFixedLastComponent() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
@@ -91,6 +97,7 @@ public class TestGlobPaths extends TestC
     }
   }

+  @Test
   public void testGlob() throws Exception {
     //pTestEscape(); // need to wait until HADOOP-1995 is fixed
     pTestJavaRegexSpecialChars();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Wed Jul 18 03:46:28 2012
@@ -18,6 +18,9 @@

 package org.apache.hadoop.fs;

+import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;
+
 import java.io.IOException;
 import java.net.URISyntaxException;

@@ -27,8 +30,8 @@ import org.apache.hadoop.fs.Options.Rena
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -37,8 +40,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;

-import static org.apache.hadoop.fs.FileContextTestHelper.*;
-
 public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
   private static MiniDFSCluster cluster;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Wed Jul 18 03:46:28 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java Wed Jul 18 03:46:28 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.fs;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -25,19 +28,15 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;

-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
-import org.apache.hadoop.fs.Path;
+import org.junit.Test;

 /**
  * Test of the URL stream handler factory.
  */
-public class TestUrlStreamHandler extends TestCase {
+public class TestUrlStreamHandler {

   /**
    * Test opening and reading from an InputStream through a hdfs:// URL.
@@ -47,6 +46,7 @@ public class TestUrlStreamHandler extend
    *
    * @throws IOException
    */
+  @Test
   public void testDfsUrls() throws IOException {

     Configuration conf = new HdfsConfiguration();
@@ -105,6 +105,7 @@ public class TestUrlStreamHandler extend
    * @throws IOException
    * @throws URISyntaxException
    */
+  @Test
   public void testFileUrls() throws IOException, URISyntaxException {
     // URLStreamHandler is already set in JVM by testDfsUrls()
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java Wed Jul 18 03:46:28 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.loadGenerator;

+import static org.junit.Assert.assertEquals;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
@@ -27,9 +29,6 @@ import org.apache.hadoop.conf.Configured
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-
-import static org.junit.Assert.*;
-
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
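This commit is a one-for-one translation, but the JUnit 4 annotations also accept parameters with no direct JUnit 3 equivalent. A hypothetical illustration (not drawn from the hunks in this commit) of two commonly useful ones:

    import java.io.FileInputStream;
    import java.io.FileNotFoundException;

    import org.junit.Test;

    public class AnnotationExtrasTest {
      // Passes only if the named exception is thrown, replacing the
      // JUnit 3 try/fail()/catch idiom.
      @Test(expected = FileNotFoundException.class)
      public void testMissingFile() throws Exception {
        new FileInputStream("/no/such/file");
      }

      // Fails if the method runs longer than the timeout (milliseconds),
      // which keeps one hung test from stalling a whole suite.
      @Test(timeout = 60000)
      public void testFinishesQuickly() throws Exception {
        Thread.sleep(10);
      }
    }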
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java Wed Jul 18 03:46:28 2012
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.fs.permission;

-import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

-import junit.framework.TestCase;
+import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,8 +35,9 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;

-public class TestStickyBit extends TestCase {
+public class TestStickyBit {

   static UserGroupInformation user1 =
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
@@ -158,6 +162,7 @@ public class TestStickyBit extends TestC
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
   }

+  @Test
   public void testGeneralSBBehavior() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     try {
@@ -195,6 +200,7 @@ public class TestStickyBit extends TestC
    * Test that one user can't rename/move another user's file when the sticky
    * bit is set.
    */
+  @Test
   public void testMovingFiles() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;

@@ -243,6 +249,7 @@ public class TestStickyBit extends TestC
    * the sticky bit back on re-start, and that no extra sticky bits appear after
    * re-start.
    */
+  @Test
   public void testStickyBitPersistence() throws IOException {
     MiniDFSCluster cluster = null;
     try {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java Wed Jul 18 03:46:28 2012
@@ -18,6 +18,19 @@

 package org.apache.hadoop.fs.viewfs;

+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -25,38 +38,17 @@ import java.net.URISyntaxException;

 import javax.security.auth.login.LoginException;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;

 /**
  * Tests for viewfs implementation of default fs level values.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Wed Jul 18 03:46:28 2012
@@ -23,6 +23,9 @@ package org.apache.hadoop.fs.viewfs;
  * Since viewfs has overlayed ViewFsFileStatus, we ran into
  * serialization problems. THis test is test the fix.
  */
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -40,11 +43,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.UserGroupInformation;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;

 public class TestViewFsFileStatusHdfs {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java Wed Jul 18 03:46:28 2012
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Random;

-import junit.framework.Assert;
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -121,16 +120,16 @@ public class AppendTestUtil {
       FSDataInputStream in = fs.open(p);
       if (in.getWrappedStream() instanceof DFSInputStream) {
         long len = ((DFSInputStream)in.getWrappedStream()).getFileLength();
-        TestCase.assertEquals(length, len);
+        assertEquals(length, len);
       } else {
-        TestCase.assertEquals(length, status.getLen());
+        assertEquals(length, status.getLen());
       }

       for(i++; i < length; i++) {
-        TestCase.assertEquals((byte)i, (byte)in.read());
+        assertEquals((byte)i, (byte)in.read());
       }
       i = -(int)length;
-      TestCase.assertEquals(-1, in.read()); //EOF
+      assertEquals(-1, in.read()); //EOF
       in.close();
     } catch(IOException ioe) {
       throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
@@ -175,7 +174,7 @@ public class AppendTestUtil {
   private static void checkData(final byte[] actual, int from,
                                 final byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected "+ expected[from+idx]+" actual "+actual[idx], expected[from+idx], actual[idx]); actual[idx] = 0; @@ -189,7 +188,7 @@ public class AppendTestUtil { final FSDataOutputStream out = fs.create(p, (short)1); out.write(bytes); out.close(); - Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(bytes.length, fs.getFileStatus(p).getLen()); } for(int i = 2; i < 500; i++) { @@ -197,7 +196,7 @@ public class AppendTestUtil { final FSDataOutputStream out = fs.append(p); out.write(bytes); out.close(); - Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); + assertEquals(i*bytes.length, fs.getFileStatus(p).getLen()); } } } \ No newline at end of file Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java Wed Jul 18 03:46:28 2012 @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.apache.log4j.Level; /** Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Wed Jul 18 03:46:28 2012 @@ -18,25 +18,26 @@ package org.apache.hadoop.hdfs; -import java.net.Socket; -import java.net.InetSocketAddress; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.DataOutputStream; -import java.util.Random; -import java.util.List; import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.List; +import java.util.Random; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.net.NetUtils; -import static org.junit.Assert.*; - /** * A helper class to setup the cluster, and get to BlockReader and DataNode for a block. 
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java Wed Jul 18 03:46:28 2012
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-
 import org.apache.log4j.Level;

 /**

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Wed Jul 18 03:46:28 2012
@@ -18,25 +18,26 @@

 package org.apache.hadoop.hdfs;

-import java.net.Socket;
-import java.net.InetSocketAddress;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataOutputStream;
-import java.util.Random;
-import java.util.List;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.List;
+import java.util.Random;

+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.net.NetUtils;

-import static org.junit.Assert.*;
-
 /**
  * A helper class to setup the cluster, and get to BlockReader and DataNode for a block.
  */

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jul 18 03:46:28 2012
@@ -55,7 +55,6 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil.Builder;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java Wed Jul 18 03:46:28 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;

+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,8 +29,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;

-import java.io.IOException;
-
 /** This is a comprehensive append test that tries
  *  all combinations of file length and number of appended bytes
  *  In each iteration, it creates a file of len1. Then reopen
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Wed Jul 18 03:46:28 2012
@@ -17,18 +17,20 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.junit.Assert.fail;
+
 import java.io.IOException;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-
-import static org.junit.Assert.*;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java Wed Jul 18 03:46:28 2012
@@ -17,27 +17,27 @@
  */
 package org.apache.hadoop.hdfs;

-import java.util.ArrayList;
+import static org.junit.Assert.assertEquals;

-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Test;

 /**
  * This test ensures that the balancer bandwidth is dynamically adjusted
  * correctly.
  */
-public class TestBalancerBandwidth extends TestCase {
+public class TestBalancerBandwidth {
   final static private Configuration conf = new Configuration();
   final static private int NUM_OF_DATANODES = 2;
   final static private int DEFAULT_BANDWIDTH = 1024*1024;
   public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class);

+  @Test
   public void testBalancerBandwidth() throws Exception {
     /* Set bandwidthPerSec to a low value of 1M bps. */
     conf.setLong(

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java Wed Jul 18 03:46:28 2012
@@ -17,26 +17,24 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;

-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.BlockMissingException;
+import org.junit.Test;

-public class TestBlockMissingException extends TestCase {
+public class TestBlockMissingException {
   final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing");
   final static int NUM_DATANODES = 3;

@@ -47,6 +45,7 @@ public class TestBlockMissingException e
   /**
    * Test DFS Raid
    */
+  @Test
   public void testBlockMissingException() throws Exception {
     LOG.info("Test testBlockMissingException started.");
     long blockSize = 1024L;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1362753&r1=1362752&r2=1362753&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Wed Jul 18 03:46:28 2012
@@ -17,24 +17,26 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.util.ArrayList;

-import junit.framework.TestCase;
-
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.junit.Test;

 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the
  * NameNode. This counter is supposed to keep track of blocks currently
  * scheduled to a datanode.
*/ -public class TestBlocksScheduledCounter extends TestCase { +public class TestBlocksScheduledCounter { + @Test public void testBlocksScheduledCounter() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .build(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java Wed Jul 18 03:46:28 2012 @@ -18,21 +18,20 @@ package org.apache.hadoop.hdfs; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + import java.util.List; -import org.apache.hadoop.hdfs.DFSClient; import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.fs.Path; import org.apache.log4j.Level; - -import org.junit.Test; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.never; +import org.junit.Test; public class TestClientBlockVerification { Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Wed Jul 18 03:46:28 2012 @@ -26,11 +26,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.io.IOUtils; - -import org.junit.Test; import org.junit.Assert; +import org.junit.Test; /** * This tests pipeline recovery related client protocol works correct or not. 
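Every hunk in this revision applies the same mechanical JUnit 3 to JUnit 4 conversion: the junit.framework.TestCase superclass is dropped, each test method gains an @Test annotation, and the assertion methods formerly inherited from TestCase become static imports from org.junit.Assert (wildcard imports such as org.apache.hadoop.fs.* and org.junit.Assert.* are expanded to explicit imports along the way). A minimal sketch of the pattern, using a hypothetical ExampleTest class rather than any of the files in this commit:

// Before (JUnit 3): test discovery relied on extending TestCase and on
// the "test" method-name prefix.
//
//   import junit.framework.TestCase;
//
//   public class ExampleTest extends TestCase {
//     public void testAddition() { assertEquals(4, 2 + 2); }
//   }

// After (JUnit 4): no base class; the @Test annotation drives discovery
// and the assertion comes from a static import.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class ExampleTest {
  @Test
  public void testAddition() {
    assertEquals(4, 2 + 2);
  }
}

With @Test, method names no longer have to start with "test", although the commit keeps the existing names so the diffs stay minimal.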
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Wed Jul 18 03:46:28 2012 @@ -17,37 +17,33 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.spy; + +import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; -import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSInputStream; -import org.apache.hadoop.hdfs.SocketCache; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.io.IOUtils; - import org.apache.hadoop.security.token.Token; -import org.junit.Test; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.junit.Assert.*; - +import org.junit.Test; import org.mockito.Matchers; import org.mockito.Mockito; -import org.mockito.stubbing.Answer; import org.mockito.invocation.InvocationOnMock; -import static org.mockito.Mockito.spy; +import org.mockito.stubbing.Answer; /** * This class tests the client connection caching in a single node Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Wed Jul 18 03:46:28 2012 @@ -18,21 +18,23 @@ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.io.File; -import java.io.RandomAccessFile; import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Random; -import org.junit.Test; -import static org.junit.Assert.*; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import 
org.apache.hadoop.io.IOUtils; +import org.junit.Test; /** * A JUnit test for corrupted file handling. Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java Wed Jul 18 03:46:28 2012 @@ -24,20 +24,25 @@ */ package org.apache.hadoop.hdfs; -import java.io.IOException; -import java.util.ArrayList; -import junit.framework.TestCase; -import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.junit.Test; -public class TestDFSAddressConfig extends TestCase { +public class TestDFSAddressConfig { + @Test public void testDFSAddressConfig() throws IOException { Configuration conf = new HdfsConfiguration(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Wed Jul 18 03:46:28 2012 @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.fail; + import java.io.IOException; import java.io.OutputStream; -import org.junit.*; -import static org.junit.Assert.fail; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.junit.Test; /** Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original) +++ 
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Jul 18 03:46:28 2012 @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; @@ -38,8 +42,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -75,6 +77,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; +import org.junit.Test; import org.mockito.Mockito; import org.mockito.internal.stubbing.answers.ThrowsException; import org.mockito.invocation.InvocationOnMock; @@ -86,7 +89,7 @@ import com.google.common.base.Joiner; * These tests make sure that DFSClient retries fetching data from DFS * properly in case of errors. */ -public class TestDFSClientRetries extends TestCase { +public class TestDFSClientRetries { private static final String ADDRESS = "0.0.0.0"; final static private int PING_INTERVAL = 1000; final static private int MIN_SLEEP_TIME = 1000; @@ -146,6 +149,7 @@ public class TestDFSClientRetries extend * This makes sure that when DN closes clients socket after client had * successfully connected earlier, the data can still be fetched. */ + @Test public void testWriteTimeoutAtDataNode() throws IOException, InterruptedException { final int writeTimeout = 100; //milliseconds. @@ -198,6 +202,7 @@ public class TestDFSClientRetries extend * of times trying to add a block */ @SuppressWarnings("serial") + @Test public void testNotYetReplicatedErrors() throws IOException { final String exceptionMsg = "Nope, not replicated yet..."; @@ -242,6 +247,7 @@ public class TestDFSClientRetries extend * operation, and not over the lifetime of the stream. It is a regression * test for HDFS-127. */ + @Test public void testFailuresArePerOperation() throws Exception { long fileSize = 4096; @@ -317,6 +323,7 @@ public class TestDFSClientRetries extend * a client to safely retry a call and still produce a correct * file. See HDFS-3031. */ + @Test public void testIdempotentAllocateBlockAndClose() throws Exception { final String src = "/testIdempotentAllocateBlock"; Path file = new Path(src); @@ -457,6 +464,7 @@ public class TestDFSClientRetries extend /** * Test that a DFSClient waits for random time before retry on busy blocks. 
*/ + @Test public void testDFSClientRetriesOnBusyBlocks() throws IOException { System.out.println("Testing DFSClient random waiting on busy blocks."); @@ -700,6 +708,7 @@ public class TestDFSClientRetries extend public int get() { return counter; } } + @Test public void testGetFileChecksum() throws Exception { final String f = "/testGetFileChecksum"; final Path p = new Path(f); @@ -736,6 +745,7 @@ public class TestDFSClientRetries extend * RPC to the server and set rpcTimeout to less than n and ensure * that socketTimeoutException is obtained */ + @Test public void testClientDNProtocolTimeout() throws IOException { final Server server = new TestServer(1, true); server.start(); @@ -770,6 +780,7 @@ public class TestDFSClientRetries extend * read call, so the client should expect consecutive calls to behave the same * way. See HDFS-3067. */ + @Test public void testRetryOnChecksumFailure() throws UnresolvedLinkException, IOException { HdfsConfiguration conf = new HdfsConfiguration(); @@ -812,6 +823,7 @@ public class TestDFSClientRetries extend } /** Test client retry with namenode restarting. */ + @Test public void testNamenodeRestart() throws Exception { ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL); @@ -937,6 +949,7 @@ public class TestDFSClientRetries extend } } + @Test public void testMultipleLinearRandomRetry() { parseMultipleLinearRandomRetry(null, ""); parseMultipleLinearRandomRetry(null, "11"); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java Wed Jul 18 03:46:28 2012 @@ -17,17 +17,21 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + import java.io.File; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.junit.After; +import org.junit.Test; import com.google.common.collect.Lists; @@ -35,7 +39,7 @@ import com.google.common.collect.Lists; * This test ensures the appropriate response from the system when * the system is finalized. */ -public class TestDFSFinalize extends TestCase { +public class TestDFSFinalize { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSFinalize"); @@ -86,6 +90,7 @@ public class TestDFSFinalize extends Tes /** * This test attempts to finalize the NameNode and DataNode. 
*/ + @Test public void testFinalize() throws Exception { UpgradeUtilities.initialize(); @@ -125,8 +130,8 @@ public class TestDFSFinalize extends Tes } // end numDir loop } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java Wed Jul 18 03:46:28 2012 @@ -17,21 +17,27 @@ */ package org.apache.hadoop.hdfs; -import junit.framework.TestCase; -import java.io.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.DataOutputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Time; +import org.junit.Test; /** * This class tests that the DFS command mkdirs cannot create subdirectories * from a file when passed an illegal path. HADOOP-281. */ -public class TestDFSMkdirs extends TestCase { +public class TestDFSMkdirs { private void writeFile(FileSystem fileSys, Path name) throws IOException { DataOutputStream stm = fileSys.create(name); @@ -43,6 +49,7 @@ public class TestDFSMkdirs extends TestC * Tests mkdirs can create a directory that does not exist and will * not create a subdirectory off a file. */ + @Test public void testDFSMkdirs() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); @@ -80,6 +87,7 @@ public class TestDFSMkdirs extends TestC /** * Tests mkdir will not create directory when parent is missing. 
*/ + @Test public void testMkdir() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java Wed Jul 18 03:46:28 2012 @@ -17,14 +17,15 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Random; -import junit.framework.AssertionFailedError; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -35,13 +36,15 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; /** Unit tests for permission */ -public class TestDFSPermission extends TestCase { +public class TestDFSPermission { public static final Log LOG = LogFactory.getLog(TestDFSPermission.class); final private static Configuration conf = new HdfsConfiguration(); @@ -106,13 +109,13 @@ public class TestDFSPermission extends T } } - @Override + @Before public void setUp() throws IOException { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); } - @Override + @After public void tearDown() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -122,6 +125,7 @@ public class TestDFSPermission extends T /** This tests if permission setting in create, mkdir, and * setPermission works correctly */ + @Test public void testPermissionSetting() throws Exception { testPermissionSetting(OpType.CREATE); // test file creation testPermissionSetting(OpType.MKDIRS); // test directory creation @@ -257,6 +261,7 @@ public class TestDFSPermission extends T * check that ImmutableFsPermission can be used as the argument * to setPermission */ + @Test public void testImmutableFsPermission() throws IOException { fs = FileSystem.get(conf); @@ -266,6 +271,7 @@ public class TestDFSPermission extends T } /* check if the ownership of a file/directory is set correctly */ + @Test public void testOwnership() throws Exception { testOwnership(OpType.CREATE); // test file creation testOwnership(OpType.MKDIRS); // test directory creation @@ -354,6 +360,7 @@ public class TestDFSPermission extends T /* Check if namenode performs permission checking correctly for * superuser, file owner, group owner, and other users */ + @Test public void testPermissionChecking() throws Exception { try { fs = 
FileSystem.get(conf); @@ -533,7 +540,7 @@ public class TestDFSPermission extends T } catch(AccessControlException e) { assertTrue(expectPermissionDeny()); } - } catch (AssertionFailedError ae) { + } catch (AssertionError ae) { logPermissions(); throw ae; } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java Wed Jul 18 03:46:28 2012 @@ -16,6 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -26,8 +28,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.junit.Test; -public class TestDFSRemove extends junit.framework.TestCase { +public class TestDFSRemove { final Path dir = new Path("/test/remove/"); void list(FileSystem fs, String name) throws IOException { @@ -51,6 +54,7 @@ public class TestDFSRemove extends junit return total; } + @Test public void testRemove() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java Wed Jul 18 03:46:28 2012 @@ -16,6 +16,9 @@ * limitations under the License. 
*/ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.DataOutputStream; import java.io.IOException; @@ -25,8 +28,9 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.Test; -public class TestDFSRename extends junit.framework.TestCase { +public class TestDFSRename { static int countLease(MiniDFSCluster cluster) { return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease(); } @@ -46,6 +50,7 @@ public class TestDFSRename extends junit a_out.close(); } + @Test public void testRename() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1362753&r1=1362752&r2=1362753&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Wed Jul 18 03:46:28 2012 @@ -19,22 +19,25 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.util.Collections; import java.util.List; -import junit.framework.TestCase; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Test; import com.google.common.base.Charsets; import com.google.common.collect.Lists; @@ -44,7 +47,7 @@ import com.google.common.collect.Lists; * the system when the system is rolled back under various storage state and * version conditions. */ -public class TestDFSRollback extends TestCase { +public class TestDFSRollback { private static final Log LOG = LogFactory.getLog( "org.apache.hadoop.hdfs.TestDFSRollback"); @@ -131,6 +134,7 @@ public class TestDFSRollback extends Tes * This test attempts to rollback the NameNode and DataNode under * a number of valid and invalid conditions. 
*/ + @Test public void testRollback() throws Exception { File[] baseDirs; UpgradeUtilities.initialize(); @@ -299,8 +303,8 @@ public class TestDFSRollback extends Tes } } - @Override - protected void tearDown() throws Exception { + @After + public void tearDown() throws Exception { LOG.info("Shutting down MiniDFSCluster"); if (cluster != null) cluster.shutdown(); }
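The fixture changes in TestDFSFinalize, TestDFSPermission and TestDFSRollback above are the same conversion applied to lifecycle methods: the protected setUp()/tearDown() overrides inherited from TestCase become public methods annotated with @Before and @After, which JUnit 4 runs around every @Test method. The TestDFSPermission hunk also replaces the caught junit.framework.AssertionFailedError with java.lang.AssertionError, because JUnit 4 assertions signal failure with the latter. A short sketch of both changes, with placeholder names rather than the real test code:

import static org.junit.Assert.assertNotNull;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleLifecycleTest {
  private StringBuilder resource;

  @Before  // replaces the JUnit 3 setUp() override; must be public void
  public void setUp() {
    resource = new StringBuilder("ready");
  }

  @After   // replaces the JUnit 3 tearDown() override
  public void tearDown() {
    resource = null;
  }

  @Test
  public void testResourceIsInitialized() {
    try {
      assertNotNull(resource);
    } catch (AssertionError ae) {
      // JUnit 4 failures arrive as java.lang.AssertionError, which is
      // why TestDFSPermission's catch block is rewritten above.
      throw ae;
    }
  }
}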