hadoop-hdfs-commits mailing list archives

From: jgho...@apache.org
Subject: svn commit: r1025788 [3/3] - in /hadoop/hdfs/branches/HDFS-1052: ./ src/contrib/fuse-dfs/src/test/ src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ src/test/aop/org/apache/hadoop/fs/ src/test/aop/org/apache/hadoop/hdfs/ src/test/aop/org/apac...
Date: Wed, 20 Oct 2010 22:58:56 GMT
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Wed Oct 20 22:58:52 2010
@@ -212,7 +212,7 @@ public class TestDirectoryScanner extend
   }
   
   public void runTest(int parallelism) throws Exception {
-    cluster = new MiniDFSCluster(CONF, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).build();
     try {
       cluster.waitActive();
       fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();

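Every hunk in this message applies the same mechanical change: the positional MiniDFSCluster constructors are replaced with the MiniDFSCluster.Builder API. The sketch below is illustrative only and is not part of the patch; it shows how each positional argument maps onto a Builder call. The defaults of one datanode and formatting enabled are inferred from the hunks themselves and should be treated as assumptions rather than a statement of the Builder's contract.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDFSClusterBuilderSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();

        // Older positional form being removed:  new MiniDFSCluster(conf, 3, true, null);
        // Builder form used throughout this patch; each call mirrors one positional argument.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)   // second constructor argument (datanode count)
            .format(true)      // third constructor argument (format the name dirs)
            .racks(null)       // fourth constructor argument (rack assignments)
            .build();
        try {
          cluster.waitActive();   // wait for the mini cluster to come up
        } finally {
          cluster.shutdown();     // always tear the cluster down
        }
      }
    }

Where a test used the defaults, for example new MiniDFSCluster(conf, 1, true, null) in the TestDirectoryScanner hunk above, the patch drops the corresponding Builder calls and simply uses new MiniDFSCluster.Builder(conf).build().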
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Wed Oct 20 22:58:52 2010
@@ -56,7 +56,7 @@ public class TestDiskError extends TestC
     // bring up a cluster of 3
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     final int dnIndex = 0;
@@ -87,7 +87,7 @@ public class TestDiskError extends TestC
   public void testReplicationError() throws Exception {
     // bring up a cluster of 1
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     
@@ -160,7 +160,7 @@ public class TestDiskError extends TestC
 
     try {
       // Start the cluster
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
       // Check permissions on directories in 'dfs.data.dir'

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Wed Oct 20 22:58:52 2010
@@ -76,7 +76,7 @@ public class TestInterDatanodeProtocol {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 3, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
 
       //create a file
@@ -216,7 +216,7 @@ public class TestInterDatanodeProtocol {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 3, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
 
       //create a file

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Wed Oct 20 22:58:52 2010
@@ -46,7 +46,7 @@ public class TestWriteToReplica {
   // test close
   @Test
   public void testClose() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -65,7 +65,7 @@ public class TestWriteToReplica {
   // test append
   @Test
   public void testAppend() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -84,7 +84,7 @@ public class TestWriteToReplica {
   // test writeToRbw
   @Test
   public void testWriteToRbw() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -103,7 +103,7 @@ public class TestWriteToReplica {
   // test writeToTemporary
   @Test
   public void testWriteToTempoary() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Wed Oct 20 22:58:52 2010
@@ -117,7 +117,8 @@ public class TestBackupNode extends Test
     BackupNode backup = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(numDatanodes).build();
       fileSys = cluster.getFileSystem();
       //
       // verify that 'format' really blew away all pre-existing files
@@ -154,7 +155,8 @@ public class TestBackupNode extends Test
       //
       // Restart cluster and verify that file1 still exist.
       //
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+                                                .format(false).build();
       fileSys = cluster.getFileSystem();
       // check that file1 still exists
       checkFile(fileSys, file1, replication);
@@ -186,7 +188,7 @@ public class TestBackupNode extends Test
       // Restart cluster and verify that file2 exists and
       // file1 does not exist.
       //
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       fileSys = cluster.getFileSystem();
 
       assertTrue(!fileSys.exists(file1));
@@ -214,7 +216,7 @@ public class TestBackupNode extends Test
     BackupNode backup2 = null;
     try {
       // start name-node and backup node 1
-      cluster = new MiniDFSCluster(conf1, 0, true, null);
+      cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build();
       conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
       conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
       backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Wed Oct 20 22:58:52 2010
@@ -183,7 +183,7 @@ public class TestBlockTokenWithDFS exten
     Configuration conf = getConf(numDataNodes);
 
     try {
-      cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
       // set a short token lifetime (1 second)
@@ -239,7 +239,7 @@ public class TestBlockTokenWithDFS exten
     Configuration conf = getConf(numDataNodes);
 
     try {
-      cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
       // set a short token lifetime (1 second)
@@ -287,7 +287,7 @@ public class TestBlockTokenWithDFS exten
     Configuration conf = getConf(numDataNodes);
 
     try {
-      cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       assertEquals(numDataNodes, cluster.getDataNodes().size());
       // set a short token lifetime (1 second) initially

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Wed Oct 20 22:58:52 2010
@@ -50,7 +50,7 @@ public class TestBlockUnderConstruction 
   @BeforeClass
   public static void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     hdfs = (DistributedFileSystem)cluster.getFileSystem();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Wed Oct 20 22:58:52 2010
@@ -58,7 +58,7 @@ public class TestBlocksWithNotEnoughRack
     final Path FILE_PATH = new Path(FILE_NAME);
     //All datanodes are on the same rack
     String racks[] = {"/rack1","/rack1","/rack1",} ;
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, racks);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).racks(racks).build();
     try {
       // create a file with one block with a replication factor of 3
       final FileSystem fs = cluster.getFileSystem();
@@ -112,7 +112,7 @@ public class TestBlocksWithNotEnoughRack
     final Path FILE_PATH = new Path(FILE_NAME);
     //All datanodes are on the same rack
     String racks[] = {"/rack1","/rack1","/rack1",} ;
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, racks);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).racks(racks).build();
     try {
       // create a file with one block with a replication factor of 3
       final FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java Wed Oct 20 22:58:52 2010
@@ -79,7 +79,7 @@ public class TestCheckPointForSecurityTo
     DistributedFileSystem fs = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
       cluster.waitActive();
       fs = (DistributedFileSystem)(cluster.getFileSystem());
       FSNamesystem namesystem = cluster.getNamesystem();
@@ -118,7 +118,7 @@ public class TestCheckPointForSecurityTo
       cluster.shutdown();
       cluster = null;
 
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
       //Should be able to renew & cancel the delegation token after cluster restart
       try {
@@ -139,7 +139,7 @@ public class TestCheckPointForSecurityTo
       cluster.shutdown();
       cluster = null;
 
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
 
       namesystem = cluster.getNamesystem();
@@ -162,7 +162,7 @@ public class TestCheckPointForSecurityTo
       cluster.shutdown();
       cluster = null;
 
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
 
       namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Oct 20 22:58:52 2010
@@ -125,7 +125,7 @@ public class TestCheckpoint extends Test
     File first = new File(namedirs.iterator().next().getPath());
     removeOneNameDir(first);
     try {
-      cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
       cluster.shutdown();
       assertTrue(false);
     } catch (Throwable t) {
@@ -142,8 +142,9 @@ public class TestCheckpoint extends Test
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 1");
     Path file1 = new Path("checkpointxx.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .format(false).build();
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -178,7 +179,8 @@ public class TestCheckpoint extends Test
     // namenode restart accounted for the rolled edit logs.
     //
     System.out.println("Starting testSecondaryNamenodeError 2");
-    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+                                              .format(false).build();
     cluster.waitActive();
     // Also check that the edits file is empty here
     // and that temporary checkpoint files are gone.
@@ -219,8 +221,9 @@ public class TestCheckpoint extends Test
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 21");
     Path file1 = new Path("checkpointyy.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .format(false).build();
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -255,7 +258,7 @@ public class TestCheckpoint extends Test
     // namenode restart accounted for the rolled edit logs.
     //
     System.out.println("Starting testSecondaryNamenodeError 22");
-    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
     try {
@@ -278,8 +281,10 @@ public class TestCheckpoint extends Test
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 31");
     Path file1 = new Path("checkpointzz.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .format(false).build();
+
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -322,7 +327,7 @@ public class TestCheckpoint extends Test
     // namenode restart accounted for the twice-rolled edit logs.
     //
     System.out.println("Starting testSecondaryNamenodeError 32");
-    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
     try {
@@ -347,8 +352,9 @@ public class TestCheckpoint extends Test
     throws IOException {
     System.out.println("Starting testSecondaryFailsToReturnImage");
     Path file1 = new Path("checkpointRI.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .format(false).build();
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     FSImage image = cluster.getNameNode().getFSImage();
@@ -398,8 +404,9 @@ public class TestCheckpoint extends Test
     throws IOException {
     System.out.println("Starting testNameNodeImageSendFail");
     Path file1 = new Path("checkpointww.dat");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .format(false).build();
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -597,7 +604,7 @@ public class TestCheckpoint extends Test
     nn.stop(); nn = null;
     
     // Check that everything starts ok now.
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
     cluster.waitActive();
     cluster.shutdown();
   }
@@ -636,8 +643,9 @@ public class TestCheckpoint extends Test
 
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-    replication = (short)conf.getInt("dfs.replication", 3);  
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    replication = (short)conf.getInt("dfs.replication", 3);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes).build();
     cluster.waitActive();
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -670,7 +678,7 @@ public class TestCheckpoint extends Test
     //
     // Restart cluster and verify that file1 still exist.
     //
-    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
     try {
@@ -697,7 +705,7 @@ public class TestCheckpoint extends Test
     // Restart cluster and verify that file2 exists and
     // file1 does not exist.
     //
-    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
 
@@ -730,7 +738,7 @@ public class TestCheckpoint extends Test
     DistributedFileSystem fs = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
       fs = (DistributedFileSystem)(cluster.getFileSystem());
 
@@ -772,7 +780,7 @@ public class TestCheckpoint extends Test
       cluster.shutdown();
       cluster = null;
 
-      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
       cluster.waitActive();
       fs = (DistributedFileSystem)(cluster.getFileSystem());
       checkFile(fs, file, replication);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java Wed Oct 20 22:58:52 2010
@@ -36,7 +36,7 @@ public class TestComputeInvalidateWork e
   public void testCompInvalidate() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     final int NUM_OF_DATANODES = 3;
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
     try {
       cluster.waitActive();
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Wed Oct 20 22:58:52 2010
@@ -53,7 +53,7 @@ public class TestCorruptFilesJsp  {
       conf.setInt("dfs.datanode.directoryscan.interval", 1);
       // datanode sends block reports
       conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
       FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Wed Oct 20 22:58:52 2010
@@ -83,7 +83,7 @@ public class TestDeadDatanode {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
 
     // wait for datanode to be marked live

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Wed Oct 20 22:58:52 2010
@@ -82,7 +82,7 @@ public class TestDecommissioningStatus {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
     writeConfigFile(localFileSys, excludeFile, null);
 
-    cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Oct 20 22:58:52 2010
@@ -96,7 +96,7 @@ public class TestEditLog extends TestCas
     FileSystem fileSys = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Wed Oct 20 22:58:52 2010
@@ -172,7 +172,7 @@ public class TestEditLogRace {
 
     AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
     try {
-      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
@@ -240,7 +240,7 @@ public class TestEditLogRace {
 
     AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
     try {
-      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java Wed Oct 20 22:58:52 2010
@@ -85,7 +85,7 @@ public class TestFileLimit extends TestC
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     FSNamesystem namesys = cluster.getNamesystem();
     try {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Oct 20 22:58:52 2010
@@ -96,7 +96,7 @@ public class TestFsck extends TestCase {
       final long precision = 1L;
       conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
       conf.setLong("dfs.blockreport.intervalMsec", 10000L);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       fs = cluster.getFileSystem();
       final String fileName = "/srcdat";
       util.createFiles(fs, fileName);
@@ -114,7 +114,7 @@ public class TestFsck extends TestCase {
       cluster.shutdown();
       
       // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
       outStr = runFsck(conf, 1, true, "/");
       // expect the result is corrupt
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
@@ -166,7 +166,7 @@ public class TestFsck extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setLong("dfs.blockreport.intervalMsec", 10000L);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       util.waitReplication(fs, "/srcdat", (short)3);
@@ -189,7 +189,7 @@ public class TestFsck extends TestCase {
     MiniDFSCluster cluster = null;
     try {
       // Create a cluster with the current user, write some files
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       final MiniDFSCluster c2 = cluster;
       final String dir = "/dfsck";
       final Path dirpath = new Path(dir);
@@ -236,7 +236,7 @@ public class TestFsck extends TestCase {
       Configuration conf = new HdfsConfiguration();
       conf.setLong("dfs.blockreport.intervalMsec", 10000L);
       conf.setInt("dfs.datanode.directoryscan.interval", 1);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       String topDir = "/srcdat";
       fs = cluster.getFileSystem();
       cluster.waitActive();
@@ -295,7 +295,7 @@ public class TestFsck extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setLong("dfs.blockreport.intervalMsec", 10000L);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       String topDir = "/srcdat";
       String randomString = "HADOOP  ";
       fs = cluster.getFileSystem();
@@ -350,7 +350,7 @@ public class TestFsck extends TestCase {
 
     MiniDFSCluster cluster = null;
     try {
-    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testCorruptBlock");
@@ -423,7 +423,7 @@ public class TestFsck extends TestCase {
     try {
       // bring up a one-node cluster
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       String fileName = "/test.txt";
       Path filePath = new Path(fileName);
       FileSystem fs = cluster.getFileSystem();
@@ -460,7 +460,7 @@ public class TestFsck extends TestCase {
 
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
       DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Wed Oct 20 22:58:52 2010
@@ -65,7 +65,7 @@ public class TestHDFSConcat {
   
   @Before
   public void startUpCluster() throws IOException {
-    cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = (DistributedFileSystem) cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Wed Oct 20 22:58:52 2010
@@ -43,7 +43,7 @@ public class TestHeartbeatHandling exten
    */
   public void testHeartbeat() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       cluster.waitActive();
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java Wed Oct 20 22:58:52 2010
@@ -207,7 +207,7 @@ public class TestLargeDirectoryDelete {
   
   @Test
   public void largeDelete() throws Throwable {
-    mc = new MiniDFSCluster(CONF, 1, true, null);
+    mc = new MiniDFSCluster.Builder(CONF).build();
     try {
       mc.waitActive();
       Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Wed Oct 20 22:58:52 2010
@@ -55,7 +55,7 @@ public class TestListCorruptFileBlocks e
       Configuration conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
       conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
       // create two files with one block each
@@ -126,7 +126,7 @@ public class TestListCorruptFileBlocks e
 
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
       DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
@@ -213,7 +213,7 @@ public class TestListCorruptFileBlocks e
       Configuration conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.directoryscan.interval", 15); // datanode scans directories
       conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
       final int maxCorruptFileBlocks = 
         FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Wed Oct 20 22:58:52 2010
@@ -68,7 +68,7 @@ public class TestMetaSave {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1L);
-    cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Wed Oct 20 22:58:52 2010
@@ -135,8 +135,10 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointNameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     // Manage our own dfs directories
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, true, false, true, null,
-                                  null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .manageNameDfsDirs(false).build();
+
     cluster.waitActive();
     secondary = startSecondaryNameNode(conf);
     fileSys = cluster.getFileSystem();
@@ -167,8 +169,11 @@ public class TestNameEditsConfigs extend
              "," + checkpointNameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     // Manage our own dfs directories. Do not format.
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true, 
-                                  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
+                                              .format(false)
+                                              .manageNameDfsDirs(false)
+                                              .build();
+
     cluster.waitActive();
     secondary = startSecondaryNameNode(conf);
     fileSys = cluster.getFileSystem();
@@ -209,8 +214,12 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
-                                  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .format(false)
+                                .manageNameDfsDirs(false)
+                                .build();
+
     cluster.waitActive();
     secondary = startSecondaryNameNode(conf);
     fileSys = cluster.getFileSystem();
@@ -248,8 +257,11 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() +
         "," + checkpointNameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
-                                  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .format(false)
+                                .manageNameDfsDirs(false)
+                                .build();
     cluster.waitActive();
     secondary = startSecondaryNameNode(conf);
     fileSys = cluster.getFileSystem();
@@ -297,8 +309,10 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     // Manage our own dfs directories
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, true, false, true, null,
-                                  null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .manageNameDfsDirs(false)
+                                .build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
 
@@ -322,8 +336,11 @@ public class TestNameEditsConfigs extend
               "," + newEditsDir.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     // Manage our own dfs directories. Do not format.
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true, 
-                                  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .format(false)
+                                .manageNameDfsDirs(false)
+                                .build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
 
@@ -344,8 +361,11 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
-    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
-                                  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(NUM_DATA_NODES)
+                                .format(false)
+                                .manageNameDfsDirs(false)
+                                .build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
 
@@ -368,8 +388,11 @@ public class TestNameEditsConfigs extend
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     try {
-      cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
-                                  null, null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(NUM_DATA_NODES)
+                                  .format(false)
+                                  .manageNameDfsDirs(false)
+                                  .build();
       assertTrue(false);
     } catch (IOException e) { // expect to fail
       System.out.println("cluster start failed due to missing " +
@@ -385,8 +408,11 @@ public class TestNameEditsConfigs extend
              "," + nameAndEdits.getPath());
     replication = (short)conf.getInt("dfs.replication", 3);
     try {
-      cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
-                                   null, null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(NUM_DATA_NODES)
+                                  .format(false)
+                                  .manageNameDfsDirs(false)
+                                  .build();
       assertTrue(false);
     } catch (IOException e) { // expect to fail
       System.out.println("cluster start failed due to missing latest name dir");

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java Wed Oct 20 22:58:52 2010
@@ -42,7 +42,7 @@ public class TestNameNodeJspHelper {
   @Before
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    cluster  = new MiniDFSCluster(conf, 1, true, null);
+    cluster  = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
   }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed Oct 20 22:58:52 2010
@@ -39,7 +39,7 @@ public class TestNameNodeMXBean {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Wed Oct 20 22:58:52 2010
@@ -53,7 +53,7 @@ public class TestNamenodeCapacityReport 
     conf.setLong("dfs.datanode.du.reserved", reserved);
     
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
       
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java Wed Oct 20 22:58:52 2010
@@ -43,7 +43,7 @@ public class TestNodeCount extends TestC
     final Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = (short)2;
     final MiniDFSCluster cluster = 
-      new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
+      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
     try {
       final FSNamesystem namesystem = cluster.getNamesystem();
       final FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Wed Oct 20 22:58:52 2010
@@ -44,7 +44,7 @@ public class TestOverReplicatedBlocks ex
     Configuration conf = new HdfsConfiguration();
     conf.setLong("dfs.blockreport.intervalMsec", 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
 
     try {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Wed Oct 20 22:58:52 2010
@@ -92,7 +92,7 @@ public class TestSecurityTokenEditLog ex
     FileSystem fileSys = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Wed Oct 20 22:58:52 2010
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption.IMPORT;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
@@ -33,11 +36,9 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
@@ -114,7 +115,9 @@ public class TestStartup extends TestCas
     SecondaryNameNode sn = null;
     
     try {
-      cluster = new MiniDFSCluster(0, config, 1, true, false, false,  null, null, null, null);
+      cluster = new MiniDFSCluster.Builder(config)
+                                  .manageDataDfsDirs(false)
+                                  .manageNameDfsDirs(false).build();
       cluster.waitActive();
 
       LOG.info("--starting Secondary Node");
@@ -190,7 +193,11 @@ public class TestStartup extends TestCas
     LOG.info("-- about to start DFS cluster");
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(0, config, 1, false, false, false,  StartupOption.IMPORT, null, null, null);
+      cluster = new MiniDFSCluster.Builder(config)
+                                  .format(false)
+                                  .manageDataDfsDirs(false)
+                                  .manageNameDfsDirs(false)
+                                  .startupOption(IMPORT).build();
       cluster.waitActive();
       LOG.info("--NN started with checkpoint option");
       NameNode nn = cluster.getNameNode();
@@ -300,7 +307,9 @@ public class TestStartup extends TestCas
     SecondaryNameNode sn = null;
     NameNode nn = null;
     try {
-      cluster = new MiniDFSCluster(0, config, 1, true, false, false,  null, null, null, null);
+      cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false)
+                                                  .manageNameDfsDirs(false)
+                                                  .build();
       cluster.waitActive();
       nn = cluster.getNameNode();
       assertNotNull(nn);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Wed Oct 20 22:58:52 2010
@@ -285,7 +285,9 @@ public class TestStorageRestore extends 
   @SuppressWarnings("deprecation")
   public void testStorageRestore() throws Exception {
     int numDatanodes = 2;
-    cluster = new MiniDFSCluster(0, config, numDatanodes, true, false, true,  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes)
+                                                .manageNameDfsDirs(false)
+                                                .build();
     cluster.waitActive();
     
     SecondaryNameNode secondary = new SecondaryNameNode(config);
@@ -331,10 +333,9 @@ public class TestStorageRestore extends 
    * @throws Exception
    */
   public void testDfsAdminCmd() throws Exception {
-    int numDatanodes = 2;
-    
-    
-    cluster = new MiniDFSCluster(0, config, numDatanodes, true, false, true,  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(config).
+                                 numDataNodes(2).
+                                 manageNameDfsDirs(false).build();
     cluster.waitActive();
     try {
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java Wed Oct 20 22:58:52 2010
@@ -34,7 +34,7 @@ public class TestUnderReplicatedBlocks e
     final short REPLICATION_FACTOR = 2;
     final String FILE_NAME = "/testFile";
     final Path FILE_PATH = new Path(FILE_NAME);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR+1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
     try {
       // create a file with one block with a replication factor of 2
       final FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java Wed Oct 20 22:58:52 2010
@@ -51,7 +51,7 @@ public class TestNNMetricFilesInGetListi
 
   @Override
   protected void setUp() throws Exception {
-    cluster = new MiniDFSCluster(CONF, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
     cluster.getNameNode();
     nnMetrics = NameNode.getNameNodeMetrics();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed Oct 20 22:58:52 2010
@@ -71,7 +71,7 @@ public class TestNameNodeMetrics extends
   
   @Override
   protected void setUp() throws Exception {
-    cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
     cluster.waitActive();
     namesystem = cluster.getNamesystem();
     fs = (DistributedFileSystem) cluster.getFileSystem();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Wed Oct 20 22:58:52 2010
@@ -103,7 +103,7 @@ public class TestOfflineImageViewer exte
     File orig = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       FileSystem hdfs = cluster.getFileSystem();
       
       int filesize = 256;

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestPermission.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestPermission.java Wed Oct 20 22:58:52 2010
@@ -102,7 +102,7 @@ public class TestPermission extends Test
     FileSystem fs = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 3, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
       fs = FileSystem.get(conf);
       FsPermission rootPerm = checkPermission(fs, "/", null);
@@ -150,7 +150,7 @@ public class TestPermission extends Test
   public void testFilePermision() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
 
     try {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/security/TestRefreshUserMappings.java Wed Oct 20 22:58:52 2010
@@ -79,7 +79,7 @@ public class TestRefreshUserMappings {
     Groups.getUserToGroupsMappingService(config);
     
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java?rev=1025788&r1=1025787&r2=1025788&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java Wed Oct 20 22:58:52 2010
@@ -85,8 +85,7 @@ public class TestJMXGet extends TestCase
    */
   public void testNameNode() throws Exception {
     int numDatanodes = 2;
-    cluster = new MiniDFSCluster(0, config, numDatanodes, true, true, null, 
-        null, null);
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
     cluster.waitActive();
 
     writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
@@ -114,8 +113,7 @@ public class TestJMXGet extends TestCase
    */
   public void testDataNode() throws Exception {
     int numDatanodes = 2;
-    cluster = new MiniDFSCluster(0, config, numDatanodes, true, true, null,
-        null, null);
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
     cluster.waitActive();
 
     writeFile(cluster.getFileSystem(), new Path("/test"), 2);

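For readers following the hunks above, a minimal standalone sketch of the constructor-to-Builder migration this change applies throughout the tests is shown below. It relies only on the MiniDFSCluster.Builder API visible in the patch (numDataNodes, format, manageNameDfsDirs, manageDataDfsDirs, startupOption, build); the example class name is hypothetical and is not part of this commit.

    // Illustrative only: the class name is hypothetical; the API calls mirror the patch above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class BuilderMigrationExample {
      public void startAndStopCluster() throws Exception {
        Configuration conf = new HdfsConfiguration();

        // Old (deprecated) constructor form, as removed by this patch:
        //   MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);

        // Equivalent Builder form, as introduced by this patch:
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                    .numDataNodes(3)
                                                    .build();
        try {
          cluster.waitActive();
          // ... exercise the cluster ...
        } finally {
          cluster.shutdown();
        }
      }
    }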

