hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1446832 [3/3] - in /hadoop/common/branches/HDFS-347/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/java...
Date: Sat, 16 Feb 2013 01:12:16 GMT
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Sat Feb 16 01:12:07 2013
@@ -23,7 +23,10 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -49,13 +52,13 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsUtils;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -64,12 +67,14 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
@@ -208,7 +213,7 @@ public class TestDFSClientRetries {
    * Verify that client will correctly give up after the specified number
    * of times trying to add a block
    */
-  @SuppressWarnings("serial")
+  @SuppressWarnings({ "serial", "unchecked" })
   @Test
   public void testNotYetReplicatedErrors() throws IOException
   { 
@@ -235,7 +240,22 @@ public class TestDFSClientRetries {
     when(mockNN.addBlock(anyString(), 
                          anyString(),
                          any(ExtendedBlock.class),
-                         any(DatanodeInfo[].class))).thenAnswer(answer);
+                         any(DatanodeInfo[].class),
+                         anyLong())).thenAnswer(answer);
+    
+    Mockito.doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010)).when(mockNN).getFileInfo(anyString());
+    
+    Mockito.doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010))
+        .when(mockNN)
+        .create(anyString(), (FsPermission) anyObject(), anyString(),
+            (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
+            anyShort(), anyLong());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);
@@ -369,7 +389,8 @@ public class TestDFSClientRetries {
           return ret2;
         }
       }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
-          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+          Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
+          Mockito.anyLong());
 
       doAnswer(new Answer<Boolean>() {
 
@@ -410,7 +431,8 @@ public class TestDFSClientRetries {
       // Make sure the mock was actually properly injected.
       Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
           Mockito.anyString(), Mockito.anyString(),
-          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+          Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
+          Mockito.anyLong());
       Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
           Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock>any());

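For context on the new stubbing above: DFSClient.create() now consults the
NameNode for an HdfsFileStatus (which carries the new file ID field), so a
bare mock is no longer enough. A minimal sketch of the pattern, assuming the
Hadoop 2.x test classpath and Mockito; the helper name is illustrative, not
part of the patch:

    // Illustrative sketch only: stub a mocked NameNode so DFSClient.create()
    // can complete. The trailing 1010 is the fileId that HdfsFileStatus now
    // carries.
    private static NamenodeProtocols newStubbedNameNode() throws IOException {
      NamenodeProtocols mockNN = Mockito.mock(NamenodeProtocols.class);
      HdfsFileStatus stub = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
          new FsPermission((short) 777), "owner", "group",
          new byte[0], new byte[0], 1010);
      // Both the existence check and the create() RPC must return a status.
      Mockito.doReturn(stub).when(mockNN).getFileInfo(Mockito.anyString());
      return mockNN;
    }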
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Sat Feb 16 01:12:07 2013
@@ -619,6 +619,16 @@ public class TestDFSUtil {
     
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
+
+    // Make sure that when FS_DEFAULT_NAME_KEY is configured with an IP
+    // address, it is automatically converted to the hostname
+    conf = new HdfsConfiguration();
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
+    uris = DFSUtil.getNameServiceUris(conf);
+    assertEquals(1, uris.size());
+    for (URI uri : uris) {
+      assertFalse(uri.getHost().equals("127.0.0.1"));
+    }
   }
   
   @Test

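The new assertion encodes the expectation that a raw IP address in
fs.defaultFS is canonicalized to a hostname. The same check in isolation, as
a sketch assuming DFSUtil from this branch (imports from org.apache.hadoop.fs,
org.apache.hadoop.hdfs and java.net):

    // Sketch: a raw IP in fs.defaultFS should come back as a hostname URI.
    public class DefaultFsCanonicalization {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
            "hdfs://127.0.0.1:8020");
        for (URI uri : DFSUtil.getNameServiceUris(conf)) {
          System.out.println(uri.getHost()); // a hostname, not "127.0.0.1"
        }
      }
    }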
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Sat Feb 16 01:12:07 2013
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -517,8 +518,8 @@ public class TestFileCreation {
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add one block to the file
-      LocatedBlock location = client.getNamenode().addBlock(file1.toString(), 
-          client.clientName, null, null);
+      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
+          client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
@@ -568,8 +569,8 @@ public class TestFileCreation {
       final Path f = new Path("/foo.txt");
       createFile(dfs, f, 3);
       try {
-        cluster.getNameNodeRpc().addBlock(f.toString(), 
-            client.clientName, null, null);
+        cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
+            null, null, INodeId.GRANDFATHER_INODE_ID);
         fail();
       } catch(IOException ioe) {
         FileSystem.LOG.info("GOOD!", ioe);

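Both hunks above show the widened RPC: ClientProtocol.addBlock() now takes a
fifth argument carrying the file's inode id. A sketch of the call shape,
assuming the protocol from this commit; the helper name is hypothetical:

    // Callers that do not track the inode id pass the sentinel
    // INodeId.GRANDFATHER_INODE_ID, preserving the old behaviour.
    static LocatedBlock addFirstBlock(ClientProtocol nn, String src,
        String clientName) throws IOException {
      return nn.addBlock(src, clientName,
          null,                           // no previous block yet
          null,                           // no excluded datanodes
          INodeId.GRANDFATHER_INODE_ID);  // fileId for id-unaware callers
    }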
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java Sat Feb 16 01:12:07 2013
@@ -43,7 +43,7 @@ public class TestFileLengthOnClusterRest
         .numDataNodes(2).build();
     HdfsDataInputStream in = null;
     try {
-      Path path = new Path(MiniDFSCluster.getBaseDirectory(), "test");
+      Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
       DistributedFileSystem dfs = (DistributedFileSystem) cluster
           .getFileSystem();
       FSDataOutputStream out = dfs.create(path);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Sat Feb 16 01:12:07 2013
@@ -88,7 +88,7 @@ public class TestGetBlocks {
   @Test
   public void testReadSelectNonStaleDatanode() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     long staleInterval = 30 * 1000 * 60;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
         staleInterval);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java Sat Feb 16 01:12:07 2013
@@ -183,8 +183,7 @@ public class TestLargeBlock {
     try {
 
       // create a new file in test data directory
-      Path file1 = new Path(System.getProperty("test.build.data") + "/" +
-          Long.toString(blockSize) + ".dat");
+      Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
       FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
       LOG.info("File " + file1 + " created with file size " +
           fileSize +

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Sat Feb 16 01:12:07 2013
@@ -18,6 +18,10 @@
 package org.apache.hadoop.hdfs;
 
 import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.anyShort;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -29,14 +33,19 @@ import java.security.PrivilegedException
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -85,9 +94,26 @@ public class TestLease {
 
       // We don't need to wait the lease renewer thread to act.
       // call renewLease() manually.
-      // make it look like lease has already expired.
+      // make it look like the soft limit has been exceeded.
       LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
-      dfs.lastLeaseRenewal = Time.now() - 300000;
+      dfs.lastLeaseRenewal = Time.now()
+          - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
+      try {
+        dfs.renewLease();
+      } catch (IOException e) {}
+
+      // Writes should continue to work while the lease is past the soft
+      // limit but not yet past the hard limit.
+      try {
+        d_out.write(buf, 0, 1024);
+        LOG.info("Write worked beyond the soft limit as expected.");
+      } catch (IOException e) {
+        Assert.fail("Write failed.");
+      }
+
+      // make it look like the hard limit has been exceeded.
+      dfs.lastLeaseRenewal = Time.now()
+          - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
       dfs.renewLease();
 
       // this should not work.
@@ -256,6 +282,7 @@ public class TestLease {
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Test
   public void testFactory() throws Exception {
     final String[] groups = new String[]{"supergroup"};
@@ -264,6 +291,20 @@ public class TestLease {
       ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
     }
 
+    Mockito.doReturn(
+        new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+            (short) 777), "owner", "group", new byte[0], new byte[0],
+            1010)).when(mcp).getFileInfo(anyString());
+    Mockito
+        .doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010))
+        .when(mcp)
+        .create(anyString(), (FsPermission) anyObject(), anyString(),
+            (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
+            anyShort(), anyLong());
+
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);
     FSDataOutputStream out1 = createFsOut(c1, "/out1");

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java Sat Feb 16 01:12:07 2013
@@ -38,6 +38,7 @@ public class TestListFilesInDFS extends 
 
   @BeforeClass
   public static void testSetUp() throws Exception {
+    setTestPaths(new Path("/tmp/TestListFilesInDFS"));
     cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fs.delete(TEST_DIR, true);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Sat Feb 16 01:12:07 2013
@@ -70,8 +70,8 @@ public class TestQuota {
       throw new DSQuotaExceededException(bytes, bytes);
     } catch(DSQuotaExceededException e) {
       
-      assertEquals("The DiskSpace quota is exceeded: quota=1.0k " +
-          "diskspace consumed=1.0k", e.getMessage());
+      assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
+          + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
     }
   }
   

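The updated expectation reflects the reworked quota message, which now prints
the raw byte count alongside a human-readable size for both the quota and the
consumption. A quick way to reproduce it, as a sketch assuming
org.apache.hadoop.hdfs.protocol.DSQuotaExceededException from this branch:

    // Sketch: the two-argument constructor is (quota, diskspace consumed).
    public class QuotaMessageDemo {
      public static void main(String[] args) {
        DSQuotaExceededException e = new DSQuotaExceededException(1024, 1024);
        // Expected: "The DiskSpace quota is exceeded: quota = 1024 B = 1 KB
        // but diskspace consumed = 1024 B = 1 KB"
        System.out.println(e.getMessage());
      }
    }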
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Sat Feb 16 01:12:07 2013
@@ -46,8 +46,10 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Tool;
 import org.junit.Test;
 
 /**
@@ -95,7 +97,6 @@ public class TestBalancer {
     DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
   }
 
-
   /* fill up a cluster with <code>numNodes</code> datanodes 
    * whose used space to be <code>size</code>
    */
@@ -301,10 +302,12 @@ public class TestBalancer {
    * @param racks - array of racks for original nodes in cluster
    * @param newCapacity - new node's capacity
    * @param newRack - new node's rack
+   * @param useTool - if true, run the test via the Cli with command-line
+   *   argument parsing, etc. Otherwise invoke the balancer API directly.
    * @throws Exception
    */
   private void doTest(Configuration conf, long[] capacities, String[] racks, 
-      long newCapacity, String newRack) throws Exception {
+      long newCapacity, String newRack, boolean useTool) throws Exception {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     cluster = new MiniDFSCluster.Builder(conf)
@@ -330,7 +333,11 @@ public class TestBalancer {
       totalCapacity += newCapacity;
 
       // run balancer and validate results
-      runBalancer(conf, totalUsedSpace, totalCapacity);
+      if (useTool) {
+        runBalancerCli(conf, totalUsedSpace, totalCapacity);
+      } else {
+        runBalancer(conf, totalUsedSpace, totalCapacity);
+      }
     } finally {
       cluster.shutdown();
     }
@@ -350,22 +357,38 @@ public class TestBalancer {
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
   }
   
+  private void runBalancerCli(Configuration conf,
+      long totalUsedSpace, long totalCapacity) throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+
+    final String[] args = { "-policy", "datanode" };
+    final Tool tool = new Cli();    
+    tool.setConf(conf);
+    final int r = tool.run(args); // start rebalancing
+    
+    assertEquals("Tools should exit 0 on success", 0, r);
+    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+    LOG.info("Rebalancing via the Cli.");
+    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
+  }
+  
   /** one-node cluster test*/
-  private void oneNodeTest(Configuration conf) throws Exception {
+  private void oneNodeTest(Configuration conf, boolean useTool) throws Exception {
     // add an empty node with half of the CAPACITY & the same rack
-    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
+    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, 
+            RACK0, useTool);
   }
   
   /** two-node cluster test */
   private void twoNodeTest(Configuration conf) throws Exception {
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
-        CAPACITY, RACK2);
+        CAPACITY, RACK2, false);
   }
   
   /** test using a user-supplied conf */
   public void integrationTest(Configuration conf) throws Exception {
     initConf(conf);
-    oneNodeTest(conf);
+    oneNodeTest(conf, false);
   }
   
   /**
@@ -401,7 +424,7 @@ public class TestBalancer {
   
   void testBalancer0Internal(Configuration conf) throws Exception {
     initConf(conf);
-    oneNodeTest(conf);
+    oneNodeTest(conf, false);
     twoNodeTest(conf);
   }
 
@@ -495,7 +518,18 @@ public class TestBalancer {
 
   }
 
-
+  /**
+   * Verify balancer exits 0 on success.
+   */
+  @Test(timeout=100000)
+  public void testExitZeroOnSuccess() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    
+    initConf(conf);
+    
+    oneNodeTest(conf, true);
+  }
+  
   /**
    * @param args
    */

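runBalancerCli() above drives the balancer through the generic Tool contract
rather than the internal API, which is what lets the test assert on a
process-style exit code. The same invocation path in isolation, as a sketch
assuming it lives in the org.apache.hadoop.hdfs.server.balancer package
(Balancer.Cli is not necessarily visible from elsewhere):

    package org.apache.hadoop.hdfs.server.balancer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.Tool;

    public class BalancerCliDemo {
      public static void main(String[] args) throws Exception {
        Tool tool = new Balancer.Cli();
        tool.setConf(new Configuration());
        // "-policy datanode": balance space across individual datanodes.
        int exitCode = tool.run(new String[] { "-policy", "datanode" });
        System.out.println("balancer exit code: " + exitCode); // 0 = success
      }
    }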
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Sat Feb 16 01:12:07 2013
@@ -67,7 +67,7 @@ public class TestRBWBlockInvalidation {
     try {
       final FSNamesystem namesystem = cluster.getNamesystem();
       FileSystem fs = cluster.getFileSystem();
-      Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
+      Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
       out = fs.create(testPath, (short) 2);
       out.writeBytes("HDFS-3157: " + testPath);
       out.hsync();

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Sat Feb 16 01:12:07 2013
@@ -88,9 +88,11 @@ public class TestReplicationPolicy {
         "test.build.data", "build/test/data"), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(baseDir, "name").getPath());
-    // Enable the checking for stale datanodes in the beginning
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
 
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
     DFSTestUtil.formatNameNode(conf);
     namenode = new NameNode(conf);
 
@@ -100,6 +102,8 @@ public class TestReplicationPolicy {
     // construct network topology
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       cluster.add(dataNodes[i]);
+      bm.getDatanodeManager().getHeartbeatManager().addDatanode(
+          dataNodes[i]);
     }
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
@@ -393,11 +397,11 @@ public class TestReplicationPolicy {
       throws Exception {
     try {
       namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
+        .setNumStaleNodes(NUM_OF_DATANODES);
       testChooseTargetWithMoreThanAvailableNodes();
     } finally {
       namenode.getNamesystem().getBlockManager().getDatanodeManager()
-      .setAvoidStaleDataNodesForWrite(false);
+        .setNumStaleNodes(0);
     }
   }
   
@@ -479,12 +483,12 @@ public class TestReplicationPolicy {
   
   @Test
   public void testChooseTargetWithStaleNodes() throws Exception {
-    // Enable avoidng writing to stale datanodes
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
     // Set dataNodes[0] as stale
     dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
-
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+    assertTrue(namenode.getNamesystem().getBlockManager()
+        .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
     DatanodeDescriptor[] targets;
     // We set the datanode[0] as stale, thus should choose datanode[1] since
     // datanode[1] is on the same rack with datanode[0] (writer)
@@ -503,9 +507,9 @@ public class TestReplicationPolicy {
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     
     // reset
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(false);
     dataNodes[0].setLastUpdate(Time.now());
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
   }
 
   /**
@@ -518,20 +522,20 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTargetWithHalfStaleNodes() throws Exception {
-    // Enable stale datanodes checking
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
     // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
     for (int i = 0; i < 3; i++) {
       dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
     }
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename, 0, dataNodes[0],
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    // We set the datanode[0] as stale, thus should choose datanode[1]
+    // Since we have 6 datanodes total, stale nodes should
+    // not be returned until we ask for more than 3 targets
     targets = replicator.chooseTarget(filename, 1, dataNodes[0],
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
@@ -557,18 +561,16 @@ public class TestReplicationPolicy {
     assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
     assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
 
-    // reset
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(false);
     for (int i = 0; i < dataNodes.length; i++) {
       dataNodes[i].setLastUpdate(Time.now());
     }
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
   }
 
   @Test
   public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
     String[] hosts = new String[]{"host1", "host2", "host3", 
@@ -598,7 +600,7 @@ public class TestReplicationPolicy {
           .getBlockManager().getDatanodeManager().getNumStaleNodes();
       assertEquals(numStaleNodes, 2);
       assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
           .getNamesystem().getBlockManager().getDatanodeManager()
@@ -627,7 +629,7 @@ public class TestReplicationPolicy {
       // According to our strategy, stale datanodes will be included for writing
       // to avoid hotspots
       assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());     
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       targets = replicator.chooseTarget(filename, 3,
           staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
@@ -650,7 +652,7 @@ public class TestReplicationPolicy {
           .getBlockManager().getDatanodeManager().getNumStaleNodes();
       assertEquals(numStaleNodes, 2);
       assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       targets = replicator.chooseTarget(filename, 3,
           staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);

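Taken together, the changes above replace the single stale-datanode switch
with separate read and write keys, and staleness is now recomputed by the
heartbeat monitor rather than toggled directly on the DatanodeManager. A
configuration sketch using the keys exactly as they appear in this diff:

    // Sketch: enable avoidance of stale datanodes for both reads and writes.
    static Configuration staleNodeAvoidanceConf() {
      Configuration conf = new HdfsConfiguration();
      conf.setBoolean(
          DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
      conf.setBoolean(
          DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
      // Treat a datanode as stale after 30 minutes without a heartbeat.
      conf.setLong(
          DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
          30 * 60 * 1000L);
      return conf;
    }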
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Sat Feb 16 01:12:07 2013
@@ -506,7 +506,11 @@ public abstract class FSImageTestUtil {
       props.load(fis);
       IOUtils.closeStream(fis);
   
-      props.setProperty(key, value);
+      if (value == null || value.isEmpty()) {
+        props.remove(key);
+      } else {
+        props.setProperty(key, value);
+      }
       
       out = new FileOutputStream(versionFile);
       props.store(out, null);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Sat Feb 16 01:12:07 2013
@@ -1058,7 +1058,8 @@ public class NNThroughputBenchmark {
     throws IOException {
       ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
-        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
+        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
+            prevBlock, null, INodeId.GRANDFATHER_INODE_ID);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sat Feb 16 01:12:07 2013
@@ -24,8 +24,10 @@ import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
@@ -39,6 +41,8 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.Test;
 
 public class TestINodeFile {
@@ -376,7 +380,7 @@ public class TestINodeFile {
    * @throws IOException
    */
   @Test
-  public void TestInodeId() throws IOException {
+  public void testInodeId() throws IOException {
 
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
@@ -396,9 +400,14 @@ public class TestINodeFile {
     assertTrue(fs.mkdirs(path));
     assertTrue(fsn.getLastInodeId() == 1002);
 
-    Path filePath = new Path("/test1/file");
-    fs.create(filePath);
+    // Use namenode rpc to create a file
+    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+    HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission(
+        (short) 0755), "client",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 1, 128 * 1024 * 1024L);
     assertTrue(fsn.getLastInodeId() == 1003);
+    assertTrue(fileStatus.getFileId() == 1003);
 
     // Rename doesn't increase inode id
     Path renamedPath = new Path("/test2");
@@ -412,4 +421,44 @@ public class TestINodeFile {
     cluster.waitActive();
     assertTrue(fsn.getLastInodeId() == 1003);
   }
+
+  @Test
+  public void testWriteToRenamedFile() throws IOException {
+
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+
+    Path path = new Path("/test1");
+    assertTrue(fs.mkdirs(path));
+
+    int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+    byte[] data = new byte[size];
+
+    // Create one file
+    Path filePath = new Path("/test1/file");
+    FSDataOutputStream fos = fs.create(filePath);
+
+    // Rename /test1 to test2, and recreate /test1/file
+    Path renamedPath = new Path("/test2");
+    fs.rename(path, renamedPath);
+    fs.create(filePath, (short) 1);
+
+    // Add new block should fail since /test1/file has a different fileId
+    try {
+      fos.write(data, 0, data.length);
+      // make sure addBlock() request gets to NN immediately
+      fos.hflush();
+
+      fail("Write should fail after rename");
+    } catch (Exception e) {
+      /* Ignore */
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

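testWriteToRenamedFile pins down why the new fileId argument matters: the
stream opened before the rename still quotes the original inode's id, and
after the rename-and-recreate the same path resolves to a different inode.
A hypothetical illustration of the server-side check this implies (the
helper and its exact message are not from this patch):

    // Hypothetical sketch: addBlock() can now reject a caller whose fileId
    // no longer matches the inode currently behind the path.
    static void checkFileId(String src, long clientFileId, long currentFileId)
        throws FileNotFoundException {
      if (clientFileId != INodeId.GRANDFATHER_INODE_ID
          && clientFileId != currentFileId) {
        throw new FileNotFoundException("File " + src
            + " was replaced after the stream was opened (inode id mismatch)");
      }
    }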
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Sat Feb 16 01:12:07 2013
@@ -309,6 +309,88 @@ public class TestNameEditsConfigs {
   }
 
   /**
+   * Test edits.dir.required configuration options.
+   * 1. Directory present in dfs.namenode.edits.dir.required but not in
+   *    dfs.namenode.edits.dir. Expected to fail.
+   * 2. Directory present in both dfs.namenode.edits.dir.required and
+   *    dfs.namenode.edits.dir. Expected to succeed.
+   * 3. Directory present only in dfs.namenode.edits.dir. Expected to
+   *    succeed.
+   */
+  @Test
+  public void testNameEditsRequiredConfigs() throws IOException {
+    MiniDFSCluster cluster = null;
+    File nameAndEditsDir = new File(base_dir, "name_and_edits");
+    File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
+
+    // 1
+    // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
+    // without adding it to dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+      fail("Successfully started cluster but should not have been able to.");
+    } catch (IllegalArgumentException iae) { // expect to fail
+      LOG.info("EXPECTED: cluster start failed due to bad configuration" + iae);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      cluster = null;
+    }
+
+    // 2
+    // Good configuration. Add a directory to both dfs.namenode.edits.dir.required
+    // and dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    // 3
+    // Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
+    // dfs.namenode.edits.dir.required.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
    * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir

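The rule exercised by case 1 above is that every directory named in
dfs.namenode.edits.dir.required must also appear in dfs.namenode.edits.dir.
A valid pairing, sketched with the same config keys (the file:// paths are
illustrative only):

    // Sketch: the required edits dir is a subset of the edits dir list.
    static Configuration requiredEditsDirConf() {
      Configuration conf = new HdfsConfiguration();
      conf.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
          "file:///data/1/edits", "file:///data/2/edits");
      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
          "file:///data/2/edits");
      return conf;
    }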
Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java Sat Feb 16 01:12:07 2013
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.collect.ImmutableMap;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 
 import org.junit.Test;
 import org.junit.Before;
@@ -51,7 +54,7 @@ public class TestSecondaryNameNodeUpgrad
     }
   }
 
-  private void doIt(String param, String val) throws IOException {
+  private void doIt(Map<String, String> paramsToCorrupt) throws IOException {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     SecondaryNameNode snn = null;
@@ -76,8 +79,12 @@ public class TestSecondaryNameNodeUpgrad
       snn.shutdown();
 
       for (File versionFile : versionFiles) {
-        System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
-        FSImageTestUtil.corruptVersionFile(versionFile, param, val);
+        for (Map.Entry<String, String> paramToCorrupt : paramsToCorrupt.entrySet()) {
+          String param = paramToCorrupt.getKey();
+          String val = paramToCorrupt.getValue();
+          System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
+          FSImageTestUtil.corruptVersionFile(versionFile, param, val);
+        }
       }
 
       snn = new SecondaryNameNode(conf);
@@ -94,13 +101,19 @@ public class TestSecondaryNameNodeUpgrad
 
   @Test
   public void testUpgradeLayoutVersionSucceeds() throws IOException {
-    doIt("layoutVersion", "-39");
+    doIt(ImmutableMap.of("layoutVersion", "-39"));
+  }
+
+  @Test
+  public void testUpgradePreFedSucceeds() throws IOException {
+    doIt(ImmutableMap.of("layoutVersion", "-19", "clusterID", "",
+          "blockpoolID", ""));
   }
 
   @Test
   public void testChangeNsIDFails() throws IOException {
     try {
-      doIt("namespaceID", "2");
+      doIt(ImmutableMap.of("namespaceID", "2"));
       Assert.fail("Should throw InconsistentFSStateException");
     } catch(IOException e) {
       GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Sat Feb 16 01:12:07 2013
@@ -82,7 +82,7 @@ public class TestNameNodeMetrics {
     CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, 
         "" + PERCENTILES_INTERVAL);
     // Enable stale DataNodes checking
-    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
       .getLogger().setLevel(Level.DEBUG);
   }

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Sat Feb 16 01:12:07 2013
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
@@ -42,9 +43,10 @@ public class TestJsonUtil {
   public void testHdfsFileStatus() {
     final long now = Time.now();
     final String parent = "/dir";
-    final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L<<26,
-        now, now + 10, new FsPermission((short)0644), "user", "group",
-        DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"));
+    final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
+        now, now + 10, new FsPermission((short) 0644), "user", "group",
+        DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
+        INodeId.GRANDFATHER_INODE_ID);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);

Modified: hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1446832&r1=1446831&r2=1446832&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-347/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Sat Feb 16 01:12:07 2013
@@ -1182,7 +1182,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^1\.0k\s+hdfs:///dir0/data1k</expected-output>
+          <expected-output>^1\.0 K\s+hdfs:///dir0/data1k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -15590,7 +15590,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]*</expected-output>
+          <expected-output>put: The DiskSpace quota of /dir1 is exceeded: quota = 1024 B = 1 KB but diskspace consumed = [0-9]+ B = [0-9.]+ [KMG]B*</expected-output>
         </comparator>
       </comparators>
     </test>


