hadoop-hdfs-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1128459 [3/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/ ivy/ src/c++/libhdfs/ src/c++/libhdfs/tests/ src/contrib/ src/contrib/fuse-dfs/ src/contrib/fuse-dfs/src/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/java...
Date: Fri, 27 May 2011 21:12:05 GMT
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri May 27 21:12:02 2011
@@ -305,7 +305,7 @@ public class MiniDFSCluster {
    * Servers will be started on free ports.
    * <p>
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set dfs.name.dir and dfs.data.dir in the given conf.
+   * and have already set dfs.namenode.name.dir and dfs.datanode.data.dir in the given conf.
    * 
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
@@ -377,7 +377,7 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -407,7 +407,7 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -439,9 +439,9 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   *          be created and dfs.data.dir set to same in the conf
+   *          be created and dfs.datanode.data.dir set to same in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -706,7 +706,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -737,7 +737,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -862,7 +862,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -892,7 +892,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on

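Note on the hunks above: they track the global config key renames, dfs.name.dir to dfs.namenode.name.dir and dfs.data.dir to dfs.datanode.data.dir. A minimal sketch of what the updated javadoc asks of callers, assuming illustrative paths and a constructor variant that disables directory management (nothing below is part of this commit):

    Configuration conf = new HdfsConfiguration();
    File baseDir = new File("/tmp/test-dfs");  // illustrative base directory
    // The caller manages the directories, so set the renamed keys up front.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        new File(baseDir, "name").toURI().toString());
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        new File(baseDir, "data").toURI().toString());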
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java Fri May 27 21:12:02 2011
@@ -368,8 +368,7 @@ public class TestDFSShell extends TestCa
       argv[0] = "-ls";
       argv[1] = "/nonexistentfile";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" -lsr should fail ",
-          (ret < 0));
+      assertEquals(" -lsr should fail ", 1, ret);
       out.reset();
       srcFs.mkdirs(new Path("/testdir"));
       argv[0] = "-ls";
@@ -382,8 +381,7 @@ public class TestDFSShell extends TestCa
       argv[0] = "-ls";
       argv[1] = "/user/nonxistant/*";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" -ls on nonexistent glob returns -1",
-          (ret < 0));
+      assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
       out.reset();
       argv[0] = "-mkdir";
       argv[1] = "/testdir";
@@ -410,7 +408,7 @@ public class TestDFSShell extends TestCa
       argv[1] = "/testfile";
       argv[2] = "file";
       ret = ToolRunner.run(shell, argv);
-      assertTrue("mv failed to rename", ret == -1);
+      assertEquals("mv failed to rename", 1,  ret);
       out.reset();
       argv = new String[3];
       argv[0] = "-mv";
@@ -434,7 +432,7 @@ public class TestDFSShell extends TestCa
       srcFs.mkdirs(srcFs.getHomeDirectory());
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" no error ", (ret == 0));
+      assertEquals(" no error ", 0, ret);
       assertTrue("empty path specified",
           (returned.lastIndexOf("empty string") == -1));
     } finally {
@@ -468,19 +466,19 @@ public class TestDFSShell extends TestCa
       argv[0] = "-ls";
       argv[1] = dstFs.getUri().toString() + "/";
       int ret = ToolRunner.run(shell, argv);
-      assertTrue("ls works on remote uri ", (ret==0));
+      assertEquals("ls works on remote uri ", 0, ret);
       //check for rm -r 
       dstFs.mkdirs(new Path("/hadoopdir"));
       argv = new String[2];
       argv[0] = "-rmr";
       argv[1] = dstFs.getUri().toString() + "/hadoopdir";
       ret = ToolRunner.run(shell, argv);
-      assertTrue("-rmr works on remote uri " + argv[1], (ret==0));
+      assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
       //check du 
       argv[0] = "-du";
       argv[1] = dstFs.getUri().toString() + "/";
       ret = ToolRunner.run(shell, argv);
-      assertTrue("du works on remote uri ", (ret ==0));
+      assertEquals("du works on remote uri ", 0, ret);
       //check put
       File furi = new File(TEST_ROOT_DIR, "furi");
       createLocalFile(furi);
@@ -489,20 +487,20 @@ public class TestDFSShell extends TestCa
       argv[1] = furi.toString();
       argv[2] = dstFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" put is working ", (ret==0));
+      assertEquals(" put is working ", 0, ret);
       //check cp 
       argv[0] = "-cp";
       argv[1] = dstFs.getUri().toString() + "/furi";
       argv[2] = srcFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" cp is working ", (ret==0));
+      assertEquals(" cp is working ", 0, ret);
       assertTrue(srcFs.exists(new Path("/furi")));
       //check cat 
       argv = new String[2];
       argv[0] = "-cat";
       argv[1] = dstFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" cat is working ", (ret == 0));
+      assertEquals(" cat is working ", 0, ret);
       //check chown
       dstFs.delete(new Path("/furi"), true);
       dstFs.delete(new Path("/hadoopdir"), true);
@@ -519,15 +517,15 @@ public class TestDFSShell extends TestCa
       argv[0] = "-cat";
       argv[1] = "hdfs:///furi";
       ret = ToolRunner.run(shell, argv);
-      assertTrue(" default works for cat", (ret == 0));
+      assertEquals(" default works for cat", 0, ret);
       argv[0] = "-ls";
       argv[1] = "hdfs:///";
       ret = ToolRunner.run(shell, argv);
-      assertTrue("default works for ls ", (ret == 0));
+      assertEquals("default works for ls ", 0, ret);
       argv[0] = "-rmr";
       argv[1] = "hdfs:///furi";
       ret = ToolRunner.run(shell, argv);
-      assertTrue("default works for rm/rmr", (ret ==0));
+      assertEquals("default works for rm/rmr", 0, ret);
     } finally {
       System.setProperty("test.build.data", bak);
       if (null != srcCluster) {
@@ -651,7 +649,7 @@ public class TestDFSShell extends TestCa
       {
         String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
         try {   
-          assertEquals(-1, shell.run(args));
+          assertEquals(1, shell.run(args));
         } catch (Exception e) {
           System.err.println("Exception raised from DFSShell.run " +
                             e.getLocalizedMessage());
@@ -750,13 +748,22 @@ public class TestDFSShell extends TestCa
 
   //throws IOException instead of Exception as shell.run() does.
   private int runCmd(FsShell shell, String... args) throws IOException {
-    try {
-      return shell.run(args);
+    StringBuilder cmdline = new StringBuilder("RUN:");
+    for (String arg : args) cmdline.append(" " + arg);
+    LOG.info(cmdline.toString());
+    try {
+      int exitCode;
+      exitCode = shell.run(args);
+      LOG.info("RUN: "+args[0]+" exit=" + exitCode);
+      return exitCode;
     } catch (IOException e) {
+      LOG.error("RUN: "+args[0]+" IOException="+e.getMessage());
       throw e;
     } catch (RuntimeException e) {
+      LOG.error("RUN: "+args[0]+" RuntimeException="+e.getMessage());
       throw e;
     } catch (Exception e) {
+      LOG.error("RUN: "+args[0]+" Exception="+e.getMessage());
       throw new IOException(StringUtils.stringifyException(e));
     }
   }
@@ -1112,7 +1119,7 @@ public class TestDFSShell extends TestCa
           System.err.println("Exception raised from DFSShell.run " +
                              e.getLocalizedMessage());
         }
-        assertTrue(val == 0);
+        assertEquals(0, val);
 
         // this should fail
         String[] args1 = new String[3];
@@ -1126,7 +1133,7 @@ public class TestDFSShell extends TestCa
           System.err.println("Exception raised from DFSShell.run " +
                              e.getLocalizedMessage());
         }
-        assertTrue(val == -1);
+        assertEquals(1, val);
 
         // this should succeed
         args1[0] = "-cp";
@@ -1139,7 +1146,7 @@ public class TestDFSShell extends TestCa
           System.err.println("Exception raised from DFSShell.run " +
                              e.getLocalizedMessage());
         }
-        assertTrue(val == 0);
+        assertEquals(0, val);
       }
         
     } finally {
@@ -1206,7 +1213,7 @@ public class TestDFSShell extends TestCa
           args[0] = "-ls";
           args[1] = "/foo";
           int ret = ToolRunner.run(fshell, args);
-          assertEquals("returned should be -1", -1, ret);
+          assertEquals("returned should be 1", 1, ret);
           String str = out.toString();
           assertTrue("permission denied printed", 
                      str.indexOf("Permission denied") != -1);
@@ -1271,7 +1278,7 @@ public class TestDFSShell extends TestCa
       show("files=" + files);
       corrupt(files);
 
-      assertEquals(null, runner.run(-1));
+      assertEquals(null, runner.run(1));
       String corruptedcontent = runner.run(0, "-ignoreCrc");
       assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
       assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
@@ -1304,7 +1311,7 @@ public class TestDFSShell extends TestCa
       String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
         @Override
         public String run() throws Exception {
-          return runLsr(new FsShell(conf), root, -1);
+          return runLsr(new FsShell(conf), root, 1);
         }
       });
       assertTrue(results.contains("zzz"));

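The TestDFSShell changes encode a single convention: shell commands now fail with a positive exit code (1), not -1, and the tests pin the exact value with assertEquals rather than accepting any negative result. A hedged sketch of the pattern, with an illustrative command and path:

    FsShell shell = new FsShell(conf);
    // 0 means success; failures return a positive code, asserted exactly so
    // an unexpected exit value fails the test instead of passing ret < 0.
    int ret = ToolRunner.run(shell, new String[] {"-ls", "/no/such/path"});
    assertEquals(1, ret);

The added logging in runCmd serves the same goal: when a command returns an unexpected code, the log now records the full command line and its exit status.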
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Fri May 27 21:12:02 2011
@@ -117,7 +117,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for namenode as defined by
-   * dfs.name.dir. For each element in dfs.name.dir, the subdirectories 
+   * dfs.namenode.name.dir. For each element in dfs.namenode.name.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array
    * will be created and populated.
    * 
@@ -145,7 +145,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for a datanode under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createDataNodeStorageDirs()}
@@ -172,7 +172,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for a block pool under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createBlockPoolStorageDirs()}

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Fri May 27 21:12:02 2011
@@ -135,7 +135,8 @@ public class TestHDFSServerPorts extends
     
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name2")).toString());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     
     // Start BackupNode
     String[] args = new String [] { StartupOption.BACKUP.getName() };

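The TestHDFSServerPorts hunk replaces the hard-coded "${dfs.name.dir}" with the key constant. Configuration expands ${...} references when a property is read, so the edits dir follows whatever the name dir resolves to. A small sketch under that assumption (the path is illustrative):

    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/name2");
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
    // conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY) now resolves to
    // "/tmp/name2" through Configuration's variable expansion.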
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Fri May 27 21:12:02 2011
@@ -45,7 +45,7 @@ public class TestSafeMode extends TestCa
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -92,7 +92,7 @@ public class TestSafeMode extends TestCa
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}

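The comment renames above refer to the safe-mode extension interval. Tests of this kind typically shorten the extension so a fixed sleep reliably outlasts it; a hedged sketch (the zero value is an assumption, not taken from this commit):

    // Make the 2-second sleep "longer than dfs.namenode.safemode.extension".
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);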
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri May 27 21:12:02 2011
@@ -182,7 +182,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
+   * Initialize dfs.namenode.name.dir and dfs.datanode.data.dir with the specified number of
    * directory entries. Also initialize dfs.blockreport.intervalMsec.
    */
   public static Configuration initializeStorageStateConf(int numDirs,
@@ -307,7 +307,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Simulate the <code>dfs.name.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.namenode.name.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of namenode storage directory that comes from a singleton
    * namenode master (that contains edits, fsimage, version and time files). 
@@ -334,7 +334,7 @@ public class UpgradeUtilities {
   }  
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of datanode storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination
@@ -361,7 +361,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of block pool storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt Fri May 27 21:12:02 2011
@@ -40,7 +40,7 @@
 # some recoverable errors (i.e. corrupt or missing .crc files).
 #
 # A similar set of files exist in two different DFS directories. 
-# For e.g. "top-dir-1Mb-512" contains files created with dfs.block.size of 1Mb 
+# For e.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb 
 # and io.bytes.per.checksum of 512.
 #
 # In the future, when Hadoop project no longer supports upgrade from

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Fri May 27 21:12:02 2011
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -129,32 +132,91 @@ public class TestDataNodeVolumeFailureTo
     DFSTestUtil.waitReplication(fs, file2, (short)2);
   }
 
+  /** 
+   * Restart the cluster with a new volume tolerated value.
+   * @param volTolerated
+   * @param manageCluster
+   * @throws IOException
+   */
+  private void restartCluster(int volTolerated, boolean manageCluster)
+      throws IOException {
+    //Make sure no datanode is running
+    cluster.shutdownDataNodes();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, volTolerated);
+    cluster.startDataNodes(conf, 1, manageCluster, null, null);
+    cluster.waitActive();
+  }
+
   /**
-   * Test invalid DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY values.
+   * Test for different combination of volume configs and volumes tolerated 
+   * values.
    */
   @Test
-  public void testInvalidFailedVolumesConfig() throws Exception {
-    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
-
-    /*
-     * Bring up another datanode that has an invalid value set.
-     * We should still be able to create a file with two replicas
-     * since the minimum valid volume parameter is only checked
-     * when we experience a disk error.
-     */
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -1);
-    cluster.startDataNodes(conf, 1, true, null, null);
-    cluster.waitActive();
-    Path file1 = new Path("/test1");
-    DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
-    DFSTestUtil.waitReplication(fs, file1, (short)2);
+  public void testVolumeAndTolerableConfiguration() throws Exception {
+    // Check if Block Pool Service exit for an invalid conf value.
+    testVolumeConfig(-1, 0, false, true);
 
     // Ditto if the value is too big.
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 100);
-    cluster.startDataNodes(conf, 1, true, null, null);
-    cluster.waitActive();
-    Path file2 = new Path("/test1");
-    DFSTestUtil.createFile(fs, file2, 1024, (short)2, 1L);
-    DFSTestUtil.waitReplication(fs, file2, (short)2);
+    testVolumeConfig(100, 0, false, true);
+    
+    // Test for one failed volume
+    testVolumeConfig(0, 1, false, false);
+    
+    // Test for one failed volume with 1 tolerable volume
+    testVolumeConfig(1, 1, true, false);
+    
+    // Test all good volumes
+    testVolumeConfig(0, 0, true, false);
+    
+    // Test all failed volumes
+    testVolumeConfig(0, 2, false, false);
   }
+
+  /**
+   * Tests for a given volumes to be tolerated and volumes failed.
+   * 
+   * @param volumesTolerated
+   * @param volumesFailed
+   * @param expectedBPServiceState
+   * @param clusterManaged
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void testVolumeConfig(int volumesTolerated, int volumesFailed,
+      boolean expectedBPServiceState, boolean clusterManaged)
+      throws IOException, InterruptedException {
+    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
+    final int dnIndex = 0;
+    File[] dirs = {
+        new File(MiniDFSCluster.getStorageDir(dnIndex, 0), "current"),
+        new File(MiniDFSCluster.getStorageDir(dnIndex, 1), "current") };
+
+    try {
+      for (int i = 0; i < volumesFailed; i++) {
+        prepareDirToFail(dirs[i]);
+      }
+      restartCluster(volumesTolerated, clusterManaged);
+      assertEquals(expectedBPServiceState, cluster.getDataNodes().get(0)
+          .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
+    } finally {
+      // restore its old permission
+      for (File dir : dirs) {
+        FileUtil.chmod(dir.toString(), "755");
+      }
+    }
+  }
+
+  /** 
+   * Prepare directories for a failure, set dir permission to 000
+   * @param dir
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void prepareDirToFail(File dir) throws IOException,
+      InterruptedException {
+    dir.mkdirs();
+    assertTrue("Couldn't chmod local vol", FileUtil
+        .chmod(dir.toString(), "000") == 0);
+  }
+
 }

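The rewritten test drives every scenario through one helper: a volume is failed by chmod'ing its directory to 000, the datanode is restarted with the new toleration value, and the block pool service's liveness is asserted. FileUtil.chmod shells out and returns the process exit status, so 0 means the permission change took effect. A sketch of the failure-and-restore discipline (the directory path is illustrative):

    File dir = new File("/tmp/dfs-data/current");  // illustrative volume dir
    assertEquals("chmod should succeed", 0, FileUtil.chmod(dir.toString(), "000"));
    try {
      // ... restart the datanode and assert on isBPServiceAlive(...) ...
    } finally {
      // Always restore 755 so cleanup can delete the directory even when
      // the assertion in between fails.
      FileUtil.chmod(dir.toString(), "755");
    }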
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Fri May 27 21:12:02 2011
@@ -38,27 +38,36 @@ public class TestDatanodeJsp {
   
   private static final String FILE_DATA = "foo bar baz biz buz";
   
-  private static void testViewingFile(MiniDFSCluster cluster, String filePath) throws IOException {
+  private static void testViewingFile(MiniDFSCluster cluster, String filePath,
+      boolean doTail) throws IOException {
     FileSystem fs = cluster.getFileSystem();
     
     Path testPath = new Path(filePath);
-    DFSTestUtil.writeFile(fs, testPath, FILE_DATA);
+    if (!fs.exists(testPath)) {
+      DFSTestUtil.writeFile(fs, testPath, FILE_DATA);
+    }
     
     InetSocketAddress nnIpcAddress = cluster.getNameNode().getNameNodeAddress();
     InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
     int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
     
-    URL url = new URL("http://localhost:" + dnInfoPort + "/browseDirectory.jsp" +
-        JspHelper.getUrlParam("dir", URLEncoder.encode(testPath.toString(), "UTF-8"), true)
+
-        JspHelper.getUrlParam("namenodeInfoPort", nnHttpAddress.getPort() + 
-        JspHelper.getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort())));
+    String jspName = doTail ? "tail.jsp" : "browseDirectory.jsp";
+    String fileParamName = doTail ? "filename" : "dir";
+    
+    URL url = new URL("http://localhost:" + dnInfoPort + "/" + jspName +
+        JspHelper.getUrlParam(fileParamName, URLEncoder.encode(testPath.toString(), "UTF-8"), true) +
+        JspHelper.getUrlParam("namenodeInfoPort", Integer.toString(nnHttpAddress.getPort())) +
+        JspHelper.getUrlParam("nnaddr", "localhost:" + nnIpcAddress.getPort()));
     
     String viewFilePage = DFSTestUtil.urlGet(url);
     
     assertTrue("page should show preview of file contents", viewFilePage.contains(FILE_DATA));
-    assertTrue("page should show link to download file", viewFilePage
-        .contains("/streamFile" + URIUtil.encodePath(testPath.toString()) +
-            "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    
+    if (!doTail) {
+      assertTrue("page should show link to download file", viewFilePage
+          .contains("/streamFile" + URIUtil.encodePath(testPath.toString()) +
+              "?nnaddr=localhost:" + nnIpcAddress.getPort()));
+    }
   }
   
   @Test
@@ -69,9 +78,13 @@ public class TestDatanodeJsp {
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
       
-      testViewingFile(cluster, "/test-file");
-      testViewingFile(cluster, "/tmp/test-file");
-      testViewingFile(cluster, "/tmp/test-file%with goofy&characters");
+      testViewingFile(cluster, "/test-file", false);
+      testViewingFile(cluster, "/tmp/test-file", false);
+      testViewingFile(cluster, "/tmp/test-file%with goofy&characters", false);
+      
+      testViewingFile(cluster, "/test-file", true);
+      testViewingFile(cluster, "/tmp/test-file", true);
+      testViewingFile(cluster, "/tmp/test-file%with goofy&characters", true);
       
     } finally {
       if (cluster != null) {

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri May 27 21:12:02 2011
@@ -181,7 +181,7 @@ public class TestDiskError {
       DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
     FsPermission expected = new FsPermission(permStr);
 
-    // Check permissions on directories in 'dfs.data.dir'
+    // Check permissions on directories in 'dfs.datanode.data.dir'
     FileSystem localFS = FileSystem.getLocal(conf);
     for (DataNode dn : cluster.getDataNodes()) {
       String[] dataDirs =

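For context on the truncated hunk above: the test reads the configured permission string, then compares it against the actual mode of each directory under dfs.datanode.data.dir. A hedged sketch of one such check (dataDir is an assumed variable holding one configured path):

    FsPermission expected = new FsPermission(
        conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY));
    FileSystem localFS = FileSystem.getLocal(conf);
    // Compare the on-disk permission of one data dir with the configured value.
    assertEquals(expected,
        localFS.getFileStatus(new Path(dataDir)).getPermission());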
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Fri May 27 21:12:02 2011
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.server.com
  *   
  *   Create a name node's edits log in /tmp/EditsLogOut.
  *   The file /tmp/EditsLogOut/current/edits can be copied to a name node's
- *   dfs.name.dir/current directory and the name node can be started as usual.
+ *   dfs.namenode.name.dir/current directory and the name node can be started as usual.
  *   
  *   The files are created in /createdViaInjectingInEditsLog
 *   The file names contain the starting and ending blockIds; hence one can 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri May 27 21:12:02 2011
@@ -85,7 +85,8 @@ public class TestBackupNode extends Test
     Configuration c = new HdfsConfiguration(conf);
     String dirs = getBackupNodeDir(t, i);
     c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
-    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
   }
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Fri May 27 21:12:02 2011
@@ -37,8 +37,8 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 /**
- * This class tests various combinations of dfs.name.dir 
- * and dfs.name.edits.dir configurations.
+ * This class tests various combinations of dfs.namenode.name.dir 
+ * and dfs.namenode.edits.dir configurations.
  */
 public class TestNameEditsConfigs extends TestCase {
   static final long SEED = 0xDEADBEEFL;
@@ -116,7 +116,7 @@ public class TestNameEditsConfigs extend
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * The test creates files and restarts cluster with different configs.
    * 1. Starts cluster with shared name and edits dirs
    * 2. Restarts cluster by adding additional (different) name and edits dirs
@@ -158,7 +158,7 @@ public class TestNameEditsConfigs extend
         new File(checkpointNameDir, "current"));
     
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -185,7 +185,7 @@ public class TestNameEditsConfigs extend
       secondary.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());
@@ -309,7 +309,7 @@ public class TestNameEditsConfigs extend
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir
    * 2. Restart cluster by adding separate name and edits dirs
@@ -330,7 +330,7 @@ public class TestNameEditsConfigs extend
     File newEditsDir = new File(base_dir, "edits");
     File nameAndEdits = new File(base_dir, "name_and_edits");
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -356,7 +356,7 @@ public class TestNameEditsConfigs extend
       cluster.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java Fri May 27 21:12:02 2011
@@ -49,7 +49,7 @@ public class TestSafeMode extends TestCa
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -96,7 +96,7 @@ public class TestSafeMode extends TestCa
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri May 27 21:12:02 2011
@@ -367,11 +367,12 @@ public class TestStartup extends TestCas
     LOG.info("Test compressing image.");
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     File base_dir = new File(System.getProperty(
         "test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(base_dir, "name").getPath());
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
     DFSTestUtil.formatNameNode(conf);
 

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Fri May 27 21:12:02 2011
@@ -264,7 +264,7 @@ public class TestStorageRestore extends 
 
       FSImage fsi = cluster.getNameNode().getFSImage();
 
-      // it is started with dfs.name.dir.restore set to true (in SetUp())
+      // it is started with dfs.namenode.name.dir.restore set to true (in SetUp())
       boolean restore = fsi.getStorage().getRestoreFailedStorage();
       LOG.info("Restore is " + restore);
       assertEquals(restore, true);

Copied: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (from r1128452, hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java)
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?p2=hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java&p1=hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java&r1=1128452&r2=1128459&rev=1128459&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Fri May 27 21:12:02 2011
@@ -22,6 +22,8 @@ import static org.junit.Assert.*;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -29,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
+
 public class TestTransferFsImage {
 
   /**
@@ -45,11 +48,10 @@ public class TestTransferFsImage {
       
       String fsName = NameNode.getHostPortString(
           cluster.getNameNode().getHttpAddress());
-      String id = "getimage=1";
+      String id = "getimage=1&txid=0";
 
-      File[] localPath = new File[] {
-         new File("/xxxxx-does-not-exist/blah") 
-      };
+      List<File> localPath = Collections.<File>singletonList(
+         new File("/xxxxx-does-not-exist/blah"));
     
       TransferFsImage.getFileClient(fsName, id, localPath, false);
       fail("Didn't get an exception!");

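Two branch-specific shifts show in this copy: the getimage query string now carries a transaction id (txid=0), and getFileClient accepts a List<File> of destinations instead of an array. A minimal sketch of the failing call the test exercises, under those assumptions:

    // A single destination wrapped in an immutable list; the path does not
    // exist, so the transfer is expected to throw rather than write anything.
    List<File> localPath = Collections.<File>singletonList(
        new File("/xxxxx-does-not-exist/blah"));
    TransferFsImage.getFileClient(fsName, "getimage=1&txid=0", localPath, false);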
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri May 27 21:12:02 2011
@@ -89,7 +89,7 @@ public class TestNameNodeMetrics extends
   }
 
   private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.replication.interval
+    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
   }

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/system/conf/system-test-hdfs.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/system/conf/system-test-hdfs.xml?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/system/conf/system-test-hdfs.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/system/conf/system-test-hdfs.xml Fri May 27 21:12:02 2011
@@ -118,7 +118,7 @@
   <description>
     Local file system path on gateway to cluster-controller binary including the binary name.
     To build the binary the following commands need to be executed:
-     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_HOME of setup cluster)
+     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
      % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
     Location of the cluster is an important security precaution.
     The binary should be owned by root and test user group permission should be set such a

Modified: hadoop/hdfs/branches/HDFS-1073/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java?rev=1128459&r1=1128458&r2=1128459&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java Fri May 27 21:12:02 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.test.syst
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.system.AbstractDaemonClient;
 import org.apache.hadoop.test.system.DaemonProtocol;
 import org.apache.hadoop.test.system.process.RemoteProcess;
@@ -34,10 +35,12 @@ public abstract class HDFSDaemonClient<P
   }
 
   public String[] getHDFSDataDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.data.dir");
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
   }
 
   public String getHDFSNameDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.name.dir")[0];
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
   }
 }

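The accessors above move from string literals to DFSConfigKeys constants. Configuration.getStrings splits a comma-separated property into an array, which is why getHDFSDataDirs can return several directories while getHDFSNameDirs takes element [0]. A small sketch (paths illustrative):

    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/d1/data,/d2/data");
    // getStrings splits on commas: { "/d1/data", "/d2/data" }
    String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);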
Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1126286
+/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1128452

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1126286
+/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1128452

Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri May 27 21:12:02 2011
@@ -3,4 +3,4 @@
 /hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512
 /hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1126286
+/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1128452


