hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From e..@apache.org
Subject svn commit: r1127823 - in /hadoop/hdfs/trunk: ./ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metr...
Date Thu, 26 May 2011 08:11:37 GMT
Author: eli
Date: Thu May 26 08:11:36 2011
New Revision: 1127823

URL: http://svn.apache.org/viewvc?rev=1127823&view=rev
Log:
HDFS-1999. Tests use deprecated configs. Contributed by Aaron T. Myers

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
    hadoop/hdfs/trunk/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Thu May 26 08:11:36 2011
@@ -624,6 +624,8 @@ Trunk (unreleased changes)
     HDFS-1983. Fix path display for copy and rm commands in TestHDFSCLI and
     TestDFSShell. (Daryn Sharp via todd)
 
+    HDFS-1999. Tests use deprecated configs. (Aaron T. Myers via eli)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu May 26 08:11:36 2011
@@ -305,7 +305,7 @@ public class MiniDFSCluster {
    * Servers will be started on free ports.
    * <p>
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set dfs.name.dir and dfs.data.dir in the given conf.
+   * and have already set dfs.namenode.name.dir and dfs.datanode.data.dir in the given conf.
    * 
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
@@ -377,7 +377,7 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -407,7 +407,7 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -439,9 +439,9 @@ public class MiniDFSCluster {
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   *          be created and dfs.data.dir set to same in the conf
+   *          be created and dfs.datanode.data.dir set to same in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -706,7 +706,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -737,7 +737,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -862,7 +862,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -892,7 +892,7 @@ public class MiniDFSCluster {
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
Thu May 26 08:11:36 2011
@@ -110,7 +110,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for namenode as defined by
-   * dfs.name.dir. For each element in dfs.name.dir, the subdirectories 
+   * dfs.namenode.name.dir. For each element in dfs.namenode.name.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array
    * will be created and populated.
    * 
@@ -139,7 +139,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for a datanode under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createDataNodeStorageDirs()}
@@ -167,7 +167,7 @@ public class TestDFSStorageStateRecovery
   
   /**
    * Sets up the storage directories for a block pool under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createBlockPoolStorageDirs()}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Thu May
26 08:11:36 2011
@@ -135,7 +135,8 @@ public class TestHDFSServerPorts extends
     
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name2")).toString());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     
     // Start BackupNode
     String[] args = new String [] { StartupOption.BACKUP.getName() };

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Thu May 26 08:11:36
2011
@@ -45,7 +45,7 @@ public class TestSafeMode extends TestCa
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -92,7 +92,7 @@ public class TestSafeMode extends TestCa
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Thu May 26
08:11:36 2011
@@ -180,7 +180,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
+   * Initialize dfs.namenode.name.dir and dfs.datanode.data.dir with the specified number of
    * directory entries. Also initialize dfs.blockreport.intervalMsec.
    */
   public static Configuration initializeStorageStateConf(int numDirs,
@@ -305,7 +305,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Simulate the <code>dfs.name.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.namenode.name.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of namenode storage directory that comes from a singleton
    * namenode master (that contains edits, fsimage, version and time files). 
@@ -332,7 +332,7 @@ public class UpgradeUtilities {
   }  
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of datanode storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination
@@ -359,7 +359,7 @@ public class UpgradeUtilities {
   }
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of block pool storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt Thu May 26 08:11:36
2011
@@ -40,7 +40,7 @@
 # some recoverable errors (i.e. corrupt or missing .crc files).
 #
 # A similar set of files exist in two different DFS directories. 
-# For e.g. "top-dir-1Mb-512" contains files created with dfs.block.size of 1Mb 
+# For e.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb 
 # and io.bytes.per.checksum of 512.
 #
 # In the future, when Hadoop project no longer supports upgrade from

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
Thu May 26 08:11:36 2011
@@ -181,7 +181,7 @@ public class TestDiskError {
       DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
     FsPermission expected = new FsPermission(permStr);
 
-    // Check permissions on directories in 'dfs.data.dir'
+    // Check permissions on directories in 'dfs.datanode.data.dir'
     FileSystem localFS = FileSystem.getLocal(conf);
     for (DataNode dn : cluster.getDataNodes()) {
       String[] dataDirs =

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
Thu May 26 08:11:36 2011
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.com
  *   
  *   Create a name node's edits log in /tmp/EditsLogOut.
  *   The file /tmp/EditsLogOut/current/edits can be copied to a name node's
- *   dfs.name.dir/current direcotry and the name node can be started as usual.
+ *   dfs.namenode.name.dir/current directory and the name node can be started as usual.
  *   
  *   The files are created in /createdViaInjectingInEditsLog
 *   The file names contain the starting and ending blockIds; hence one can 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
Thu May 26 08:11:36 2011
@@ -85,7 +85,8 @@ public class TestBackupNode extends Test
     Configuration c = new HdfsConfiguration(conf);
     String dirs = getBackupNodeDir(t, i);
     c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
-    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
   }
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
Thu May 26 08:11:36 2011
@@ -30,8 +30,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
- * This class tests various combinations of dfs.name.dir 
- * and dfs.name.edits.dir configurations.
+ * This class tests various combinations of dfs.namenode.name.dir 
+ * and dfs.namenode.edits.dir configurations.
  */
 public class TestNameEditsConfigs extends TestCase {
   static final long SEED = 0xDEADBEEFL;
@@ -100,7 +100,7 @@ public class TestNameEditsConfigs extend
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * The test creates files and restarts cluster with different configs.
    * 1. Starts cluster with shared name and edits dirs
    * 2. Restarts cluster by adding additional (different) name and edits dirs
@@ -127,7 +127,7 @@ public class TestNameEditsConfigs extend
     File checkpointEditsDir = new File(base_dir, "secondedits");
     File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -154,7 +154,7 @@ public class TestNameEditsConfigs extend
       secondary.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());
@@ -282,7 +282,7 @@ public class TestNameEditsConfigs extend
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir
    * 2. Restart cluster by adding separate name and edits dirs
@@ -303,7 +303,7 @@ public class TestNameEditsConfigs extend
     File newEditsDir = new File(base_dir, "edits");
     File nameAndEdits = new File(base_dir, "name_and_edits");
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -325,7 +325,7 @@ public class TestNameEditsConfigs extend
       cluster.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
Thu May 26 08:11:36 2011
@@ -49,7 +49,7 @@ public class TestSafeMode extends TestCa
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -96,7 +96,7 @@ public class TestSafeMode extends TestCa
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
Thu May 26 08:11:36 2011
@@ -367,11 +367,12 @@ public class TestStartup extends TestCas
     LOG.info("Test compressing image.");
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     File base_dir = new File(System.getProperty(
         "test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(base_dir, "name").getPath());
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
     DFSTestUtil.formatNameNode(conf);
 
@@ -426,11 +427,12 @@ public class TestStartup extends TestCas
   private void testImageChecksum(boolean compress) throws Exception {
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     File base_dir = new File(
         System.getProperty("test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(base_dir, "name").getPath());
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
     if (compress) {
       conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
     }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
Thu May 26 08:11:36 2011
@@ -340,7 +340,7 @@ public class TestStorageRestore extends 
 
       FSImage fsi = cluster.getNameNode().getFSImage();
 
-      // it is started with dfs.name.dir.restore set to true (in SetUp())
+      // it is started with dfs.namenode.name.dir.restore set to true (in SetUp())
       boolean restore = fsi.getStorage().getRestoreFailedStorage();
       LOG.info("Restore is " + restore);
       assertEquals(restore, true);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Thu May 26 08:11:36 2011
@@ -89,7 +89,7 @@ public class TestNameNodeMetrics extends
   }
 
   private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.replication.interval
+    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
   }

Modified: hadoop/hdfs/trunk/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java?rev=1127823&r1=1127822&r2=1127823&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
(original)
+++ hadoop/hdfs/trunk/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
Thu May 26 08:11:36 2011
@@ -34,10 +34,12 @@ public abstract class HDFSDaemonClient<P
   }
 
   public String[] getHDFSDataDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.data.dir");
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
   }
 
   public String getHDFSNameDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.name.dir")[0];
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
   }
 }



Mime
View raw message