hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r820536 [3/4] - in /hadoop/hdfs/branches/branch-0.21: ./ src/ant/org/apache/hadoop/ant/ src/contrib/fuse-dfs/src/test/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ s...
Date: Thu, 01 Oct 2009 05:31:40 GMT
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java Thu Oct  1 05:31:37 2009
@@ -128,11 +128,11 @@
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
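
The hunk above is the template for the whole patch: each test now builds an HdfsConfiguration instead of a bare Configuration, and looks keys up through the DFSConfigKeys constants instead of raw strings such as "dfs.name.dir". A minimal sketch of the resulting usage, assuming HdfsConfiguration registers the HDFS default resources and deprecated-key mappings when the class is loaded; the class name ConfKeySketch is illustrative, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ConfKeySketch {
      public static void main(String[] args) {
        // HdfsConfiguration, not Configuration, so HDFS defaults and
        // deprecated-key translation are in effect.
        Configuration conf = new HdfsConfiguration();
        // Named constant instead of the literal "dfs.block.size".
        long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 64L * 1024 * 1024);
        System.out.println("block size = " + blockSize);
      }
    }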

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Thu Oct  1 05:31:37 2009
@@ -177,7 +177,7 @@
   public void testUpgradeFromImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
         System.setProperty("test.build.data", "build/test/data");
       }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Thu Oct  1 05:31:37 2009
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -185,7 +186,7 @@
   
   @Test public void testOpWrite() throws IOException {
     int numDataNodes = 1;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {
@@ -316,7 +317,7 @@
     Path file = new Path("dataprotocol.dat");
     int numDataNodes = 1;
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.replication", numDataNodes); 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {
@@ -328,7 +329,7 @@
     dnAddr = NetUtils.createSocketAddr(datanode.getName());
     FileSystem fileSys = cluster.getFileSystem();
     
-    int fileLen = Math.min(conf.getInt("dfs.block.size", 4096), 4096);
+    int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
     
     createFile(fileSys, file, fileLen);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Thu Oct  1 05:31:37 2009
@@ -91,7 +91,7 @@
     
     long startTime = System.currentTimeMillis();
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitActive();
     
@@ -152,7 +152,7 @@
   }
 
   public void testBlockCorruptionPolicy() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setLong("dfs.blockreport.intervalMsec", 1000L);
     Random random = new Random();
     FileSystem fs = null;
@@ -262,11 +262,11 @@
                                              short numReplicas,
                                              int numCorruptReplicas) 
                                              throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setLong("dfs.blockreport.intervalMsec", 30L);
-    conf.setLong("dfs.replication.interval", 30);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 30);
     conf.setLong("dfs.heartbeat.interval", 30L);
-    conf.setBoolean("dfs.replication.considerLoad", false);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -371,7 +371,7 @@
   
   /** Test if NameNode handles truncated blocks in block report */
   public void testTruncatedBlockReport() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = (short)2;
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java Thu Oct  1 05:31:37 2009
@@ -283,11 +283,11 @@
    * dies.
    */
   private void complexTest() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -338,11 +338,11 @@
    * close the file.
    */
   private void simpleTest(int datanodeToKill) throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     int myMaxNodes = 5;
     System.out.println("SimpleTest starting with DataNode to Kill " + 
                        datanodeToKill);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java Thu Oct  1 05:31:37 2009
@@ -32,7 +32,7 @@
  * This test ensures that all types of data node reports work correctly.
  */
 public class TestDatanodeReport extends TestCase {
-  final static private Configuration conf = new Configuration();
+  final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
     
   /**
@@ -40,7 +40,7 @@
    */
   public void testDatanodeReport() throws Exception {
     conf.setInt(
-        "heartbeat.recheck.interval", 500); // 0.5s
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
     conf.setLong("dfs.heartbeat.interval", 1L);
     MiniDFSCluster cluster = 
       new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Thu Oct  1 05:31:37 2009
@@ -240,8 +240,8 @@
    * Tests Decommission in DFS.
    */
   public void testDecommission() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setBoolean("dfs.replication.considerLoad", false);
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
 
     // Set up the hosts/exclude files.
     FileSystem localFileSys = FileSystem.getLocal(conf);
@@ -251,9 +251,9 @@
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
     conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.replication.pending.timeout.sec", 4);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     writeConfigFile(localFileSys, excludeFile, null);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java Thu Oct  1 05:31:37 2009
@@ -41,7 +41,7 @@
   }
 
   public void testGetAddressFromConf() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
     assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");

Added: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java?rev=820536&view=auto
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java (added)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDeprecatedKeys.java Thu Oct  1 05:31:37 2009
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.Level;
+
+import junit.framework.TestCase;
+
+public class TestDeprecatedKeys extends TestCase {
+ 
+  //Tests a deprecated key
+  public void testDeprecatedKeys() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.set("topology.script.file.name", "xyz");
+    String scriptFile = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
+    assertTrue(scriptFile.equals("xyz")) ;
+    conf.setInt("dfs.replication.interval", 1);
+    String alpha = DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
+    int repInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3) ;
+    assertTrue(repInterval == 1) ;
+  }
+}
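
TestDeprecatedKeys above exercises the translation from old key names to the new DFSConfigKeys constants: a value set under "dfs.replication.interval" is read back through DFS_NAMENODE_REPLICATION_INTERVAL_KEY. A hedged sketch of how such a mapping is presumably registered, using Configuration.addDeprecation; the single key shown and the class name DeprecationSketch are illustrative, not taken from this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DeprecationSketch {
      static {
        // Map the old key onto the new one; HdfsConfiguration is assumed to
        // do something similar for the full set of renamed HDFS keys.
        Configuration.addDeprecation("dfs.replication.interval",
            new String[] { DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY });
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication.interval", 1); // set via the old name
        // Read back via the new name; prints 1, not the default 3.
        System.out.println(conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3));
      }
    }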

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Thu Oct  1 05:31:37 2009
@@ -36,14 +36,14 @@
   private static final Random RAN = new Random();
 
   public void testFileSystemCloseAll() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
       FileSystem.closeAll();
 
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       FileSystem.setDefaultUri(conf, address);
       FileSystem.get(conf);
       FileSystem.get(conf);
@@ -59,7 +59,7 @@
    * multiple files are open.
    */
   public void testDFSClose() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -76,7 +76,7 @@
   }
 
   public void testDFSClient() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
 
     try {
@@ -165,19 +165,19 @@
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    final Configuration conf = new Configuration();
-    conf.set("slave.host.name", "localhost");
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem hdfs = cluster.getFileSystem();
-    final String hftpuri = "hftp://" + conf.get("dfs.http.address");
+    final String hftpuri = "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
     System.out.println("hftpuri=" + hftpuri);
     final FileSystem hftp = new Path(hftpuri).getFileSystem(conf);
 
     final String dir = "/filechecksum";
     final int block_size = 1024;
     final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
-    conf.setInt("io.bytes.per.checksum", 512);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
 
     //try different number of blocks
     for(int n = 0; n < 5; n++) {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java Thu Oct  1 05:31:37 2009
@@ -286,9 +286,9 @@
   }
   
   public void testFSInputChecker() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_SUM);
     rand.nextBytes(expected);
 
     // test DFS

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java Thu Oct  1 05:31:37 2009
@@ -110,9 +110,9 @@
   * Test write operation for output stream in DFS.
    */
   public void testFSOutputSummer() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", BYTES_PER_CHECKSUM);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
     MiniDFSCluster cluster = new MiniDFSCluster(
         conf, NUM_OF_DATANODES, true, null);
     fileSys = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Thu Oct  1 05:31:37 2009
@@ -102,7 +102,7 @@
    * @throws IOException an exception might be thrown
    */
   public void testCopyOnWrite() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -172,7 +172,7 @@
    * @throws IOException an exception might be thrown
    */
   public void testSimpleFlush() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -227,7 +227,7 @@
    * @throws IOException an exception might be thrown
    */
   public void testComplexFlush() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java Thu Oct  1 05:31:37 2009
@@ -80,7 +80,7 @@
    * @throws IOException an exception might be thrown
    */ 
   public void testSimpleAppend() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -331,11 +331,11 @@
    */
   public void testComplexAppend() throws IOException {
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 2000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 2);
-    conf.setInt("dfs.replication.pending.timeout.sec", 2);
-    conf.setInt("dfs.socket.timeout", 30000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
     conf.setInt("dfs.datanode.socket.write.timeout", 30000);
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Thu Oct  1 05:31:37 2009
@@ -49,8 +49,8 @@
     return new TestSetup(new TestSuite(TestFileAppend3.class)) {
       protected void setUp() throws java.lang.Exception {
         AppendTestUtil.LOG.info("setUp()");
-        conf = new Configuration();
-        conf.setInt("io.bytes.per.checksum", 512);
+        conf = new HdfsConfiguration();
+        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
         conf.setBoolean("dfs.support.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
         cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Thu Oct  1 05:31:37 2009
@@ -44,7 +44,7 @@
     MiniDFSCluster cluster = null;
     DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 3, true, null);
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
@@ -71,7 +71,7 @@
 
   /** check if local FS can handle corrupted blocks properly */
   public void testLocalFileCorruption() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
     FileSystem fs = FileSystem.getLocal(conf);
     DataOutputStream dos = fs.create(file);
@@ -99,7 +99,7 @@
   public void testArrayOutOfBoundsException() throws Exception {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 2, true, null);
       cluster.waitActive();
       

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Thu Oct  1 05:31:37 2009
@@ -177,10 +177,10 @@
    * Test that server default values can be retrieved on the client side
    */
   public void testServerDefaults() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", FSConstants.DEFAULT_BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
-    conf.setInt("dfs.write.packet.size", FSConstants.DEFAULT_WRITE_PACKET_SIZE);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, FSConstants.DEFAULT_BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, FSConstants.DEFAULT_WRITE_PACKET_SIZE);
     conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
     conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(conf,
@@ -204,7 +204,7 @@
    * Test that file data becomes available before file is closed.
    */
   public void testFileCreation() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -284,7 +284,7 @@
    * Test deleteOnExit
    */
   public void testDeleteOnExit() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -346,8 +346,8 @@
    * Test that file data does not become corrupted even in the face of errors.
    */
   public void testFileCreationError1() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -420,8 +420,8 @@
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
     System.out.println("testFileCreationError2 start");
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -487,10 +487,10 @@
    * is needed to handle persistent leases.
    */
   public void xxxtestFileCreationNamenodeRestart() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -619,7 +619,7 @@
    * Test that all open files are closed when client dies abnormally.
    */
   public void testDFSClientDeath() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     System.out.println("Testing abnormal client death.");
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
@@ -655,7 +655,7 @@
    * Test file creation with all supported flags.
    */
   public void testFileCreationWithFlags() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -740,7 +740,7 @@
    * Test file creation using createNonRecursive().
    */
   public void testFileCreationNonRecursive() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -851,7 +851,7 @@
    * Test creating two files at the same time. 
    */
   public void testConcurrentFileCreation() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
 
     try {
@@ -889,8 +889,8 @@
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster
@@ -946,7 +946,7 @@
     System.out.println("test file system close start");
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
@@ -974,8 +974,8 @@
     System.out.println("test testFsCloseAfterClusterShutdown start");
     final int DATANODE_NUM = 3;
 
-    Configuration conf = new Configuration();
-    conf.setInt("dfs.replication.min", 3);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
     conf.setBoolean("ipc.client.ping", false); // hdfs timeout defaults to 60 seconds
     conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 seconds
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java Thu Oct  1 05:31:37 2009
@@ -47,7 +47,7 @@
   /** Test lease recovery Triggered by DFSClient. */
   public void testClientTriggeredLeaseRecovery() throws Exception {
     final int REPLICATION = 3;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java Thu Oct  1 05:31:37 2009
@@ -37,10 +37,10 @@
   }
 
   public void testFileCreationDeleteParent() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setBoolean("dfs.support.append", true);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java Thu Oct  1 05:31:37 2009
@@ -50,8 +50,8 @@
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
-    final Configuration conf = new Configuration();
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Thu Oct  1 05:31:37 2009
@@ -61,7 +61,7 @@
    * Tests various options of DFSShell.
    */
   public void testFileStatus() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fs = cluster.getFileSystem();
     final DFSClient dfsClient = new DFSClient(NameNode.getAddress(conf), conf);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java Thu Oct  1 05:31:37 2009
@@ -44,13 +44,13 @@
 public class TestGetBlocks extends TestCase {
   /** test getBlocks */
   public void testGetBlocks() throws Exception {
-    final Configuration CONF = new Configuration();
+    final Configuration CONF = new HdfsConfiguration();
 
     final short REPLICATION_FACTOR = (short)2;
     final int DEFAULT_BLOCK_SIZE = 1024;
     final Random r = new Random();
     
-    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster(
           CONF, REPLICATION_FACTOR, true, null );
     try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java Thu Oct  1 05:31:37 2009
@@ -29,7 +29,7 @@
 
   @Override
   protected void setUp() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster(conf, 2, true, null);
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Thu Oct  1 05:31:37 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNS;
 
 /**
@@ -90,10 +91,10 @@
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
     }
-    config = new Configuration();
-    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
+    config = new HdfsConfiguration();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name1").getPath());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
-    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
     NameNode.format(config);
 
     String[] args = new String[] {};
@@ -119,8 +120,8 @@
     assertTrue(currDir2.mkdirs());
     assertTrue(currDir3.mkdirs());
     
-    conf.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
-    conf.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
     
     // Start BackupNode
     String[] args = new String [] { StartupOption.BACKUP.getName() };
@@ -136,7 +137,7 @@
   throws IOException {
     String dataDir = getTestingDir();
     File dataNodeDir = new File(dataDir, "data-" + index);
-    config.set("dfs.data.dir", dataNodeDir.getPath());
+    config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
 
     String[] args = new String[] {};
     // NameNode will modify config with the ports it bound to
@@ -244,8 +245,8 @@
       nn = startNameNode();
 
       // start another namenode on the same port
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name2").getPath());
       NameNode.format(conf2);
       boolean started = canStartNameNode(conf2);
       assertFalse(started); // should fail
@@ -258,7 +259,7 @@
       // reset conf2 since NameNode modifies it
       FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
       // different http port
-      conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       started = canStartNameNode(conf2);
       assertTrue(started); // should start now
     } finally {
@@ -275,8 +276,8 @@
       nn = startNameNode();
 
       // start data-node on the same port as name-node
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
       conf2.set("dfs.datanode.address",
                 FileSystem.getDefaultUri(config).getAuthority());
       conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
@@ -286,7 +287,7 @@
       // bind http server to the same port as name-node
       conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
       conf2.set("dfs.datanode.http.address", 
-                config.get("dfs.http.address"));
+                config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
       started = canStartDataNode(conf2);
       assertFalse(started); // should fail
     
@@ -310,18 +311,18 @@
       nn = startNameNode();
 
       // bind http server to the same port as name-node
-      Configuration conf2 = new Configuration(config);
-      conf2.set("dfs.secondary.http.address", 
-                config.get("dfs.http.address"));
+      Configuration conf2 = new HdfsConfiguration(config);
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, 
+                config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
       LOG.info("= Starting 1 on: " + 
-                                 conf2.get("dfs.secondary.http.address"));
+                                 conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
       boolean started = canStartSecondaryNode(conf2);
       assertFalse(started); // should fail
 
       // bind http server to a different port
-      conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       LOG.info("= Starting 2 on: " + 
-                                 conf2.get("dfs.secondary.http.address"));
+                                 conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
       started = canStartSecondaryNode(conf2);
       assertTrue(started); // should start now
     } finally {
@@ -338,20 +339,20 @@
         nn = startNameNode();
 
         // bind http server to the same port as name-node
-        Configuration backup_config = new Configuration(config);
-        backup_config.set("dfs.backup.http.address", 
-                                        backup_config.get("dfs.http.address"));
+        Configuration backup_config = new HdfsConfiguration(config);
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+                                        backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
 
         LOG.info("= Starting 1 on: " + 
-                                  backup_config.get("dfs.backup.http.address"));
+                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         assertFalse("Backup started on same port as Namenode", 
                            canStartBackupNode(backup_config)); // should fail
 
         // bind http server to a different port
-        backup_config.set("dfs.backup.http.address", NAME_NODE_HTTP_HOST + "0");
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
         LOG.info("= Starting 2 on: " + 
-                                  backup_config.get("dfs.backup.http.address"));
+                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         assertTrue(canStartBackupNode(backup_config)); // should start now
       } finally {
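
A recurring pattern in the port-conflict tests above is deriving a second configuration from the NameNode's so that a single address can be varied while everything else stays shared. A minimal sketch, grounded in the HdfsConfiguration(Configuration) copy constructor these hunks use; the addresses and the class name are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DerivedConfSketch {
      public static void main(String[] args) {
        Configuration config = new HdfsConfiguration();
        config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:50070");

        // Copy, then override only the key under test; the source
        // configuration is left untouched.
        Configuration conf2 = new HdfsConfiguration(config);
        conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
                  config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
        System.out.println(
            conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
      }
    }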

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java Thu Oct  1 05:31:37 2009
@@ -37,7 +37,7 @@
   public static Test suite() {
     TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
       protected void setUp() throws Exception {
-        Configuration conf = new Configuration();
+        Configuration conf = new HdfsConfiguration();
         cluster = new MiniDFSCluster(conf, 2, true, null);
       }
       protected void tearDown() throws Exception {
@@ -57,7 +57,7 @@
   public void testNonDefaultFS() throws IOException {
     FileSystem fs = cluster.getFileSystem();
     Configuration conf = fs.getConf();
-    conf.set("fs.default.name", fs.getUri().toString());
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
     trashNonDefaultFS(conf);
   }
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java Thu Oct  1 05:31:37 2009
@@ -22,6 +22,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataInputStream;
 import static org.junit.Assert.assertEquals;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -36,7 +38,7 @@
    */
   @Test
   public void hFlush_01() throws IOException {
-    doTheJob(new Configuration(), fName, AppendTestUtil.BLOCK_SIZE, (short)2);
+    doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE, (short)2);
   }
 
   /** The test uses {@link #doTheJob(Configuration, String, long, short)
@@ -45,12 +47,12 @@
    */
   @Test
   public void hFlush_02() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
     // Modify default filesystem settings
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
 
     doTheJob(conf, fName, customBlockSize, (short)2);
   }
@@ -61,12 +63,12 @@
    */
  @Test
   public void hFlush_03() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
     // Modify default filesystem settings
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
 
     doTheJob(conf, fName, customBlockSize, (short)2);
   }
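
Both hFlush_02 and hFlush_03 pick the block size as exactly three checksum chunks; DFS requires the block size to be a multiple of the bytes-per-checksum setting, so the factor of three keeps each pair valid. A small sketch of the constraint, reusing the values from hFlush_02 above; the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ChecksumBlockSizeSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        int customPerChecksumSize = 512;
        int customBlockSize = customPerChecksumSize * 3; // 1536, divisible by 512
        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
        // Prints 0: the block size divides evenly into checksum chunks.
        System.out.println(conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 0)
            % conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512));
      }
    }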

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java Thu Oct  1 05:31:37 2009
@@ -133,9 +133,9 @@
     }
     
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       conf.set("dfs.replication", Integer.toString(numDataNodes));
-      conf.setInt("io.bytes.per.checksum", checksumSize);
+      conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
       //first time format
       cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
@@ -165,9 +165,9 @@
        */
       
       LOG.info("Restarting minicluster");
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
-      conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
       
       cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
                                    true, null, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java Thu Oct  1 05:31:37 2009
@@ -31,7 +31,7 @@
   final Path dir = new Path("/test/lease/");
 
   public void testLease() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     try {
       FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Thu Oct  1 05:31:37 2009
@@ -63,8 +63,8 @@
    */
   public void testBlockSynchronization() throws Exception {
     final int ORG_FILE_SIZE = 3000; 
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = null;
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Thu Oct  1 05:31:37 2009
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -49,11 +50,11 @@
     final long softLease = 1000;
     final long hardLease = 60 * 60 *1000;
     final short repl = 3;
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
-    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt("dfs.heartbeat.interval", 1);
-  //  conf.setInt("io.bytes.per.checksum", 16);
+  //  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 16);
 
     MiniDFSCluster cluster = null;
     DistributedFileSystem dfs = null;
@@ -91,7 +92,7 @@
       // try to re-open the file before closing the previous handle. This
       // should fail but will trigger lease recovery.
       {
-        Configuration conf2 = new Configuration(conf);
+        Configuration conf2 = new HdfsConfiguration(conf);
         String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
         UnixUserGroupInformation.saveToConf(conf2,
             UnixUserGroupInformation.UGI_PROPERTY_NAME,

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java Thu Oct  1 05:31:37 2009
@@ -60,7 +60,7 @@
    * Tests get/set working directory in DFS.
    */
   public void testWorkingDirectory() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Thu Oct  1 05:31:37 2009
@@ -45,9 +45,9 @@
     MiniDFSCluster cluster = null;
     
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       //minimize test delay
-      conf.setInt("dfs.replication.interval", 0);
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
       int fileLen = 10*1024;
 
       //start a cluster with single datanode
@@ -86,7 +86,7 @@
 
 
       // Now verify that it shows up on webui
-      URL url = new URL("http://" + conf.get("dfs.http.address") + 
+      URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) + 
                         "/dfshealth.jsp");
       String dfsFrontPage = DFSTestUtil.urlGet(url);
       String warnStr = "WARNING : There are about ";

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java Thu Oct  1 05:31:37 2009
@@ -74,7 +74,7 @@
    * Tests modification time in DFS.
    */
   public void testModTime() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
     cluster.waitActive();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java Thu Oct  1 05:31:37 2009
@@ -201,9 +201,9 @@
   }
   
   private void dfsPreadTest(boolean disableTransferTo) throws IOException {
-    Configuration conf = new Configuration();
-    conf.setLong("dfs.block.size", 4096);
-    conf.setLong("dfs.read.prefetch.size", 4096);
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
+    conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
     if (simulatedStorage) {
       conf.setBoolean("dfs.datanode.simulateddatastorage", true);
     }
@@ -234,7 +234,7 @@
    * Tests positional read in LocalFS.
    */
   public void testPreadLocalFS() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     FileSystem fileSys = FileSystem.getLocal(conf);
     try {
       Path file1 = new Path("build/test/data", "preadtest.dat");

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java Thu Oct  1 05:31:37 2009
@@ -56,10 +56,10 @@
    *    setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count 
    */
   public void testQuotaCommands() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     // set a smaller block size so that we can test with smaller 
     // Space quotas
-    conf.set("dfs.block.size", "512");
+    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
     conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
@@ -262,7 +262,7 @@
   /** Test commands that change the size of the name space:
    *  mkdirs, rename, and delete */
   public void testNamespaceCommands() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -430,10 +430,10 @@
    * This is based on testNamespaceCommands() above.
    */
   public void testSpaceCommands() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     // set a smaller block size so that we can test with smaller 
     // diskspace quotas
-    conf.set("dfs.block.size", "512");
+    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
     conf.setBoolean("dfs.support.append", true);
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java Thu Oct  1 05:31:37 2009
@@ -28,6 +28,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -48,7 +49,7 @@
   /** Test reading while writing. */
   @Test
   public void testReadWhileWriting() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     //enable append
     conf.setBoolean("dfs.support.append", true);
 
@@ -106,7 +107,7 @@
   static void checkFile(Path p, int expectedsize, Configuration conf
       ) throws IOException {
     //open the file with another user account
-    final Configuration conf2 = new Configuration(conf);
+    final Configuration conf2 = new HdfsConfiguration(conf);
     final String username = UserGroupInformation.getCurrentUGI().getUserName()
         + "_" + ++userCount;
     UnixUserGroupInformation.saveToConf(conf2,

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java Thu Oct  1 05:31:37 2009
@@ -47,12 +47,12 @@
    * move /user/dir1 /user/dir3
    */
   public void testWhileOpenRenameParent() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
     conf.setBoolean("dfs.support.append", true);
 
     // create cluster
@@ -131,12 +131,12 @@
    * move /user/dir1 /user/dir3
    */
   public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
     conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 2************************************");
 
@@ -204,12 +204,12 @@
    * move /user/dir1/file1 /user/dir2/
    */
   public void testWhileOpenRenameToExistentDirectory() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
     conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 3************************************");
 
@@ -267,12 +267,12 @@
    * move /user/dir1/file1 /user/dir2/
    */
   public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
-    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
     conf.setBoolean("dfs.support.append", true);
     System.out.println("Test 4************************************");
 

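All four testWhileOpenRename* methods above now carry an identical five-line setup. A hypothetical helper, not part of this commit and assuming the test class's existing imports, that would collapse the duplication:

    // Hypothetical refactoring: shared setup for the four
    // testWhileOpenRename* cases, mirroring the values in the hunks above.
    private static Configuration newAppendTestConf() {
      Configuration conf = new HdfsConfiguration();
      final int MAX_IDLE_TIME = 2000; // 2s
      conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
      conf.setInt("dfs.heartbeat.interval", 1);
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
      conf.setBoolean("dfs.support.append", true);
      return conf;
    }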
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java Thu Oct  1 05:31:37 2009
@@ -146,7 +146,7 @@
    * Test if Datanode reports bad blocks during replication request
    */
   public void testBadBlockReportOnTransfer() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -191,8 +191,8 @@
    * Tests replication in DFS.
    */
   public void runReplication(boolean simulated) throws IOException {
-    Configuration conf = new Configuration();
-    conf.setBoolean("dfs.replication.considerLoad", false);
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
     if (simulated) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
@@ -307,7 +307,7 @@
     }
     
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       conf.set("dfs.replication", Integer.toString(numDataNodes));
       //first time format
       cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
@@ -373,11 +373,11 @@
        */
       
       LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.set("dfs.replication", Integer.toString(numDataNodes));
-      conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+      conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
       conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
-      conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
       
       cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
                                    true, null, null);
@@ -401,7 +401,7 @@
    * @throws Exception
    */
   public void testReplicateLenMismatchedBlock() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
     try {
       cluster.waitActive();
       // test truncated block
@@ -418,7 +418,7 @@
     final Path fileName = new Path("/file1");
     final short REPLICATION_FACTOR = (short)1;
     final FileSystem fs = cluster.getFileSystem();
-    final int fileLen = fs.getConf().getInt("io.bytes.per.checksum", 512);
+    final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
     DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
     DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java Thu Oct  1 05:31:37 2009
@@ -31,7 +31,7 @@
 public class TestRestartDFS extends TestCase {
   /** check if DFS remains in proper condition after a restart */
   public void testRestartDFS() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Thu Oct  1 05:31:37 2009
@@ -55,9 +55,9 @@
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       // disable safemode extension to make the test run faster.
-      conf.set("dfs.safemode.extension", "1");
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
       

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java Thu Oct  1 05:31:37 2009
@@ -123,7 +123,7 @@
    * Test if the seek bug exists in FSDataInputStream in DFS.
    */
   public void testSeekBugDFS() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -142,7 +142,7 @@
    * Tests if the seek bug exists in FSDataInputStream in LocalFS.
    */
   public void testSeekBugLocalFS() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     FileSystem fileSys = FileSystem.getLocal(conf);
     try {
       Path file1 = new Path("build/test/data", "seektest.dat");

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java Thu Oct  1 05:31:37 2009
@@ -77,10 +77,10 @@
    * Tests mod & access time in DFS.
    */
   public void testTimes() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
 
 
@@ -187,13 +187,13 @@
    * Tests mod time change at close in DFS.
    */
   public void testTimesAtClose() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     int replicas = 1;
 
     // parameter initialization
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
-    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.datanode.handler.count", 50);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java Thu Oct  1 05:31:37 2009
@@ -26,13 +26,13 @@
 
 public class TestSetrepIncreasing extends TestCase {
   static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     conf.set("dfs.replication", "" + fromREP);
     conf.setLong("dfs.blockreport.intervalMsec", 1000L);
-    conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 10, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java Thu Oct  1 05:31:37 2009
@@ -90,11 +90,11 @@
   * Tests small block size in DFS.
    */
   public void testSmallBlock() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
       conf.setBoolean("dfs.datanode.simulateddatastorage", true);
     }
-    conf.set("io.bytes.per.checksum", "1");
+    conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Thu Oct  1 05:31:37 2009
@@ -86,9 +86,9 @@
    */
   public static void initialize() throws Exception {
     createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
-    Configuration config = new Configuration();
-    config.set("dfs.name.dir", namenodeStorage.toString());
-    config.set("dfs.data.dir", datanodeStorage.toString());
+    Configuration config = new HdfsConfiguration();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
+    config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
     MiniDFSCluster cluster = null;
     try {
       // format data-node
@@ -157,10 +157,10 @@
       dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
     }
     if (conf == null) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     }
-    conf.set("dfs.name.dir", nameNodeDirs.toString());
-    conf.set("dfs.data.dir", dataNodeDirs.toString());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
     conf.setInt("dfs.blockreport.intervalMsec", 10000);
     return conf;
   }
@@ -263,7 +263,7 @@
     for (int i = 0; i < parents.length; i++) {
       File newDir = new File(parents[i], dirName);
       createEmptyDirs(new String[] {newDir.toString()});
-      LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+      LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
       switch (nodeType) {
       case NAME_NODE:
         localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),

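UpgradeUtilities now addresses its storage directories through DFSConfigKeys as well; both keys accept comma-separated lists, which is what the StringBuffer loop above builds. A sketch with illustrative paths:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class StorageDirsSketch {
      static Configuration conf() {
        Configuration conf = new HdfsConfiguration();
        // Both keys take comma-separated lists, one entry per storage dir.
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
                 "/build/test/data/name1,/build/test/data/name2");
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
                 "/build/test/data/data1,/build/test/data/data2");
        return conf;
      }
    }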
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Thu Oct  1 05:31:37 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -60,8 +62,8 @@
   }
 
   private void initConf(Configuration conf) {
-    conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    conf.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
     conf.setLong("dfs.heartbeat.interval", 1L);
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     conf.setLong("dfs.balancer.movedWinWidth", 2000L);
@@ -164,7 +166,7 @@
         blocks, (short)(numDatanodes-1), distribution);
 
     // restart the cluster: do NOT format the cluster
-    conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
     cluster = new MiniDFSCluster(0, conf, numDatanodes,
         false, true, null, racks, capacities);
     cluster.waitActive();
@@ -281,7 +283,7 @@
   /** Test a cluster with even distribution, 
    * then a new empty node is added to the cluster*/
   public void testBalancer0() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     initConf(conf);
     oneNodeTest(conf);
     twoNodeTest(conf);
@@ -289,7 +291,7 @@
 
   /** Test unevenly distributed cluster */
   public void testBalancer1() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     initConf(conf);
     testUnevenDistribution(conf,
         new long[] {50*CAPACITY/100, 10*CAPACITY/100},

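The restart hunk in TestBalancer pairs two settings worth reading together: the cluster is reconstructed with format=false so existing blocks survive, and the safemode threshold drops to 0.0f so the restarted NameNode leaves safe mode without waiting on full block reports. A sketch of that idiom, wrapped in a hypothetical helper (the parameters stand in for whatever the test established):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    class RestartSketch {
      static MiniDFSCluster restartWithoutFormat(Configuration conf,
          int numDatanodes, String[] racks, long[] capacities)
          throws IOException {
        // Exit safe mode immediately rather than waiting for replicas.
        conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
        MiniDFSCluster cluster = new MiniDFSCluster(0 /* ephemeral port */,
            conf, numDatanodes, false /* do NOT format */,
            true /* manage dirs */, null, racks, capacities);
        cluster.waitActive();
        return cluster;
      }
    }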
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java Thu Oct  1 05:31:37 2009
@@ -25,6 +25,7 @@
 
 import static org.apache.hadoop.hdfs.protocol.FSConstants.LAYOUT_VERSION;
 
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -100,7 +101,7 @@
     UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
     UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
 
-    conf = new Configuration();
+    conf = new HdfsConfiguration();
     if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of ant
       System.setProperty("test.build.data", "build/test/data");
     }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Thu Oct  1 05:31:37 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -312,7 +313,7 @@
 
   public void setConf(Configuration iconf)  {
     conf = iconf;
-    storageId = conf.get("StorageId", "unknownStorageId" +
+    storageId = conf.get(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, "unknownStorageId" +
                                         new Random().nextInt());
     registerMBean(storageId);
     storage = new SimulatedStorage(


