hadoop-hdfs-commits mailing list archives

From sur...@apache.org
Subject svn commit: r820536 [2/4] - in /hadoop/hdfs/branches/branch-0.21: ./ src/ant/org/apache/hadoop/ant/ src/contrib/fuse-dfs/src/test/ src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/ s...
Date Thu, 01 Oct 2009 05:31:40 GMT
Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Thu Oct  1 05:31:37 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import java.util.*;
 
 /** The class is responsible for choosing the desired number of targets
@@ -52,7 +53,7 @@
   /** {@inheritDoc} */
   public void initialize(Configuration conf,  FSClusterStats stats,
                          NetworkTopology clusterMap) {
-    this.considerLoad = conf.getBoolean("dfs.replication.considerLoad", true);
+    this.considerLoad = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
     this.stats = stats;
     this.clusterMap = clusterMap;
   }
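
The hunk above is the template for most of this commit: an inlined configuration string is replaced by a named constant on DFSConfigKeys and, where one exists, a named default. A minimal, self-contained sketch of the pattern follows; the ConsiderLoadSketch class, its map-backed getBoolean, and the key string are illustrative stand-ins, not the real DFSConfigKeys API.

import java.util.HashMap;
import java.util.Map;

public class ConsiderLoadSketch {
  // Single source of truth for the key name and its default,
  // in the style of DFSConfigKeys (key value assumed, not taken from the diff).
  static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
      "dfs.namenode.replication.considerLoad";
  static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;

  // Tiny stand-in for org.apache.hadoop.conf.Configuration.
  static final Map<String, String> conf = new HashMap<String, String>();

  static boolean getBoolean(String key, boolean defaultValue) {
    String v = conf.get(key);
    return v == null ? defaultValue : Boolean.parseBoolean(v);
  }

  public static void main(String[] args) {
    // Before: conf.getBoolean("dfs.replication.considerLoad", true)
    // After:  the constant is the only place the key is spelled out.
    boolean considerLoad = getBoolean(
        DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
        DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
    System.out.println("considerLoad = " + considerLoad);
  }
}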

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Thu Oct  1 05:31:37 2009
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer;
 
@@ -86,8 +87,10 @@
     shouldRun = true;
 
     // Initialize other scheduling parameters from the configuration
-    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
-    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
+    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
+                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     HttpServer httpServer = backupNode.httpServer;
     httpServer.setAttribute("name.system.image", getFSImage());
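
A note on the two constants introduced above: for the rename to be behavior-preserving, DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT and DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT are assumed to equal the old literals, 3600 seconds and 4194304 bytes. A quick check of that arithmetic:

public class CheckpointDefaults {
  // Assumed to match the literals removed in the hunk above.
  static final long PERIOD_DEFAULT_SECS = 3600;
  static final long SIZE_DEFAULT_BYTES = 4194304;

  public static void main(String[] args) {
    System.out.println(PERIOD_DEFAULT_SECS / 60);         // 60 minutes
    System.out.println(SIZE_DEFAULT_BYTES == (4L << 20)); // true: exactly 4 MB
  }
}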

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java Thu Oct  1 05:31:37 2009
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -66,7 +67,7 @@
       ) throws IOException {
     ServletContext context = getServletContext();
     InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
-    Configuration conf = new Configuration(
+    Configuration conf = new HdfsConfiguration(
         (Configuration)context.getAttribute("name.conf"));
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Thu Oct  1 05:31:37 2009
@@ -23,6 +23,7 @@
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -79,7 +80,7 @@
     try {
       this.backupNode =
         (NamenodeProtocol) RPC.getProxy(NamenodeProtocol.class,
-            NamenodeProtocol.versionID, bnAddress, new Configuration());
+            NamenodeProtocol.versionID, bnAddress, new HdfsConfiguration());
     } catch(IOException e) {
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
       throw e;
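
Both of the preceding files swap new Configuration(...) for new HdfsConfiguration(...). The point of the subclass, sketched below under the assumption that HdfsConfiguration follows the usual addDefaultResource idiom, is that constructing it anywhere forces hdfs-default.xml and hdfs-site.xml onto the default resource list, so HDFS settings are visible even on code paths that never loaded an HDFS class before.

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch only; not the real org.apache.hadoop.hdfs.HdfsConfiguration.
public class HdfsConfigurationSketch extends Configuration {
  static {
    // Runs once, when the class is first loaded: every Configuration
    // created afterwards also picks up these two resources.
    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");
  }

  public HdfsConfigurationSketch() {
    super();
  }

  public HdfsConfigurationSketch(boolean loadDefaults) {
    super(loadDefaults);
  }

  public HdfsConfigurationSketch(Configuration conf) {
    super(conf);
  }
}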

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Thu Oct  1 05:31:37 2009
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
@@ -62,7 +63,8 @@
   /** Access an existing dfs name directory. */
   FSDirectory(FSNamesystem ns, Configuration conf) {
     this(new FSImage(), ns, conf);
-    if(conf.getBoolean("dfs.name.dir.restore", false)) {
+    if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, 
+                       DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
       NameNode.LOG.info("set FSImage.restoreFailedStorage");
       fsImage.setRestoreFailedStorage(true);
     }
@@ -90,7 +92,7 @@
   private void initialize(Configuration conf) {
     MetricsContext metricsContext = MetricsUtil.getContext("dfs");
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
-    directoryMetrics.setTag("sessionId", conf.get("session.id"));
+    directoryMetrics.setTag("sessionId", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
   }
 
   void loadFSImage(Collection<URI> dataDirs,

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Thu Oct  1 05:31:37 2009
@@ -64,6 +64,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Writable;
 
 /**
@@ -373,12 +374,12 @@
     if(startOpt == StartupOption.IMPORT 
         && (checkpointDirs == null || checkpointDirs.isEmpty()))
       throw new IOException("Cannot import image from a checkpoint. "
-                          + "\"fs.checkpoint.dir\" is not set." );
+                          + "\"dfs.namenode.checkpoint.dir\" is not set." );
 
     if(startOpt == StartupOption.IMPORT 
         && (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()))
       throw new IOException("Cannot import image from a checkpoint. "
-                          + "\"fs.checkpoint.edits.dir\" is not set." );
+                          + "\"dfs.namenode.checkpoint.dir\" is not set." );
     
     setStorageDirectories(dataDirs, editsDirs);
     // 1. For each data directory calculate its state and 
@@ -1901,7 +1902,7 @@
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
-    Collection<String> dirNames = conf.getStringCollection("fs.checkpoint.dir");
+    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
@@ -1927,7 +1928,7 @@
   static Collection<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
     Collection<String> dirNames = 
-      conf.getStringCollection("fs.checkpoint.edits.dir");
+      conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }
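
getCheckpointDirs and getCheckpointEditsDirs above share one pattern: read a comma-separated directory list under the new key, fall back to a single default, and hand back URIs. A plain-Java approximation of that pattern (the URI conversion in the real code goes through an HDFS utility; File.toURI is a stand-in here):

import java.io.File;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class CheckpointDirsSketch {
  static Collection<URI> getDirs(String configured, String defaultValue) {
    Collection<String> names = new ArrayList<String>();
    if (configured != null && configured.length() > 0) {
      names.addAll(Arrays.asList(configured.split(",")));
    }
    if (names.isEmpty() && defaultValue != null) {
      names.add(defaultValue); // mirrors the dirNames.size() == 0 branch above
    }
    Collection<URI> dirs = new ArrayList<URI>();
    for (String name : names) {
      dirs.add(new File(name.trim()).toURI()); // bare paths become file: URIs
    }
    return dirs;
  }

  public static void main(String[] args) {
    System.out.println(getDirs(null, "/tmp/hadoop/dfs/namesecondary"));
  }
}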

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Oct  1 05:31:37 2009
@@ -53,6 +53,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -291,7 +293,8 @@
     dnthread.start();
 
     this.dnsToSwitchMapping = ReflectionUtils.newInstance(
-        conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
+        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
+                      ScriptBasedMapping.class,
             DNSToSwitchMapping.class), conf);
     
     /* If the dns to switch mapping supports cache, resolve network 
@@ -305,7 +308,7 @@
   }
 
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
-    return getStorageDirs(conf, "dfs.name.dir");
+    return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
   }
 
   public static Collection<URI> getStorageDirs(Configuration conf,
@@ -317,7 +320,7 @@
       // but will retain directories specified in hdfs-site.xml
       // When importing image from a checkpoint, the name-node can
       // start with empty set of storage directories.
-      Configuration cE = new Configuration(false);
+      Configuration cE = new HdfsConfiguration(false);
       cE.addResource("core-default.xml");
       cE.addResource("core-site.xml");
       cE.addResource("hdfs-default.xml");
@@ -356,7 +359,7 @@
   }
 
   public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
-    return getStorageDirs(conf, "dfs.name.edits.dir");
+    return getStorageDirs(conf, DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
   }
 
   /**
@@ -400,31 +403,37 @@
     }
     LOG.info("fsOwner=" + fsOwner);
 
-    this.supergroup = conf.get("dfs.permissions.supergroup", "supergroup");
-    this.isPermissionEnabled = conf.getBoolean("dfs.permissions", true);
+    this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, 
+                               DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+    this.isPermissionEnabled = conf.getBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
+                                               DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
     LOG.info("supergroup=" + supergroup);
     LOG.info("isPermissionEnabled=" + isPermissionEnabled);
-    short filePermission = (short)conf.getInt("dfs.upgrade.permission", 00777);
+    short filePermission = (short)conf.getInt(DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
+                                              DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
     this.defaultPermission = PermissionStatus.createImmutable(
         fsOwner.getUserName(), supergroup, new FsPermission(filePermission));
 
     long heartbeatInterval = conf.getLong("dfs.heartbeat.interval", 3) * 1000;
     this.heartbeatRecheckInterval = conf.getInt(
-        "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
       10 * heartbeatInterval;
     this.replicationRecheckInterval = 
-      conf.getInt("dfs.replication.interval", 3) * 1000L;
+      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
+                  DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE),
-        conf.getInt("io.bytes.per.checksum", DEFAULT_BYTES_PER_CHECKSUM),
-        conf.getInt("dfs.write.packet.size", DEFAULT_WRITE_PACKET_SIZE),
+        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE),
+        conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BYTES_PER_CHECKSUM),
+        conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DEFAULT_WRITE_PACKET_SIZE),
         (short) conf.getInt("dfs.replication", DEFAULT_REPLICATION_FACTOR),
         conf.getInt("io.file.buffer.size", DEFAULT_FILE_BUFFER_SIZE));
-    this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
+    this.maxFsObjects = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY, 
+                                     DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
     this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
                                          20*(int)(heartbeatInterval/1000));
-    this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
+    this.accessTimePrecision = conf.getLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
     this.supportAppends = conf.getBoolean("dfs.support.append", false);
     this.isAccessTokenEnabled = conf.getBoolean(
         AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false);
@@ -3007,7 +3016,7 @@
     // Reread the config to get dfs.hosts and dfs.hosts.exclude filenames.
     // Update the file names and refresh internal includes and excludes list
     if (conf == null)
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     hostsReader.updateFileNames(conf.get("dfs.hosts",""), 
                                 conf.get("dfs.hosts.exclude", ""));
     hostsReader.refresh();
@@ -3160,9 +3169,10 @@
      * @param conf configuration
      */
     SafeModeInfo(Configuration conf) {
-      this.threshold = conf.getFloat("dfs.safemode.threshold.pct", 0.95f);
-      this.extension = conf.getInt("dfs.safemode.extension", 0);
-      this.safeReplication = conf.getInt("dfs.replication.min", 1);
+      this.threshold = conf.getFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 0.95f);
+      this.extension = conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+      this.safeReplication = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 
+                                         DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
       this.blockTotal = 0; 
       this.blockSafe = 0;
     }
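
A worked example of the heartbeat arithmetic in the hunk above, using the defaults visible there (dfs.heartbeat.interval of 3 seconds, recheck interval of 5 minutes): a datanode is declared dead after 2 * recheck + 10 * heartbeat.

public class HeartbeatExpiry {
  public static void main(String[] args) {
    long heartbeatInterval = 3 * 1000;             // ms, default from the hunk
    long heartbeatRecheckInterval = 5 * 60 * 1000; // ms, default from the hunk
    long expire = 2 * heartbeatRecheckInterval + 10 * heartbeatInterval;
    // 630000 ms, i.e. 10.5 minutes before a silent datanode is marked dead
    System.out.println(expire + " ms = " + (expire / 60000.0) + " min");
  }
}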

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Thu Oct  1 05:31:37 2009
@@ -35,6 +35,8 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -81,8 +83,8 @@
       final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
       xml.declaration();
 
-      final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
-      final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
+      final Configuration conf = new HdfsConfiguration(DataNode.getDataNode().getConf());
+      final int socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
       final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
       UnixUserGroupInformation.saveToConf(conf,
           UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
@@ -99,4 +101,4 @@
       xml.endDocument();
     }
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Thu Oct  1 05:31:37 2009
@@ -26,6 +26,7 @@
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -48,7 +49,7 @@
     UserGroupInformation.setCurrentUser(ugi);
 
     final ServletContext context = getServletContext();
-    final Configuration conf = new Configuration((Configuration) context.getAttribute("name.conf"));
+    final Configuration conf = new HdfsConfiguration((Configuration) context.getAttribute("name.conf"));
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Oct  1 05:31:37 2009
@@ -63,6 +63,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.RPC;
@@ -244,11 +246,11 @@
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
-        conf.get("dfs.http.address", "0.0.0.0:50070"));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
   }
 
   protected void setHttpServerAddress(Configuration conf){
-    conf.set("dfs.http.address", getHostPortString(httpAddress));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, getHostPortString(httpAddress));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -337,10 +339,11 @@
     this.httpServer = new HttpServer("hdfs", infoHost, infoPort, 
         infoPort == 0, conf);
     if (conf.getBoolean("dfs.https.enable", false)) {
-      boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+      boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
+                                               DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-          "dfs.https.address", infoHost + ":" + 0));
-      Configuration sslConf = new Configuration(false);
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
+      Configuration sslConf = new HdfsConfiguration(false);
       sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
           "ssl-server.xml"));
       this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
@@ -828,11 +831,11 @@
 
   /**
    * Refresh the list of datanodes that the namenode should allow to  
-   * connect.  Re-reads conf by creating new Configuration object and 
+   * connect.  Re-reads conf by creating a new HdfsConfiguration object and 
    * uses the files list in the configuration to update the list. 
    */
   public void refreshNodes() throws IOException {
-    namesystem.refreshNodes(new Configuration());
+    namesystem.refreshNodes(new HdfsConfiguration());
   }
 
   /**
@@ -1163,7 +1166,7 @@
   public static NameNode createNameNode(String argv[], 
                                  Configuration conf) throws IOException {
     if (conf == null)
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
       printUsage();

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Oct  1 05:31:37 2009
@@ -38,6 +38,8 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -117,7 +119,7 @@
    */
   private void initialize(Configuration conf) throws IOException {
     // initiate Java VM metrics
-    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
+    JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
     
     // Create connection to the namenode.
     shouldRun = true;
@@ -138,12 +140,15 @@
     checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
 
     // Initialize other scheduling parameters from the configuration
-    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
-    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
+    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
+                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     // initialize the webserver for uploading files.
     InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
-        conf.get("dfs.secondary.http.address", "0.0.0.0:50090"));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+                 DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
     infoBindAddress = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
     infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
@@ -156,7 +161,7 @@
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
-    conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort); 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
     LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
              "(" + checkpointPeriod/60 + " min)");
@@ -280,7 +285,8 @@
     if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
-    String configuredAddress = conf.get("dfs.http.address", "0.0.0.0:50070");
+    String configuredAddress = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+                                        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
     if (sockAddr.getAddress().isAnyLocalAddress()) {
       return fsName.getHost() + ":" + sockAddr.getPort();
@@ -455,7 +461,7 @@
    */
   public static void main(String[] argv) throws Exception {
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
-    Configuration tconf = new Configuration();
+    Configuration tconf = new HdfsConfiguration();
     if (argv.length >= 1) {
       SecondaryNameNode secondary = new SecondaryNameNode(tconf);
       int ret = secondary.processArgs(argv);
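
The SecondaryNameNode hunks above also show the ephemeral-port write-back idiom: bind the web server to port 0, then publish the port the OS actually assigned back into the configuration under the same key. A standalone illustration with a plain ServerSocket:

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortSketch {
  public static void main(String[] args) throws Exception {
    ServerSocket ss = new ServerSocket();
    try {
      ss.bind(new InetSocketAddress("127.0.0.1", 0)); // port 0: OS picks one
      int actualPort = ss.getLocalPort();
      // SecondaryNameNode does the equivalent of:
      //   conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      //            host + ":" + actualPort);
      System.out.println("bound to 127.0.0.1:" + actualPort);
    } finally {
      ss.close();
    }
  }
}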

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Thu Oct  1 05:31:37 2009
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.mortbay.jetty.InclusiveByteRange;
@@ -40,7 +41,7 @@
 
   static InetSocketAddress nameNodeAddr;
   static DataNode datanode = null;
-  private static final Configuration masterConf = new Configuration();
+  private static final Configuration masterConf = new HdfsConfiguration();
   static {
     if ((datanode = DataNode.getDataNode()) != null) {
       nameNodeAddr = datanode.getNameNodeAddr();
@@ -50,7 +51,7 @@
   /** getting a client for connecting to dfs */
   protected DFSClient getDFSClient(HttpServletRequest request)
       throws IOException {
-    Configuration conf = new Configuration(masterConf);
+    Configuration conf = new HdfsConfiguration(masterConf);
     UnixUserGroupInformation.saveToConf(conf,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, getUGI(request));
     return new DFSClient(nameNodeAddr, conf);

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java Thu Oct  1 05:31:37 2009
@@ -22,6 +22,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.util.MetricsBase;
 import org.apache.hadoop.metrics.util.MetricsIntValue;
@@ -67,7 +68,7 @@
 
   public FSNamesystemMetrics(FSNamesystem fsNameSystem, Configuration conf) {
     this.fsNameSystem = fsNameSystem;
-    String sessionId = conf.get("session.id");
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
      
     // Create a record for FSNamesystem metrics
     MetricsContext metricsContext = MetricsUtil.getContext("dfs");

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Thu Oct  1 05:31:37 2009
@@ -21,6 +21,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics.*;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.metrics.util.MetricsBase;
@@ -86,7 +87,7 @@
 
       
     public NameNodeMetrics(Configuration conf, NamenodeRole nameNodeRole) {
-      String sessionId = conf.get("session.id");
+      String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
       // Initiate Java VM metrics
       String processName = nameNodeRole.toString();
       JvmMetrics.init(processName, sessionId);

Modified: hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/java/org/apache/hadoop/hdfs/tools/DFSck.java Thu Oct  1 05:31:37 2009
@@ -30,6 +30,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
@@ -106,7 +108,8 @@
     }
 
     final StringBuffer url = new StringBuffer("http://");
-    url.append(getConf().get("dfs.http.address", "0.0.0.0:50070"));
+    url.append(getConf().get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, 
+                             DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
     url.append("/fsck?ugi=").append(ugi).append("&path=");
 
     String dir = "/";
@@ -162,7 +165,7 @@
     if ((args.length == 0 ) || ("-files".equals(args[0]))) 
       printUsage();
     else
-      res = ToolRunner.run(new DFSck(new Configuration()), args);
+      res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
     System.exit(res);
   }
 }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/fi/FiConfig.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/fi/FiConfig.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/fi/FiConfig.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/fi/FiConfig.java Thu Oct  1 05:31:37 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fi;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /**
  * This class wraps the logic around fault injection configuration file
@@ -37,7 +38,7 @@
   
   protected static void init () {
     if (conf == null) {
-      conf = new Configuration(false);
+      conf = new HdfsConfiguration(false);
       String configName = System.getProperty(CONFIG_PARAMETER, DEFAULT_CONFIG);
       conf.addResource(configName);
     }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java Thu Oct  1 05:31:37 2009
@@ -20,6 +20,8 @@
 
 import org.junit.Test;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.fi.FiHFlushTestUtil;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fi.FiHFlushTestUtil.DerrAction;
@@ -62,7 +64,7 @@
   @Test
   public void hFlushFi01_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName, 
+    runDiskErrorTest(new HdfsConfiguration(), methodName, 
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 0));
   }
 
@@ -75,11 +77,11 @@
   @Test(expected = IOException.class)
   public void hFlushFi01_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName, 
         customBlockSize, new DerrAction(methodName, 0));
   }
@@ -90,11 +92,11 @@
   @Test(expected = IOException.class)
   public void hFlushFi01_c() throws IOException { 
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName, 
         customBlockSize, new DerrAction(methodName, 0));
   }
@@ -104,7 +106,7 @@
   @Test
   public void hFlushFi02_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName,
+    runDiskErrorTest(new HdfsConfiguration(), methodName,
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 1));
   }
 
@@ -113,11 +115,11 @@
 @Test(expected = IOException.class)
   public void hFlushFi02_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 1));
   }
@@ -127,11 +129,11 @@
   @Test(expected = IOException.class)
   public void hFlushFi02_c() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 1));
   }
@@ -141,7 +143,7 @@
   @Test
   public void hFlushFi03_a() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runDiskErrorTest(new Configuration(), methodName,
+    runDiskErrorTest(new HdfsConfiguration(), methodName,
         AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 2));
   }
   
@@ -150,11 +152,11 @@
   @Test(expected = IOException.class)
   public void hFlushFi03_b() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 2));
   }
@@ -164,11 +166,11 @@
   @Test(expected = IOException.class)
   public void hFlushFi03_c() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     int customPerChecksumSize = 400;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     runDiskErrorTest(conf, methodName,
         customBlockSize, new DerrAction(methodName, 2));
   }
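
The TestFiHFlush cases above always set the custom block size to customPerChecksumSize * 3; the working assumption is that the block size must be an exact multiple of the bytes-per-checksum chunk, which both 512 * 3 and 400 * 3 satisfy.

public class ChecksumBlockCheck {
  public static void main(String[] args) {
    // Mirrors the two configurations used in the tests above.
    for (int perChecksum : new int[] { 512, 400 }) {
      int blockSize = perChecksum * 3;
      System.out.println(blockSize + " % " + perChecksum + " = "
          + (blockSize % perChecksum)); // 0 in both cases
    }
  }
}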

Modified: hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java Thu Oct  1 05:31:37 2009
@@ -34,6 +34,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 import org.junit.Assert;
 import org.junit.Test;
@@ -43,11 +45,11 @@
   static final short REPLICATION = 3;
   static final long BLOCKSIZE = 1L * (1L << 20);
 
-  static final Configuration conf = new Configuration();
+  static final Configuration conf = new HdfsConfiguration();
   static {
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
-    conf.setInt("dfs.socket.timeout", 5000);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
@@ -298,4 +300,4 @@
     final String methodName = FiTestUtil.getMethodName();
     runCallReceivePacketTest(methodName, 2, new DoosAction(methodName, 2));
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java Thu Oct  1 05:31:37 2009
@@ -23,6 +23,7 @@
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -53,7 +54,7 @@
                        "host5", "host6", "host7", "host8" };
     dfsCluster = new MiniDFSCluster(conf, 8, true, racks, hosts);
     
-    namenode = conf.get("fs.default.name", "file:///");
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     
     username = System.getProperty("user.name");
     dfsAdmCmdExecutor = new DFSAdminCmdExecutor(namenode);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java Thu Oct  1 05:31:37 2009
@@ -21,6 +21,7 @@
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 import junit.framework.TestCase;
@@ -48,7 +49,7 @@
   
   protected void setUp() throws Exception {
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       dfsCluster = new MiniDFSCluster(conf, 1, true, null);
       fs = FileSystem.get(conf);
     } catch (IOException e) {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Thu Oct  1 05:31:37 2009
@@ -23,6 +23,7 @@
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.junit.After;
@@ -40,7 +41,7 @@
   @BeforeClass
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException  {
-    cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+    cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
     fc = FileContext.getFileContext(cluster.getFileSystem());
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UnixUserGroupInformation.login().getUserName()));

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java Thu Oct  1 05:31:37 2009
@@ -28,6 +28,7 @@
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
@@ -48,7 +49,7 @@
    */
   public void testDfsUrls() throws IOException {
 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
 
@@ -106,7 +107,7 @@
    */
   public void testFileUrls() throws IOException, URISyntaxException {
     // URLStreamHandler is already set in JVM by testDfsUrls() 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     // Locate the test temporary directory.
     File tmpDir = new File(conf.get("hadoop.tmp.dir"));

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java Thu Oct  1 05:31:37 2009
@@ -23,6 +23,8 @@
 import java.io.FileWriter;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
 import junit.framework.TestCase;
@@ -30,7 +32,7 @@
  * This class tests the load generator.
  */
 public class TestLoadGenerator extends TestCase {
-  private static final Configuration CONF = new Configuration();
+  private static final Configuration CONF = new HdfsConfiguration();
   private static final int DEFAULT_BLOCK_SIZE = 10;
   private static final String OUT_DIR = 
     System.getProperty("test.build.data","build/test/data");
@@ -47,8 +49,8 @@
   
 
   static {
-    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
+    CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
     CONF.setLong("dfs.heartbeat.interval", 1L);
   }
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java Thu Oct  1 05:31:37 2009
@@ -27,7 +27,9 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -160,8 +162,8 @@
   public void testGeneralSBBehavior() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       conf.setBoolean("dfs.support.append", true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
 
@@ -200,8 +202,8 @@
 
     try {
       // Set up cluster for testing
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem hdfs = cluster.getFileSystem();
 
@@ -246,8 +248,8 @@
   public void testStickyBitPersistence() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
-      conf.setBoolean("dfs.permissions", true);
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem hdfs = cluster.getFileSystem();
 
@@ -293,7 +295,7 @@
    */
   static private FileSystem logonAs(UnixUserGroupInformation user,
       Configuration conf, FileSystem hdfs) throws IOException {
-    Configuration conf2 = new Configuration(conf);
+    Configuration conf2 = new HdfsConfiguration(conf);
     UnixUserGroupInformation.saveToConf(conf2,
         UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/AppendTestUtil.java Thu Oct  1 05:31:37 2009
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /** Utilities for append-related tests */ 
 public class AppendTestUtil {
@@ -94,7 +95,7 @@
    */
   public static FileSystem createHdfsWithDifferentUsername(Configuration conf
       ) throws IOException {
-    Configuration conf2 = new Configuration(conf);
+    Configuration conf2 = new HdfsConfiguration(conf);
     String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
     UnixUserGroupInformation.saveToConf(conf2,
         UnixUserGroupInformation.UGI_PROPERTY_NAME,

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java Thu Oct  1 05:31:37 2009
@@ -226,7 +226,7 @@
    * @param args
    */
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(),
+    int res = ToolRunner.run(new HdfsConfiguration(),
         new BenchmarkThroughput(), args);
     System.exit(res);
   }

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Thu Oct  1 05:31:37 2009
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessToken;
 import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -155,7 +156,7 @@
   /** check if the files have been copied correctly. */
   public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
     
-    //Configuration conf = new Configuration();
+    //Configuration conf = new HdfsConfiguration();
     Path root = new Path(topdir);
     
     for (int idx = 0; idx < nFiles; idx++) {
@@ -284,7 +285,7 @@
 
   static public Configuration getConfigurationWithDifferentUsername(Configuration conf
       ) throws IOException {
-    final Configuration c = new Configuration(conf);
+    final Configuration c = new HdfsConfiguration(conf);
     final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
     final String username = ugi.getUserName()+"_XXX";
     final String[] groups = {ugi.getGroupNames()[0] + "_XXX"};

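getConfigurationWithDifferentUsername clones the caller's configuration and stamps an altered UGI into the clone, so a second FileSystem handle can be opened as another user; cloning via HdfsConfiguration keeps the HDFS defaults attached. A sketch of the same steps under those assumptions (the _XXX suffixing mirrors the hunk above; error handling omitted):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.security.UnixUserGroupInformation;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UserSwapSketch {
      static Configuration withSuffixedUser(Configuration conf) throws IOException {
        // Clone as HdfsConfiguration so HDFS keys still resolve.
        Configuration c = new HdfsConfiguration(conf);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
        String[] groups = { ugi.getGroupNames()[0] + "_XXX" };
        // Save the modified identity into the cloned configuration.
        UnixUserGroupInformation.saveToConf(c,
            UnixUserGroupInformation.UGI_PROPERTY_NAME,
            new UnixUserGroupInformation(ugi.getUserName() + "_XXX", groups));
        return c;
      }
    }
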
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java Thu Oct  1 05:31:37 2009
@@ -98,7 +98,7 @@
     int numBlocksPerDNtoInject = 0;
     int replication = 1;
     
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
 
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("-n")) {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Oct  1 05:31:37 2009
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
@@ -254,17 +255,17 @@
     
     // Setup the NameNode configuration
     FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
-    conf.set("dfs.http.address", "127.0.0.1:0");  
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");  
     if (manageNameDfsDirs) {
-      conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
+      conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name1").getPath()+","+
                new File(base_dir, "name2").getPath());
-      conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").
+      conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, new File(base_dir, "namesecondary1").
                 getPath()+"," + new File(base_dir, "namesecondary2").getPath());
     }
     
     int replication = conf.getInt("dfs.replication", 3);
     conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
-    conf.setInt("dfs.safemode.extension", 0);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
     conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second
     
     // Format and clean out DataNode directories
@@ -280,7 +281,7 @@
                      operation == StartupOption.FORMAT ||
                      operation == StartupOption.REGULAR) ?
       new String[] {} : new String[] {operation.getName()};
-    conf.setClass("topology.node.switch.mapping.impl", 
+    conf.setClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                    StaticMapping.class, DNSToSwitchMapping.class);
     nameNode = NameNode.createNameNode(args, conf);
     
@@ -386,7 +387,7 @@
     
     
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
-      Configuration dnConf = new Configuration(conf);
+      Configuration dnConf = new HdfsConfiguration(conf);
       if (manageDfsDirs) {
         File dir1 = new File(data_dir, "data"+(2*i+1));
         File dir2 = new File(data_dir, "data"+(2*i+2));
@@ -396,7 +397,7 @@
           throw new IOException("Mkdirs failed to create directory for DataNode "
                                 + i + ": " + dir1 + " or " + dir2);
         }
-        dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 
+        dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dir1.getPath() + "," + dir2.getPath()); 
       }
       if (simulatedCapacities != null) {
         dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
@@ -404,11 +405,11 @@
             simulatedCapacities[i-curDatanodesNum]);
       }
       System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
-                         + dnConf.get("dfs.data.dir"));
+                         + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
       if (hosts != null) {
-        dnConf.set("slave.host.name", hosts[i - curDatanodesNum]);
+        dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
         System.out.println("Starting DataNode " + i + " with hostname set to: " 
-                           + dnConf.get("slave.host.name"));
+                           + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
@@ -417,7 +418,7 @@
         StaticMapping.addNodeToRack(name,
                                     racks[i-curDatanodesNum]);
       }
-      Configuration newconf = new Configuration(dnConf); // save config
+      Configuration newconf = new HdfsConfiguration(dnConf); // save config
       if (hosts != null) {
         NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
       }
@@ -709,7 +710,7 @@
       boolean keepPort) throws IOException {
     Configuration conf = dnprop.conf;
     String[] args = dnprop.dnArgs;
-    Configuration newconf = new Configuration(conf); // save cloned config
+    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
     if (keepPort) {
       InetSocketAddress addr = dnprop.datanode.getSelfAddr();
       conf.set("dfs.datanode.address", addr.getAddress().getHostAddress() + ":"

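The other recurring change in this file replaces quoted key strings with constants from DFSConfigKeys, so a misspelled key becomes a compile error rather than a silently ignored setting. A small sketch of the idiom, reusing constants that appear in the hunks above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ConfigKeySketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Typos in these names would fail at compile time, unlike the
        // old quoted-string form.
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
      }
    }
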
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java Thu Oct  1 05:31:37 2009
@@ -31,7 +31,7 @@
 public class TestAbandonBlock extends junit.framework.TestCase {
   public static final Log LOG = LogFactory.getLog(TestAbandonBlock.class);
   
-  private static final Configuration CONF = new Configuration();
+  private static final Configuration CONF = new HdfsConfiguration();
   static final String FILE_NAME_PREFIX
       = "/" + TestAbandonBlock.class.getSimpleName() + "_"; 
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java Thu Oct  1 05:31:37 2009
@@ -53,7 +53,7 @@
     LOG.info("Test testBlockMissingException started.");
     long blockSize = 1024L;
     int numBlocks = 4;
-    conf = new Configuration();
+    conf = new HdfsConfiguration();
     try {
       dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
       dfs.waitActive();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java Thu Oct  1 05:31:37 2009
@@ -65,11 +65,11 @@
   private static Configuration conf;
 
   static {
-    conf = new Configuration();
+    conf = new HdfsConfiguration();
     int customPerChecksumSize = 512;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
-    conf.setLong("dfs.block.size", customBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
     conf.setLong("dfs.datanode.directoryscan.interval", DN_RESCAN_INTERVAL);
   }
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Thu Oct  1 05:31:37 2009
@@ -38,7 +38,7 @@
 
   public void testBlocksScheduledCounter() throws IOException {
     
-    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
+    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, 
                                                 true, null);
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Thu Oct  1 05:31:37 2009
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
 
 import org.junit.Test;
@@ -39,7 +40,7 @@
   
   @Test public void testGetNewStamp() throws IOException {
     int numDataNodes = 1;
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java Thu Oct  1 05:31:37 2009
@@ -43,8 +43,8 @@
  *  5. Swaps two meta files, i.e the format of the meta files 
  *     are valid but their CRCs do not match with their corresponding 
  *     data blocks
- * The above tests are run for varied values of io.bytes.per.checksum 
- * and dfs.block.size. It tests for the case when the meta file is 
+ * The above tests are run for varied values of dfs.bytes-per-checksum 
+ * and dfs.blocksize. It tests for the case when the meta file is 
  * multiple blocks.
  *
  * Another portion of the test is commented out till HADOOP-1557 
@@ -207,7 +207,7 @@
     // default parameters
     //
     System.out.println("TestCrcCorruption with default parameters");
-    Configuration conf1 = new Configuration();
+    Configuration conf1 = new HdfsConfiguration();
     conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
     DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8*1024);
     thistest(conf1, util1);
@@ -216,9 +216,9 @@
     // specific parameters
     //
     System.out.println("TestCrcCorruption with specific parameters");
-    Configuration conf2 = new Configuration();
-    conf2.setInt("io.bytes.per.checksum", 17);
-    conf2.setInt("dfs.block.size", 34);
+    Configuration conf2 = new HdfsConfiguration();
+    conf2.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17);
+    conf2.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34);
     DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
     thistest(conf2, util2);
   }

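The Javadoc rewrite above tracks two key renames: io.bytes.per.checksum is now dfs.bytes-per-checksum, and dfs.block.size is now dfs.blocksize. Going through the DFSConfigKeys constants insulates test code from such renames; a sketch using the hunk's deliberately tiny values of 17 and 34:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ChecksumSizeSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // The constants expand to the renamed keys, so this code needs
        // no edit if a key's string form changes again.
        conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17);
        conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34);
        System.out.println(conf.get(DFSConfigKeys.DFS_BLOCK_SIZE_KEY));
      }
    }
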
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Oct  1 05:31:37 2009
@@ -61,14 +61,14 @@
    */
   public void testWriteTimeoutAtDataNode() throws IOException,
                                                   InterruptedException { 
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     final int writeTimeout = 100; //milliseconds.
     // set a very short write timeout for datanode, so that tests runs fast.
     conf.setInt("dfs.datanode.socket.write.timeout", writeTimeout); 
     // set a smaller block size
     final int blockSize = 10*1024*1024;
-    conf.setInt("dfs.block.size", blockSize);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt("dfs.client.max.block.acquire.failures", 1);
     // set a small buffer size
     final int bufferSize = 4096;
@@ -225,7 +225,7 @@
   
   public void testNotYetReplicatedErrors() throws IOException
   {   
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     // allow 1 retry (2 total calls)
     conf.setInt("dfs.client.block.write.locateFollowingBlock.retries", 1);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java Thu Oct  1 05:31:37 2009
@@ -88,11 +88,11 @@
        * For now disabling block verification so that the contents are 
        * not changed.
        */
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Finalize with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java Thu Oct  1 05:31:37 2009
@@ -43,7 +43,7 @@
    * not create a subdirectory off a file.
    */
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
     try {
@@ -80,7 +80,7 @@
    * Tests mkdir will not create directory when parent is missing.
    */
   public void testMkdir() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java Thu Oct  1 05:31:37 2009
@@ -38,7 +38,7 @@
 /** Unit tests for permission */
 public class TestDFSPermission extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
-  final private static Configuration conf = new Configuration();
+  final private static Configuration conf = new HdfsConfiguration();
   
   final private static String GROUP1_NAME = "group1";
   final private static String GROUP2_NAME = "group2";
@@ -79,7 +79,7 @@
       LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS);
       
       // explicitly turn on permission checking
-      conf.setBoolean("dfs.permissions", true);
+      conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       
       // Initiate all four users
       SUPERUSER = UnixUserGroupInformation.login(conf);

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java Thu Oct  1 05:31:37 2009
@@ -39,7 +39,7 @@
   }
 
   public void testRename() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     try {
       FileSystem fs = cluster.getFileSystem();

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Thu Oct  1 05:31:37 2009
@@ -121,11 +121,11 @@
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
-      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
-      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
       
       log("Normal NameNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java Thu Oct  1 05:31:37 2009
@@ -93,7 +93,7 @@
   }
 
   public void testZeroSizeFile() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -135,7 +135,7 @@
   }
   
   public void testRecrusiveRm() throws IOException {
-	  Configuration conf = new Configuration();
+	  Configuration conf = new HdfsConfiguration();
 	  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
 	  FileSystem fs = cluster.getFileSystem();
 	  assertTrue("Not a HDFS: " + fs.getUri(), 
@@ -160,7 +160,7 @@
   }
     
   public void testDu() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -209,7 +209,7 @@
                                   
   }
   public void testPut() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -305,7 +305,7 @@
 
   /** check command error outputs and exit statuses. */
   public void testErrOutPut() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
@@ -447,8 +447,8 @@
   }
   
   public void testURIPaths() throws Exception {
-    Configuration srcConf = new Configuration();
-    Configuration dstConf = new Configuration();
+    Configuration srcConf = new HdfsConfiguration();
+    Configuration dstConf = new HdfsConfiguration();
     MiniDFSCluster srcCluster =  null;
     MiniDFSCluster dstCluster = null;
     String bak = System.getProperty("test.build.data");
@@ -539,7 +539,7 @@
   }
 
   public void testText() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
@@ -583,7 +583,7 @@
   }
 
   public void testCopyToLocal() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -680,7 +680,7 @@
   }
 
   public void testCount() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
     FsShell shell = new FsShell();
@@ -836,14 +836,14 @@
   }
   
   public void testFilePermissions() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     
     //test chmod on local fs
     FileSystem fs = FileSystem.getLocal(conf);
     testChmod(conf, fs, 
               (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
     
-    conf.set("dfs.permissions", "true");
+    conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
     
     //test chmod on DFS
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -901,7 +901,7 @@
    * Tests various options of DFSShell.
    */
   public void testDFSShell() throws IOException {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     /* This tests some properties of ChecksumFileSystem as well.
      * Make sure that we create ChecksumDFS */
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -1127,7 +1127,7 @@
     MiniDFSCluster dfs = null;
     PrintStream bak = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       dfs = new MiniDFSCluster(conf, 2, true, null);
       FileSystem fs = dfs.getFileSystem();
       Path p = new Path("/foo");
@@ -1160,7 +1160,7 @@
   
   public void testGet() throws IOException {
     DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
-    final Configuration conf = new Configuration();
+    final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
@@ -1218,7 +1218,7 @@
   }
 
   public void testLsr() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java Thu Oct  1 05:31:37 2009
@@ -37,7 +37,7 @@
     String namenode = null;
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       cluster = new MiniDFSCluster(conf, 1, true, null);
       namenode = FileSystem.getDefaultUri(conf).toString();
       String [] args = new String[4];
@@ -70,7 +70,7 @@
                "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
                "<configuration>\n"+
                " <property>\n"+
-               "   <name>fs.default.name</name>\n"+
+               "   <name>fs.defaultFS</name>\n"+
                "   <value>"+namenode+"</value>\n"+
                " </property>\n"+
                "</configuration>\n");
@@ -91,7 +91,7 @@
   private void testPropertyOption(String[] args, String namenode) {
     // prepare arguments to create a directory /data
     args[0] = "-D";
-    args[1] = "fs.default.name="+namenode;
+    args[1] = "fs.defaultFS="+namenode;
     execute(args, namenode);        
   }
     
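Both hunks above move from the deprecated fs.default.name to fs.defaultFS, the current name for the default-filesystem URI. A sketch of two equivalent ways to set it in code (the localhost URI is a placeholder, not from this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DefaultFsSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Direct key form, matching the -D usage in the test above.
        conf.set("fs.defaultFS", "hdfs://localhost:9000");  // placeholder URI
        // Helper form used elsewhere in this commit:
        FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");
        System.out.println(FileSystem.getDefaultUri(conf));
      }
    }
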

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Thu Oct  1 05:31:37 2009
@@ -169,10 +169,10 @@
   public void testVersions() throws Exception {
     UpgradeUtilities.initialize();
     Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
-                                                      new Configuration());
+                                                      new HdfsConfiguration());
     StorageInfo[] versions = initializeVersions();
     UpgradeUtilities.createStorageDirs(
-                                       NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
+                                       NAME_NODE, conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
     cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
     StorageInfo nameNodeVersion = new StorageInfo(
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
@@ -181,7 +181,7 @@
     log("NameNode version info", NAME_NODE, null, nameNodeVersion);
     for (int i = 0; i < versions.length; i++) {
       File[] storage = UpgradeUtilities.createStorageDirs(
-                                                          DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+                                                          DATA_NODE, conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
       log("DataNode version info", DATA_NODE, i, versions[i]);
       UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
       try {

Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=820536&r1=820535&r2=820536&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Thu Oct  1 05:31:37 2009
@@ -111,8 +111,8 @@
    */
   String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
     String[] baseDirs = (nodeType == NAME_NODE ?
-                         conf.getStrings("dfs.name.dir") :
-                         conf.getStrings("dfs.data.dir"));
+                         conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY) :
+                         conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
     UpgradeUtilities.createEmptyDirs(baseDirs);
     if (state[0])  // current
       UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
@@ -179,7 +179,7 @@
     UpgradeUtilities.initialize();
 
     for (int numDirs = 1; numDirs <= 2; numDirs++) {
-      conf = new Configuration();
+      conf = new HdfsConfiguration();
       conf.setInt("dfs.datanode.scan.period.hours", -1);      
       conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
       for (int i = 0; i < testCases.length; i++) {


