hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sur...@apache.org
Subject svn commit: r1062020 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Fri, 21 Jan 2011 20:47:04 GMT
Author: suresh
Date: Fri Jan 21 20:47:03 2011
New Revision: 1062020

URL: http://svn.apache.org/viewvc?rev=1062020&view=rev
Log:
HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts" and "dfs.hosts.exclude".
Contributed by Erik Steffl.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jan 21 20:47:03 2011
@@ -41,6 +41,9 @@ Trunk (unreleased changes)
     HDFS-1586. Add InterfaceAudience and InterfaceStability annotations to 
     MiniDFSCluster. (suresh)
 
+    HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
+    and "dfs.hosts.exclude". (Erik Steffl via suresh)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Jan 21 20:47:03
2011
@@ -132,6 +132,8 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
   public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
   public static final String  DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
+  public static final String  DFS_HOSTS = "dfs.hosts";
+  public static final String  DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
 
   // Much code in hdfs is not yet updated to use these keys.
   public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri
Jan 21 20:47:03 2011
@@ -313,8 +313,9 @@ public class FSNamesystem implements FSC
       this.dir = new FSDirectory(fsImage, this, conf);
     }
     this.safeMode = new SafeModeInfo(conf);
-    this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""),
-                        conf.get("dfs.hosts.exclude",""));
+    this.hostsReader = new HostsFileReader(
+      conf.get(DFSConfigKeys.DFS_HOSTS,""),
+      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE,""));
     if (isBlockTokenEnabled) {
       blockTokenSecretManager = new BlockTokenSecretManager(true,
           blockKeyUpdateInterval, blockTokenLifetime);
@@ -3628,8 +3629,8 @@ public class FSNamesystem implements FSC
     // Update the file names and refresh internal includes and excludes list
     if (conf == null)
       conf = new HdfsConfiguration();
-    hostsReader.updateFileNames(conf.get("dfs.hosts",""), 
-                                conf.get("dfs.hosts.exclude", ""));
+    hostsReader.updateFileNames(conf.get(DFSConfigKeys.DFS_HOSTS,""), 
+                                conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
     hostsReader.refresh();
     writeLock();
     try {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Jan 21
20:47:03 2011
@@ -1262,7 +1262,7 @@ public class MiniDFSCluster {
   
   private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile) throws IOException
{
     if (setupHostsFile) {
-      String hostsFile = conf.get("dfs.hosts", "").trim();
+      String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
       if (hostsFile.length() == 0) {
         throw new IOException("Parameter dfs.hosts is not setup in conf");
       }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Fri Jan 21
20:47:03 2011
@@ -72,7 +72,7 @@ public class TestDecommission {
     
     // Setup conf
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
-    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
@@ -373,7 +373,7 @@ public class TestDecommission {
    */
   @Test
   public void testHostsFile() throws IOException, InterruptedException {
-    conf.set("dfs.hosts", hostsFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     int numDatanodes = 1;
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
         .setupHostsFile(true).build();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
Fri Jan 21 20:47:03 2011
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
@@ -104,8 +105,10 @@ public class NNThroughputBenchmark {
     // by calling name-node methods directly
     config.setInt("dfs.namenode.handler.count", 1);
     // set exclude file
-    config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
-    File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
+    config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
+      "${hadoop.tmp.dir}/dfs/hosts/exclude");
+    File excludeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
+      "exclude"));
     if(! excludeFile.exists()) {
       if(!excludeFile.getParentFile().mkdirs())
         throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
@@ -1120,7 +1123,7 @@ public class NNThroughputBenchmark {
     }
 
     private void decommissionNodes() throws IOException {
-      String excludeFN = config.get("dfs.hosts.exclude", "exclude");
+      String excludeFN = config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "exclude");
       FileOutputStream excludeFile = new FileOutputStream(excludeFN);
       excludeFile.getChannel().truncate(0L);
       int nrDatanodes = blockReportObject.getNumDatanodes();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1062020&r1=1062019&r2=1062020&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
(original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
Fri Jan 21 20:47:03 2011
@@ -72,7 +72,7 @@ public class TestDecommissioningStatus {
     dir = new Path(workingDir, "build/test/data/work-dir/decommission");
     assertTrue(localFileSys.mkdirs(dir));
     excludeFile = new Path(dir, "exclude");
-    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf
         .setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt("dfs.heartbeat.interval", 1);



Mime
View raw message