hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r512904 - in /lucene/hadoop/trunk: CHANGES.txt src/test/org/apache/hadoop/dfs/TestDecommission.java
Date: Wed, 28 Feb 2007 19:07:40 GMT
Author: cutting
Date: Wed Feb 28 11:07:39 2007
New Revision: 512904

URL: http://svn.apache.org/viewvc?view=rev&rev=512904
Log:
HADOOP-1044.  Fix HDFS's TestDecommission to not spuriously fail.  Contributed by Wendy.
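
In outline, the change below works like this: rather than overwriting the dfs.hosts.exclude file with only the most recently decommissioned hostname, the test now accumulates every decommissioned node and rewrites the exclude file with the whole list on each pass, so nodes decommissioned in earlier iterations stay excluded. A minimal standalone sketch of that pattern follows, using plain java.io for brevity; the names ExcludeListSketch, writeExcludeFile, and decommission are illustrative only, while the actual patch goes through Hadoop's FileSystem API as shown in the diff.

    import java.io.BufferedWriter;
    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class ExcludeListSketch {
      // Accumulates every node decommissioned so far, like the patch's
      // decommissionedNodes field.
      private final List<String> decommissioned = new ArrayList<String>();

      // Rewrite the exclude file with the full host list, one per line.
      // A null list yields an empty file, which re-commissions all nodes.
      static void writeExcludeFile(File file, List<String> nodes) throws IOException {
        BufferedWriter out = new BufferedWriter(new FileWriter(file)); // truncates old contents
        try {
          if (nodes != null) {
            for (String node : nodes) {
              out.write(node);
              out.newLine();
            }
          }
        } finally {
          out.close();
        }
      }

      // Exclude a new node while keeping earlier ones excluded, mirroring
      // the clone-then-add step in the patched decommissionNode().
      void decommission(File excludeFile, String newNode) throws IOException {
        List<String> nodes = new ArrayList<String>(decommissioned);
        nodes.add(newNode);
        writeExcludeFile(excludeFile, nodes);
        decommissioned.add(newNode);
      }
    }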

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=512904&r1=512903&r2=512904
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Feb 28 11:07:39 2007
@@ -153,6 +153,9 @@
 45. HADOOP-928.  Make checksums optional per FileSystem.
     (Hairong Kuang via cutting)
 
+46. HADOOP-1044.  Fix HDFS's TestDecommission to not spuriously fail.
+    (Wendy Chien via cutting)
+
 
 Release 0.11.2 - 2007-02-16
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java?view=diff&rev=512904&r1=512903&r2=512904
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDecommission.java Wed Feb 28 11:07:39 2007
@@ -20,6 +20,8 @@
 import junit.framework.TestCase;
 import java.io.*;
 import java.util.Random;
+import java.util.ArrayList;
+import java.util.Iterator;
 import java.net.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -34,23 +36,35 @@
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
-  static final int numDatanodes = 5;
+  static final int numIterations = 2;
+  static final int numDatanodes = numIterations + 3;
+
 
   Random myrand = new Random();
   Path hostsFile;
   Path excludeFile;
 
+  ArrayList<String> decommissionedNodes = new ArrayList<String>(numIterations);
+
   private enum NodeState {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; }
 
-  private void writeConfigFile(FileSystem fs, Path name, String node) 
+  private void writeConfigFile(FileSystem fs, Path name, ArrayList<String> nodes) 
       throws IOException {
+
     // delete if it already exists
     if (fs.exists(name)) {
       fs.delete(name);
     }
+
     FSDataOutputStream stm = fs.create(name);
-    stm.writeBytes(node);
-    stm.writeBytes("\n");
+    
+    if (nodes != null) {
+      for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
+        String node = it.next();
+        stm.writeBytes(node);
+        stm.writeBytes("\n");
+      }
+    }
     stm.close();
   }
 
@@ -149,8 +163,10 @@
     String nodename = info[index].getName();
     System.out.println("Decommissioning node: " + nodename);
 
-    // write nodename into the exclude file. 
-    writeConfigFile(localFileSys, excludeFile, nodename);
+    // write nodename into the exclude file.
+    ArrayList<String> nodes = (ArrayList<String>)decommissionedNodes.clone();
+    nodes.add(nodename);
+    writeConfigFile(localFileSys, excludeFile, nodes);
     dfs.refreshNodes();
     return nodename;
   }
@@ -163,7 +179,7 @@
     DistributedFileSystem dfs = (DistributedFileSystem) filesys;
 
     System.out.println("Commissioning nodes.");
-    writeConfigFile(localFileSys, excludeFile, "");
+    writeConfigFile(localFileSys, excludeFile, null);
     dfs.refreshNodes();
   }
 
@@ -231,7 +247,7 @@
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
     conf.set("dfs.hosts.exclude", excludeFile.toString());
-    writeConfigFile(localFileSys, excludeFile, "");
+    writeConfigFile(localFileSys, excludeFile, null);
 
     MiniDFSCluster cluster = new MiniDFSCluster(65312, conf, numDatanodes, false);
     // Now wait for 15 seconds to give datanodes chance to register
@@ -250,7 +266,7 @@
     DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
 
     try {
-      for (int iteration = 0; iteration < 2; iteration++) {
+      for (int iteration = 0; iteration < numIterations; iteration++) {
         //
         // Decommission one node. Verify that node is decommissioned.
         // Verify that replication factor of file has increased from 3
@@ -265,6 +281,7 @@
         commissionNode(fileSys, localFileSys, downnode);
         waitNodeState(fileSys, downnode, NodeState.NORMAL);
         downnode  = decommissionNode(client, fileSys, localFileSys);
+        decommissionedNodes.add(downnode);
         waitNodeState(fileSys, downnode, NodeState.DECOMMISSIONED);
         checkFile(fileSys, file1, 3, downnode);
         cleanupFile(fileSys, file1);



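A note on the new sizing constant, inferred from the diff alone: the test verifies files at replication 3 (checkFile(fileSys, file1, 3, downnode)), and each of the numIterations passes adds one more node to the cumulative exclude list. Setting numDatanodes = numIterations + 3 therefore leaves at least 3 live datanodes to hold every replica even after the final decommission; with numIterations = 2 this works out to 2 + 3 = 5 nodes, the same count the old hardcoded numDatanodes = 5 provided.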