hadoop-mapreduce-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sc...@apache.org
Subject svn commit: r1021873 [3/3] - in /hadoop/mapreduce/trunk: ./ src/contrib/raid/src/java/org/apache/hadoop/hdfs/ src/contrib/raid/src/java/org/apache/hadoop/raid/ src/contrib/raid/src/test/org/apache/hadoop/hdfs/ src/contrib/raid/src/test/org/apache/hadoo...
Date Tue, 12 Oct 2010 18:23:36 GMT
Modified: hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java?rev=1021873&r1=1021872&r2=1021873&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidNode.java Tue Oct 12 18:23:36 2010
@@ -23,6 +23,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.GregorianCalendar;
 import java.util.Iterator;
@@ -64,8 +65,11 @@ public class TestRaidNode extends TestCa
 
   Configuration conf;
   String namenode = null;
+  String hftp = null;
   MiniDFSCluster dfs = null;
+  MiniMRCluster mr = null;
   FileSystem fileSys = null;
+  String jobTrackerName = null;
 
   /**
    * create mapreduce and dfs clusters
@@ -75,6 +79,7 @@ public class TestRaidNode extends TestCa
     new File(TEST_DIR).mkdirs(); // Make sure data directory exists
     conf = new Configuration();
     conf.set("raid.config.file", CONFIG_FILE);
+    conf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
     conf.setBoolean("raid.config.reload", true);
     conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
     conf.setBoolean("dfs.permissions.enabled", true);
@@ -82,76 +87,138 @@ public class TestRaidNode extends TestCa
     // scan all policies once every 5 second
     conf.setLong("raid.policy.rescan.interval", 5000);
 
+    // make all deletions not go through Trash
+    conf.set("fs.shell.delete.classname", "org.apache.hadoop.hdfs.DFSClient");
+
     // the RaidNode does the raiding inline (instead of submitting to map/reduce)
     conf.setBoolean("fs.raidnode.local", local);
 
     conf.set("raid.server.address", "localhost:0");
-    
+
     // create a dfs and map-reduce cluster
     final int taskTrackers = 4;
     final int jobTrackerPort = 60050;
 
-    dfs = new MiniDFSCluster(conf, 3, true, null);
+    dfs = new MiniDFSCluster(conf, 6, true, null);
     dfs.waitActive();
     fileSys = dfs.getFileSystem();
     namenode = fileSys.getUri().toString();
-  }
-    
-  /**
-   * create raid.xml file for RaidNode
-   */
-  private void mySetup(String path, short srcReplication, long targetReplication,
-                long metaReplication, long stripeLength) throws Exception {
-    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
-    fileWriter.write("<?xml version=\"1.0\"?>\n");
-    String str = "<configuration> " +
-                   "<srcPath prefix=\"" + path + "\"> " +
-                     "<policy name = \"RaidTest1\"> " +
-                        "<destPath> /destraid</destPath> " +
-                        "<property> " +
-                          "<name>srcReplication</name> " +
-                          "<value>" + srcReplication + "</value> " +
-                          "<description> pick only files whole replFactor is greater than or equal to " +
-                          "</description> " + 
-                        "</property> " +
-                        "<property> " +
-                          "<name>targetReplication</name> " +
-                          "<value>" + targetReplication + "</value> " +
-                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
-                          "</description> " + 
-                        "</property> " +
-                        "<property> " +
-                          "<name>metaReplication</name> " +
-                          "<value>" + metaReplication + "</value> " +
-                          "<description> replication factor of parity file" +
-                          "</description> " + 
-                        "</property> " +
-                        "<property> " +
-                          "<name>stripeLength</name> " +
-                          "<value>" + stripeLength + "</value> " +
-                          "<description> the max number of blocks in a file to RAID together " +
-                          "</description> " + 
-                        "</property> " +
-                        "<property> " +
-                          "<name>modTimePeriod</name> " +
-                          "<value>2000</value> " + 
-                          "<description> time (milliseconds) after a file is modified to make it " +
-                                         "a candidate for RAIDing " +
-                          "</description> " + 
-                        "</property> " +
-                     "</policy>" +
-                   "</srcPath>" +
-                 "</configuration>";
-    fileWriter.write(str);
-    fileWriter.close();
+    mr = new MiniMRCluster(taskTrackers, namenode, 3);
+    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
 
-    
+    FileSystem.setDefaultUri(conf, namenode);
+    conf.set("mapred.job.tracker", jobTrackerName);
   }
 
+  class ConfigBuilder {
+    private List<String> policies;
+
+    public ConfigBuilder() {
+      policies = new java.util.ArrayList<String>();
+    }
+
+    public void addPolicy(String name, short srcReplication,
+                          long targetReplication, long metaReplication, long stripeLength) {
+      String str =
+        "<srcPath prefix=\"/user/dhruba/raidtest\"> " +
+          "<policy name = \"" + name + "\"> " +
+             "<destPath> /destraid</destPath> " +
+             "<property> " +
+               "<name>srcReplication</name> " +
+               "<value>" + srcReplication + "</value> " +
+               "<description> pick only files whole replFactor is greater than or equal to " +
+               "</description> " +
+             "</property> " +
+             "<property> " +
+               "<name>targetReplication</name> " +
+               "<value>" + targetReplication + "</value> " +
+               "<description>after RAIDing, decrease the replication factor of a file to this value." +
+               "</description> " +
+             "</property> " +
+             "<property> " +
+               "<name>metaReplication</name> " +
+               "<value>" + metaReplication + "</value> " +
+               "<description> replication factor of parity file" +
+               "</description> " +
+             "</property> " +
+             "<property> " +
+               "<name>stripeLength</name> " +
+               "<value>" + stripeLength + "</value> " +
+               "<description> the max number of blocks in a file to RAID together " +
+               "</description> " +
+             "</property> " +
+             "<property> " +
+               "<name>modTimePeriod</name> " +
+               "<value>2000</value> " +
+               "<description> time (milliseconds) after a file is modified to make it " +
+                              "a candidate for RAIDing " +
+               "</description> " +
+             "</property> " +
+          "</policy>" +
+        "</srcPath>";
+      policies.add(str);
+    }
+
+    public void addPolicy(String name, String path, short srcReplication,
+                          long targetReplication, long metaReplication, long stripeLength) {
+      String str =
+        "<srcPath prefix=\"" + path + "\"> " +
+          "<policy name = \"" + name + "\"> " +
+             "<destPath> /destraid</destPath> " +
+             "<property> " +
+               "<name>srcReplication</name> " +
+               "<value>" + srcReplication + "</value> " +
+               "<description> pick only files whole replFactor is greater than or equal to " +
+               "</description> " + 
+             "</property> " +
+             "<property> " +
+               "<name>targetReplication</name> " +
+               "<value>" + targetReplication + "</value> " +
+               "<description>after RAIDing, decrease the replication factor of a file to this value." +
+               "</description> " + 
+             "</property> " +
+             "<property> " +
+               "<name>metaReplication</name> " +
+               "<value>" + metaReplication + "</value> " +
+               "<description> replication factor of parity file" +
+               "</description> " + 
+             "</property> " +
+             "<property> " +
+               "<name>stripeLength</name> " +
+               "<value>" + stripeLength + "</value> " +
+               "<description> the max number of blocks in a file to RAID together " +
+               "</description> " + 
+             "</property> " +
+             "<property> " +
+               "<name>modTimePeriod</name> " +
+               "<value>2000</value> " + 
+               "<description> time (milliseconds) after a file is modified to make it " +
+                              "a candidate for RAIDing " +
+               "</description> " + 
+             "</property> " +
+          "</policy>" +
+        "</srcPath>";
+      policies.add(str);
+    }
+
+    public void persist() throws IOException {
+      FileWriter fileWriter = new FileWriter(CONFIG_FILE);
+      fileWriter.write("<?xml version=\"1.0\"?>\n");
+      fileWriter.write("<configuration>");
+      for (String policy: policies) {
+        fileWriter.write(policy);
+      }
+      fileWriter.write("</configuration>");
+      fileWriter.close();
+    }
+  }
+    
   /**
    * stop clusters created earlier
    */
   private void stopClusters() throws Exception {
+    if (mr != null) { mr.shutdown(); }
     if (dfs != null) { dfs.shutdown(); }
   }
 
@@ -168,8 +235,8 @@ public class TestRaidNode extends TestCa
     int  numBlock          = 11;
     int  iter = 0;
 
+    createClusters(true);
     try {
-      createClusters(true);
       for (long blockSize : blockSizes) {
         for (long stripeLength : stripeLengths) {
            doTestPathFilter(iter, targetReplication, metaReplication,
@@ -192,7 +259,10 @@ public class TestRaidNode extends TestCa
                           long blockSize, int numBlock) throws Exception {
     LOG.info("doTestPathFilter started---------------------------:" +  " iter " + iter +
              " blockSize=" + blockSize + " stripeLength=" + stripeLength);
-    mySetup("/user/dhruba/raidtest", (short)1, targetReplication, metaReplication, stripeLength);
+    ConfigBuilder cb = new ConfigBuilder();
+    cb.addPolicy("policy1", "/user/dhruba/raidtest", (short)1, targetReplication, metaReplication, stripeLength);
+    cb.persist();
+
     RaidShell shell = null;
     Path dir = new Path("/user/dhruba/raidtest/");
     Path file1 = new Path(dir + "/file" + iter);
@@ -230,7 +300,9 @@ public class TestRaidNode extends TestCa
           if (listPaths != null && listPaths.length == 1) {
             for (FileStatus s : listPaths) {
               LOG.info("doTestPathFilter found path " + s.getPath());
-              if (!s.getPath().toString().endsWith(".tmp")) {
+              if (!s.getPath().toString().endsWith(".tmp") &&
+                  fileSys.getFileStatus(file1).getReplication() ==
+                  targetReplication) {
                 count++;
               }
             }
@@ -247,28 +319,29 @@ public class TestRaidNode extends TestCa
       }
       // assertEquals(listPaths.length, 1); // all files raided
       LOG.info("doTestPathFilter all files found in Raid.");
+      Thread.sleep(20000); // Without this wait, unit test crashes
 
       // check for error at beginning of file
       if (numBlock >= 1) {
-        LOG.info("Check error at beginning of file.");
+        LOG.info("doTestPathFilter Check error at beginning of file.");
         simulateError(shell, fileSys, file1, crc1, 0);
       }
 
       // check for error at the beginning of second block
       if (numBlock >= 2) {
-        LOG.info("Check error at beginning of second block.");
+        LOG.info("doTestPathFilter Check error at beginning of second block.");
         simulateError(shell, fileSys, file1, crc1, blockSize + 1);
       }
 
       // check for error at the middle of third block
       if (numBlock >= 3) {
-        LOG.info("Check error at middle of third block.");
+        LOG.info("doTestPathFilter Check error at middle of third block.");
         simulateError(shell, fileSys, file1, crc1, 2 * blockSize + 10);
       }
 
       // check for error at the middle of second stripe
       if (numBlock >= stripeLength + 1) {
-        LOG.info("Check error at middle of second stripe.");
+        LOG.info("doTestPathFilter Check error at middle of second stripe.");
         simulateError(shell, fileSys, file1, crc1,
                                             stripeLength * blockSize + 100);
       }
@@ -297,8 +370,9 @@ public class TestRaidNode extends TestCa
     long stripeLength = 2;
     long blockSize = 1024;
     int numBlock = 3;
-    mySetup("/user/dhruba/policytest", srcReplication, targetReplication, metaReplication, stripeLength);
-    RaidShell shell = null;
+    ConfigBuilder cb = new ConfigBuilder();
+    cb.addPolicy("policy1", "/user/dhruba/policytest", (short)1, targetReplication, metaReplication, stripeLength);
+    cb.persist();
     Path dir = new Path("/user/dhruba/policytest/");
     Path file1 = new Path(dir + "/file1");
     Path file2 = new Path(dir + "/file2");
@@ -309,21 +383,9 @@ public class TestRaidNode extends TestCa
       fileSys.delete(destPath, true);
 
       // create an instance of the RaidNode
-      cnode = RaidNode.createRaidNode(null, conf);
-      int times = 10;
-
-      while (times-- > 0) {
-        try {
-          shell = new RaidShell(conf, cnode.getListenerAddress());
-        } catch (Exception e) {
-          LOG.info("doCheckPolicy unable to connect to " + 
-              cnode.getListenerAddress() + " retrying....");
-          Thread.sleep(1000);
-          continue;
-        }
-        break;
-      }
-      LOG.info("doCheckPolicy created RaidShell.");
+      Configuration localConf = new Configuration(conf);
+      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
+      cnode = RaidNode.createRaidNode(null, localConf);
 
       // this file should be picked up RaidNode
       long crc2 = createOldFile(fileSys, file2, 2, numBlock, blockSize);
@@ -338,7 +400,9 @@ public class TestRaidNode extends TestCa
         if (listPaths != null && listPaths.length == 1) {
           for (FileStatus s : listPaths) {
             LOG.info("doCheckPolicy found path " + s.getPath());
-            if (!s.getPath().toString().endsWith(".tmp")) {
+            if (!s.getPath().toString().endsWith(".tmp") &&
+                fileSys.getFileStatus(file2).getReplication() ==
+                targetReplication) {
               count++;
               firstmodtime = s.getModificationTime();
             }
@@ -369,7 +433,9 @@ public class TestRaidNode extends TestCa
           for (FileStatus s : listPaths) {
             LOG.info("doCheckPolicy found path " + s.getPath() + " " + s.getModificationTime());
             if (!s.getPath().toString().endsWith(".tmp") &&
-                s.getModificationTime() > firstmodtime) {
+                s.getModificationTime() > firstmodtime &&
+                fileSys.getFileStatus(file2).getReplication() ==
+                targetReplication) {
               count++;
             }
           }
@@ -389,7 +455,6 @@ public class TestRaidNode extends TestCa
                                           StringUtils.stringifyException(e));
       throw e;
     } finally {
-      shell.close();
       if (cnode != null) { cnode.stop(); cnode.join(); }
       LOG.info("doTestPathFilter delete file " + file1);
       fileSys.delete(file1, false);
@@ -397,112 +462,93 @@ public class TestRaidNode extends TestCa
     LOG.info("doCheckPolicy completed:");
   }
 
+  private void createTestFiles(String path, String destpath) throws IOException {
+    long blockSize         = 1024L;
+    Path dir = new Path(path);
+    Path destPath = new Path(destpath);
+    fileSys.delete(dir, true);
+    fileSys.delete(destPath, true);
+   
+    for(int i = 0 ; i < 10; i++){
+      Path file = new Path(path + "file" + i);
+      createOldFile(fileSys, file, 1, 7, blockSize);
+    }
+  }
+
   /**
    * Test dist Raid
    */
   public void testDistRaid() throws Exception {
     LOG.info("Test testDistRaid started.");
-    long blockSize         = 1024L;
     long targetReplication = 2;
     long metaReplication   = 2;
     long stripeLength      = 3;
     short srcReplication = 1;
 
+    createClusters(false);
+    ConfigBuilder cb = new ConfigBuilder();
+    cb.addPolicy("policy1", "/user/dhruba/raidtest", (short)1, targetReplication, metaReplication, stripeLength);
+    cb.addPolicy("policy2", "/user/dhruba/raidtest2", (short)1, targetReplication, metaReplication, stripeLength);
+    cb.persist();
+
+    RaidNode cnode = null;
     try {
-      createClusters(false);
-      mySetup("/user/dhruba/raidtest", srcReplication, targetReplication, metaReplication, stripeLength);
+      createTestFiles("/user/dhruba/raidtest/", "/destraid/user/dhruba/raidtest");
+      createTestFiles("/user/dhruba/raidtest2/", "/destraid/user/dhruba/raidtest2");
       LOG.info("Test testDistRaid created test files");
 
-      Path dir = new Path("/user/dhruba/raidtest/");
-      Path destPath = new Path("/destraid/user/dhruba/raidtest");
-      fileSys.delete(dir, true);
-      fileSys.delete(destPath, true);
-     
-      ConfigManager configMgr  = new ConfigManager(conf);
-      configMgr.reloadConfigsIfNecessary();
-      LOG.info(" testDistRaid ConfigFile Loaded");
-
-      // activate all categories
-      Collection<PolicyList> all = configMgr.getAllPolicies();   
-      PolicyList[] sorted = all.toArray(new PolicyList[all.size()]);
-      Iterator<PolicyInfo> pi = sorted[0].getAll().iterator();
-      PolicyInfo p = pi.next();
-      List<FileStatus> ll = new ArrayList<FileStatus>();
-
-      for(int i = 0 ; i < 10; i++){
-        Path file = new Path("/user/dhruba/raidtest/file"+i);
-        createOldFile(fileSys, file, 1, 7, blockSize);
-        FileStatus st = fileSys.getFileStatus(file);
-        ll.add(st);
+      Configuration localConf = new Configuration(conf);
+      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
+      cnode = RaidNode.createRaidNode(null, localConf);
+      // Verify the policies are parsed correctly
+      for (PolicyList policyList : cnode.getAllPolicies()) {
+        for (PolicyInfo p : policyList.getAll()) {
+          if (p.getName().equals("policy1")) {
+            Path srcPath = new Path("/user/dhruba/raidtest");
+            assertTrue(p.getSrcPath().equals(
+                srcPath.makeQualified(srcPath.getFileSystem(conf))));
+          } else {
+            assertTrue(p.getName().equals("policy2"));
+            Path srcPath = new Path("/user/dhruba/raidtest2");
+            assertTrue(p.getSrcPath().equals(
+                srcPath.makeQualified(srcPath.getFileSystem(conf))));
+          }
+          assertEquals(targetReplication,
+                       Integer.parseInt(p.getProperty("targetReplication")));
+          assertEquals(metaReplication,
+                       Integer.parseInt(p.getProperty("metaReplication")));
+          assertEquals(stripeLength,
+                       Integer.parseInt(p.getProperty("stripeLength")));
+        }
       }
-      
-      DistRaid dr = new DistRaid(conf);      
-      dr.addRaidPaths(p, ll);
-      dr.doDistRaid();
+
+      long start = System.currentTimeMillis();
+      final int MAX_WAITTIME = 300000;
+      while (cnode.jobMonitor.jobsMonitored() < 2 &&
+             System.currentTimeMillis() - start < MAX_WAITTIME) {
+        Thread.sleep(1000);
+      }
+      this.assertEquals(cnode.jobMonitor.jobsMonitored(), 2);
+
+      start = System.currentTimeMillis();
+      while (cnode.jobMonitor.jobsSucceeded() < 2 &&
+             System.currentTimeMillis() - start < MAX_WAITTIME) {
+        Thread.sleep(1000);
+      }
+      this.assertEquals(cnode.jobMonitor.jobsSucceeded(), 2);
+
       LOG.info("Test testDistRaid successful.");
       
     } catch (Exception e) {
       LOG.info("testDistRaid Exception " + e + StringUtils.stringifyException(e));
       throw e;
     } finally {
+      if (cnode != null) { cnode.stop(); cnode.join(); }
       stopClusters();
     }
     LOG.info("Test testDistRaid completed.");
   }
   
-  /**
-   * Test the case where the source and destination paths conflict.
-   * @throws Exception
-   */
-  public void testConflictingPaths() throws Exception {
-    LOG.info("Test testConflictingPaths started");
-    long targetReplication = 2;
-    long metaReplication   = 2;
-    long stripeLength      = 3;
-    short srcReplication = 1;
-    long modTimePeriod = 0;
-    try {
-      createClusters(false);
-      mySetup("/user/d/raidtest", srcReplication, targetReplication,
-          metaReplication, stripeLength);
-      // We dont need this to run, just need the object.
-      RaidNode cnode = RaidNode.createRaidNode(null, conf);
-      cnode.stop();
-      cnode.join();
-
-      createOldFile(fileSys, new Path("/user/d/raidtest/f1"), 2, 7, 8192L);
-      LOG.info("Test testConflictingPaths created test files");
-
-      long now = System.currentTimeMillis();
-
-      // Test the regular case.
-      LOG.info("Test testConflictingPaths testing the regular case");
-      List<FileStatus> selected = cnode.selectFiles(conf,
-          new Path("/user/d/raidtest*"), "/raid",
-          modTimePeriod, srcReplication, now);
-      assertTrue(selected.size() > 0);
-
-      // Test the conflicting case: src under dest.
-      LOG.info("Test testConflictingPaths testing src under dest");
-      selected = cnode.selectFiles(conf,
-          new Path("/user/d/raidtest*"), "/user/d",
-          modTimePeriod, srcReplication, now);
-      assertEquals(0, selected.size());
-
-      // Test the conflicting case: dest under src.
-      LOG.info("Test testConflictingPaths testing dest under src");
-      selected = cnode.selectFiles(conf,
-          new Path("/user/d*"), "/user/d/raidtest",
-          modTimePeriod, srcReplication, now);
-      assertEquals(0, selected.size());
-
-      LOG.info("Test testConflictingPaths succeeded.");
-    } finally {
-      stopClusters();
-    }
-    LOG.info("Test testConflictingPaths completed.");
-  }
-
   //
   // simulate a corruption at specified offset and verify that eveyrthing is good
   //
@@ -573,4 +619,50 @@ public class TestRaidNode extends TestCa
       fail("CRC mismatch of files " + name1 + " with file " + name2);
     }
   }
+
+  public void testSuspendTraversal() throws Exception {
+    LOG.info("Test testSuspendTraversal started.");
+    long targetReplication = 2;
+    long metaReplication   = 2;
+    long stripeLength      = 3;
+    short srcReplication = 1;
+
+    createClusters(false);
+    ConfigBuilder cb = new ConfigBuilder();
+    cb.addPolicy("policy1", "/user/dhruba/raidtest", (short)1, targetReplication, metaReplication, stripeLength);
+    cb.persist();
+
+    RaidNode cnode = null;
+    try {
+      createTestFiles("/user/dhruba/raidtest/", "/destraid/user/dhruba/raidtest");
+      LOG.info("Test testSuspendTraversal created test files");
+
+      Configuration localConf = new Configuration(conf);
+      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
+      localConf.setInt("raid.distraid.max.files", 3);
+      final int numJobsExpected = 4; // 10 test files: 4 jobs with 3 files each.
+      cnode = RaidNode.createRaidNode(null, localConf);
+
+      long start = System.currentTimeMillis();
+      final int MAX_WAITTIME = 300000;
+
+      start = System.currentTimeMillis();
+      while (cnode.jobMonitor.jobsSucceeded() < numJobsExpected &&
+             System.currentTimeMillis() - start < MAX_WAITTIME) {
+        Thread.sleep(1000);
+      }
+      this.assertEquals(cnode.jobMonitor.jobsMonitored(), numJobsExpected);
+      this.assertEquals(cnode.jobMonitor.jobsSucceeded(), numJobsExpected);
+
+      LOG.info("Test testSuspendTraversal successful.");
+
+    } catch (Exception e) {
+      LOG.info("testSuspendTraversal Exception " + e + StringUtils.stringifyException(e));
+      throw e;
+    } finally {
+      if (cnode != null) { cnode.stop(); cnode.join(); }
+      stopClusters();
+    }
+    LOG.info("Test testSuspendTraversal completed.");
+  }
 }

Modified: hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java?rev=1021873&r1=1021872&r2=1021873&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/raid/src/test/org/apache/hadoop/raid/TestRaidPurge.java Tue Oct 12 18:23:36 2010
@@ -195,7 +195,6 @@ public class TestRaidPurge extends TestC
     LOG.info("doTestPurge started---------------------------:" +  " iter " + iter +
              " blockSize=" + blockSize + " stripeLength=" + stripeLength);
     mySetup(srcPath, targetReplication, metaReplication, stripeLength);
-    RaidShell shell = null;
     Path dir = new Path("/user/dhruba/raidtest/");
     Path file1 = new Path(dir + "/file" + iter);
     RaidNode cnode = null;
@@ -207,21 +206,9 @@ public class TestRaidPurge extends TestC
       LOG.info("doTestPurge created test files for iteration " + iter);
 
       // create an instance of the RaidNode
-      cnode = RaidNode.createRaidNode(null, conf);
-      int times = 10;
-
-      while (times-- > 0) {
-        try {
-          shell = new RaidShell(conf, cnode.getListenerAddress());
-        } catch (Exception e) {
-          LOG.info("doTestPurge unable to connect to " + 
-              cnode.getListenerAddress() + " retrying....");
-          Thread.sleep(1000);
-          continue;
-        }
-        break;
-      }
-      LOG.info("doTestPurge created RaidShell.");
+      Configuration localConf = new Configuration(conf);
+      localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
+      cnode = RaidNode.createRaidNode(null, localConf);
       FileStatus[] listPaths = null;
 
       // wait till file is raided
@@ -266,7 +253,6 @@ public class TestRaidPurge extends TestC
                                           StringUtils.stringifyException(e));
       throw e;
     } finally {
-      shell.close();
       if (cnode != null) { cnode.stop(); cnode.join(); }
       LOG.info("doTestPurge delete file " + file1);
       fileSys.delete(file1, true);



Mime
View raw message