hadoop-common-commits mailing list archives

From cutt...@apache.org
Subject svn commit: r508599 - in /lucene/hadoop/trunk: CHANGES.txt src/test/org/apache/hadoop/dfs/TestFileCorruption.java src/test/org/apache/hadoop/dfs/TestFsck.java src/test/org/apache/hadoop/dfs/TestRestartDFS.java
Date Fri, 16 Feb 2007 21:48:55 GMT
Author: cutting
Date: Fri Feb 16 13:48:55 2007
New Revision: 508599

URL: http://svn.apache.org/viewvc?view=rev&rev=508599
Log:
HADOOP-889.  Remove duplicate code from HDFS unit tests.  Contributed by Milind.
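
Editor's note: the three tests below now delegate their duplicated fixture logic to a shared DFSTestUtil helper. That class is not part of this diff, so what follows is only a hypothetical reconstruction of its surface, assembled from the code being deleted (MyFile, createFiles, checkFiles, deldir) and the new call sites; only the constructor and the three public methods are attested, and all field names and internals are assumptions.

package org.apache.hadoop.dfs;

import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DFSTestUtil {
  private static final Random gen = new Random();
  private static final String[] dirNames = {
    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
  };

  private final String testName;  // retained for parity with the call sites; actual use unknown
  private final int maxLevels;
  private final int maxSize;
  private final MyFile[] files;

  public DFSTestUtil(String testName, int nFiles, int maxLevels, int maxSize) {
    this.testName = testName;
    this.maxLevels = maxLevels;
    this.maxSize = maxSize;
    files = new MyFile[nFiles];
    for (int idx = 0; idx < nFiles; idx++) {
      files[idx] = new MyFile();
    }
  }

  /** Enough information to recreate a single file: a random path,
   *  a size, and the seed that generated its bytes. */
  private class MyFile {
    String name = "";
    int size;
    long seed;

    MyFile() {
      int nLevels = gen.nextInt(maxLevels);
      StringBuffer sb = new StringBuffer();
      for (int idx = 0; idx < nLevels; idx++) {
        sb.append(dirNames[gen.nextInt(10)]);
        sb.append("/");
      }
      long fidx = -1;
      while (fidx < 0) { fidx = gen.nextLong(); }
      name = sb.toString() + Long.toString(fidx);
      size = gen.nextInt(maxSize);
      seed = gen.nextLong();
    }
  }

  /** Create the files under topdir with random but seed-reproducible data. */
  public void createFiles(FileSystem fs, String topdir) throws IOException {
    Path root = new Path(topdir);
    for (int idx = 0; idx < files.length; idx++) {
      Path fPath = new Path(root, files[idx].name);
      if (!fs.mkdirs(fPath.getParent())) {
        throw new IOException("Mkdirs failed to create " + fPath.getParent());
      }
      FSDataOutputStream out = fs.create(fPath);
      byte[] toWrite = new byte[files[idx].size];
      new Random(files[idx].seed).nextBytes(toWrite);
      out.write(toWrite);
      out.close();
    }
  }

  /** Regenerate each file's expected bytes from its seed and compare. */
  public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
    Path root = new Path(topdir);
    for (int idx = 0; idx < files.length; idx++) {
      FSDataInputStream in = fs.open(new Path(root, files[idx].name));
      byte[] toRead = new byte[files[idx].size];
      byte[] toCompare = new byte[files[idx].size];
      new Random(files[idx].seed).nextBytes(toCompare);
      int nRead = in.read(toRead);
      in.close();
      if (nRead != toRead.length) {
        return false;
      }
      for (int i = 0; i < toRead.length; i++) {
        if (toRead[i] != toCompare[i]) {
          return false;
        }
      }
    }
    return true;
  }

  /** Delete topdir and everything underneath it. */
  public void cleanup(FileSystem fs, String topdir) throws IOException {
    fs.delete(new Path(topdir));
  }
}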

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=508599&r1=508598&r2=508599
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Fri Feb 16 13:48:55 2007
@@ -62,6 +62,9 @@
 18. HADOOP-1021.  Fix MRCaching-based unit tests on Windows.
     (Nigel Daley via cutting)
 
+19. HADOOP-889.  Remove duplicate code from HDFS unit tests.
+    (Milind Bhandarkar via cutting)
+
 
 Branch 0.11 (unreleased)
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java?view=diff&rev=508599&r1=508598&r2=508599
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java Fri Feb 16 13:48:55 2007
@@ -19,13 +19,9 @@
 package org.apache.hadoop.dfs;
 
 import java.io.*;
-import java.util.Random;
 import junit.framework.*;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 
 /**
  * A JUnit test for corrupted file handling.
@@ -34,51 +30,6 @@
  */
 public class TestFileCorruption extends TestCase {
   
-  private static final int NFILES = 20;
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-
-  /** class MyFile contains enough information to recreate the contents of
-   * a single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8*1024;
-    private static String[] dirNames = {
-      "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
-    };
-    private String name = "";
-    private int size;
-    private long seed;
-    
-    MyFile() {
-      int nLevels = gen.nextInt(MAX_LEVELS);
-      if(nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        name = sb.toString();
-      }
-      long fidx = -1;
-      while (fidx < 0) { fidx = gen.nextLong(); }
-      name = name + Long.toString(fidx);
-      size = gen.nextInt(MAX_SIZE);
-      seed = gen.nextLong();
-    }
-    
-    String getName() { return name; }
-    int getSize() { return size; }
-    long getSeed() { return seed; }
-  }
-  
   public TestFileCorruption(String testName) {
     super(testName);
   }
@@ -91,105 +42,31 @@
   protected void tearDown() throws Exception {
   }
   
-  /** create NFILES with random names and directory hierarchies
-   * with random (but reproducible) data in them.
-   */
-  private static MyFile[] createFiles(String fsname, String topdir)
-  throws IOException {
-    MyFile[] files = new MyFile[NFILES];
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      files[idx] = new MyFile();
-    }
-    
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      if (!fs.mkdirs(fPath.getParent())) {
-        throw new IOException("Mkdirs failed to create " + 
-                              fPath.getParent().toString());
-      }
-      FSDataOutputStream out = fs.create(fPath);
-      byte[] toWrite = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toWrite);
-      out.write(toWrite);
-      out.close();
-      toWrite = null;
-    }
-    
-    return files;
-  }
-  
-  /** check if the files have been copied correctly. */
-  private static boolean checkFiles(String fsname, String topdir, MyFile[] files) 
-  throws IOException {
-    
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      FSDataInputStream in = fs.open(fPath);
-      byte[] toRead = new byte[files[idx].getSize()];
-      byte[] toCompare = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toCompare);
-      assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
-      in.close();
-      for (int i = 0; i < toRead.length; i++) {
-        if (toRead[i] != toCompare[i]) {
-          return false;
-        }
-      }
-      toRead = null;
-      toCompare = null;
-    }
-    
-    return true;
-  }
-  
-  /** delete directory and everything underneath it.*/
-  private static void deldir(String fsname, String topdir)
-  throws IOException {
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    fs.delete(root);
-  }
-  
   /** check if DFS can handle corrupted blocks properly */
   public void testFileCorruption() throws Exception {
-    String namenode = null;
     MiniDFSCluster cluster = null;
-    MyFile[] files = null;
+    DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
     try {
       Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(65314, conf, 3, false);
-      namenode = conf.get("fs.default.name", "local");
-      if (!"local".equals(namenode)) {
-        files = createFiles(namenode, "/srcdat");
-        // Now deliberately remove the blocks
-        File data_dir = new File(System.getProperty("test.build.data"),
-            "dfs/data/data5/data");
-        assertTrue("data directory does not exist", data_dir.exists());
-        File[] blocks = data_dir.listFiles();
-        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
-        for (int idx = 0; idx < blocks.length; idx++) {
-          if (!blocks[idx].getName().startsWith("blk_")) {
-            continue;
-          }
-          System.out.println("Deliberately removing file "+blocks[idx].getName());
-          assertTrue("Cannot remove file.", blocks[idx].delete());
+      FileSystem fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      // Now deliberately remove the blocks
+      File data_dir = new File(System.getProperty("test.build.data"),
+          "dfs/data/data5/data");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (!blocks[idx].getName().startsWith("blk_")) {
+          continue;
         }
-        assertTrue("Corrupted replicas not handled properly.",
-            checkFiles(namenode, "/srcdat", files));
-        deldir(namenode, "/srcdat");
+        System.out.println("Deliberately removing file "+blocks[idx].getName());
+        assertTrue("Cannot remove file.", blocks[idx].delete());
       }
+      assertTrue("Corrupted replicas not handled properly.",
+          util.checkFiles(fs, "/srcdat"));
+      util.cleanup(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }

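Editor's note: one detail of the fixture code removed above is worth calling out. A file's contents are never stored for later comparison; only the java.util.Random seed that produced them is kept, and verification regenerates the expected bytes from that seed. That is what makes the data "random (but reproducible)" across a corruption or restart. A minimal standalone illustration (the class name is made up for the example):

import java.util.Arrays;
import java.util.Random;

public class SeededDataDemo {
  public static void main(String[] args) {
    long seed = new Random().nextLong();  // all we need to keep per file

    // "Write": derive the payload from the seed.
    byte[] written = new byte[8 * 1024];
    new Random(seed).nextBytes(written);

    // "Verify" later, even in a different process: regenerate and compare.
    byte[] expected = new byte[written.length];
    new Random(seed).nextBytes(expected);
    System.out.println("match: " + Arrays.equals(written, expected));  // prints: match: true
  }
}
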
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java?view=diff&rev=508599&r1=508598&r2=508599
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java Fri Feb 16 13:48:55 2007
@@ -19,9 +19,7 @@
 package org.apache.hadoop.dfs;
 
 import java.io.ByteArrayOutputStream;
-import java.io.IOException;
 import java.io.PrintStream;
-import java.util.Random;
 import junit.framework.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
@@ -32,52 +30,7 @@
  * @author Milind Bhandarkar
  */
 public class TestFsck extends TestCase {
-  
-  private static final int NFILES = 20;
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-
-  /** class MyFile contains enough information to recreate the contents of
-   * a single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8*1024;
-    private static String[] dirNames = {
-      "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
-    };
-    private String name = "";
-    private int size;
-    private long seed;
-    
-    MyFile() {
-      int nLevels = gen.nextInt(MAX_LEVELS);
-      if(nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        name = sb.toString();
-      }
-      long fidx = -1;
-      while (fidx < 0) { fidx = gen.nextLong(); }
-      name = name + Long.toString(fidx);
-      size = gen.nextInt(MAX_SIZE);
-      seed = gen.nextLong();
-    }
-    
-    String getName() { return name; }
-    int getSize() { return size; }
-    long getSeed() { return seed; }
-  }
-  
+ 
   public TestFsck(String testName) {
     super(testName);
   }
@@ -90,69 +43,25 @@
   protected void tearDown() throws Exception {
   }
   
-  /** create NFILES with random names and directory hierarchies
-   * with random (but reproducible) data in them.
-   */
-  private static MyFile[] createFiles(String fsname, String topdir)
-  throws IOException {
-    MyFile[] files = new MyFile[NFILES];
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      files[idx] = new MyFile();
-    }
-    
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      if (!fs.mkdirs(fPath.getParent())) {
-        throw new IOException("Mkdirs failed to create directory " +
-                              fPath.getParent().toString());
-      }
-      FSDataOutputStream out = fs.create(fPath);
-      byte[] toWrite = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toWrite);
-      out.write(toWrite);
-      out.close();
-      toWrite = null;
-    }
-    
-    return files;
-  }
-  
-  /** delete directory and everything underneath it.*/
-  private static void deldir(String fsname, String topdir)
-  throws IOException {
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    fs.delete(root);
-  }
-  
   /** do fsck */
   public void testFsck() throws Exception {
-    String namenode = null;
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(65314, conf, 4, false);
-      namenode = conf.get("fs.default.name", "local");
-      if (!"local".equals(namenode)) {
-        MyFile[] files = createFiles(namenode, "/srcdat");
-        PrintStream oldOut = System.out;
-        ByteArrayOutputStream bStream = new ByteArrayOutputStream();
-        PrintStream newOut = new PrintStream(bStream, true);
-        System.setOut(newOut);
-        assertEquals(0, new DFSck().doMain(conf, new String[] {"/"}));
-        System.setOut(oldOut);
-        String outStr = bStream.toString();
-        assertTrue(-1 != outStr.indexOf("HEALTHY"));
-        System.out.println(outStr);
-        deldir(namenode, "/srcdat");
-      }
+      FileSystem fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      PrintStream oldOut = System.out;
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream newOut = new PrintStream(bStream, true);
+      System.setOut(newOut);
+      assertEquals(0, new DFSck().doMain(conf, new String[] {"/"}));
+      System.setOut(oldOut);
+      String outStr = bStream.toString();
+      assertTrue(-1 != outStr.indexOf("HEALTHY"));
+      System.out.println(outStr);
+      util.cleanup(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
@@ -160,25 +69,23 @@
   
   /** do fsck on non-existent path*/
   public void testFsckNonExistent() throws Exception {
-    String namenode = null;
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(65314, conf, 4, false);
-      namenode = conf.get("fs.default.name", "local");
-      if (!"local".equals(namenode)) {
-        MyFile[] files = createFiles(namenode, "/srcdat");
-        PrintStream oldOut = System.out;
-        ByteArrayOutputStream bStream = new ByteArrayOutputStream();
-        PrintStream newOut = new PrintStream(bStream, true);
-        System.setOut(newOut);
-        assertEquals(0, new DFSck().doMain(conf, new String[] {"/non-existent"}));
-        System.setOut(oldOut);
-        String outStr = bStream.toString();
-        assertEquals(-1, outStr.indexOf("HEALTHY"));
-        System.out.println(outStr);
-        deldir(namenode, "/srcdat");
-      }
+      FileSystem fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      PrintStream oldOut = System.out;
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream newOut = new PrintStream(bStream, true);
+      System.setOut(newOut);
+      assertEquals(0, new DFSck().doMain(conf, new String[] {"/non-existent"}));
+      System.setOut(oldOut);
+      String outStr = bStream.toString();
+      assertEquals(-1, outStr.indexOf("HEALTHY"));
+      System.out.println(outStr);
+      util.cleanup(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }

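Editor's note: the two fsck tests above share a pattern for asserting on command-line output: temporarily swap System.out for a PrintStream over a ByteArrayOutputStream, run the command, and inspect the captured text. A self-contained sketch of the idiom (class and method names are illustrative); unlike the tests above, it restores the old stream in a finally block, which protects later tests if an assertion throws mid-capture:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class StdoutCaptureDemo {

  /** Run a task while System.out is redirected, returning what it printed. */
  static String captureStdout(Runnable task) {
    PrintStream oldOut = System.out;
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    System.setOut(new PrintStream(bStream, true));  // autoflush, as in the tests
    try {
      task.run();
    } finally {
      System.setOut(oldOut);  // always restore, even if the task throws
    }
    return bStream.toString();
  }

  public static void main(String[] args) {
    String outStr = captureStdout(new Runnable() {
      public void run() {
        System.out.println("Status: HEALTHY");  // stands in for DFSck output
      }
    });
    System.out.println("captured HEALTHY? " + (outStr.indexOf("HEALTHY") != -1));
  }
}
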
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java?view=diff&rev=508599&r1=508598&r2=508599
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestRestartDFS.java Fri Feb 16 13:48:55 2007
@@ -34,157 +34,37 @@
  */
 public class TestRestartDFS extends TestCase {
   
-  private static final int NFILES = 20;
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
   private static Configuration conf = new Configuration();
 
-  /** class MyFile contains enough information to recreate the contents of
-   * a single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8*1024;
-    private static String[] dirNames = {
-      "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
-    };
-    private String name = "";
-    private int size;
-    private long seed;
-    
-    MyFile() {
-      int nLevels = gen.nextInt(MAX_LEVELS);
-      if(nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        name = sb.toString();
-      }
-      long fidx = -1;
-      while (fidx < 0) { fidx = gen.nextLong(); }
-      name = name + Long.toString(fidx);
-      size = gen.nextInt(MAX_SIZE);
-      seed = gen.nextLong();
-    }
-    
-    String getName() { return name; }
-    int getSize() { return size; }
-    long getSeed() { return seed; }
-  }
-  
   public TestRestartDFS(String testName) {
     super(testName);
   }
 
-  
-  
   protected void setUp() throws Exception {
   }
 
   protected void tearDown() throws Exception {
   }
   
-  /** create NFILES with random names and directory hierarchies
-   * with random (but reproducible) data in them.
-   */
-  private static MyFile[] createFiles(String fsname, String topdir)
-  throws IOException {
-    MyFile[] files = new MyFile[NFILES];
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      files[idx] = new MyFile();
-    }
-    
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      if (!fs.mkdirs(fPath.getParent())) {
-        throw new IOException("Mkdirs failed to create " + 
-                              fPath.getParent().toString());
-      }
-      FSDataOutputStream out = fs.create(fPath);
-      byte[] toWrite = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toWrite);
-      out.write(toWrite);
-      out.close();
-      toWrite = null;
-    }
-    
-    return files;
-  }
-  
-  /** check if the files have been copied correctly. */
-  private static boolean checkFiles(String fsname, String topdir, MyFile[] files) 
-  throws IOException {
-    
-    //Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < NFILES; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      FSDataInputStream in = fs.open(fPath);
-      byte[] toRead = new byte[files[idx].getSize()];
-      byte[] toCompare = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toCompare);
-      assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
-      in.close();
-      for (int i = 0; i < toRead.length; i++) {
-        if (toRead[i] != toCompare[i]) {
-          return false;
-        }
-      }
-      toRead = null;
-      toCompare = null;
-    }
-    
-    return true;
-  }
-  
-  /** delete directory and everything underneath it.*/
-  private static void deldir(String fsname, String topdir)
-  throws IOException {
-    FileSystem fs = FileSystem.getNamed(fsname, conf);
-    Path root = new Path(topdir);
-    fs.delete(root);
-  }
-  
   /** check if DFS remains in proper condition after a restart */
   public void testRestartDFS() throws Exception {
     String namenode = null;
     MiniDFSCluster cluster = null;
-    MyFile[] files = null;
+    DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
     try {
       cluster = new MiniDFSCluster(65314, conf, 4, false);
-      namenode = conf.get("fs.default.name", "local");
-      if (!"local".equals(namenode)) {
-        files = createFiles(namenode, "/srcdat");
-      }
+      FileSystem fs = cluster.getFileSystem();
+      files.createFiles(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
-    assertTrue("Error creating files", files != null);
     try {
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster(65320, conf, 4, false, false);
-      namenode = conf.get("fs.default.name", "local");
-      if (!"local".equals(namenode)) {
-        assertTrue("Filesystem corrupted after restart.",
-            checkFiles(namenode, "/srcdat", files));
-        deldir(namenode, "/srcdat");
-      }
+      FileSystem fs = cluster.getFileSystem();
+      assertTrue("Filesystem corrupted after restart.",
+            files.checkFiles(fs, "/srcdat"));
+      files.cleanup(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }

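Editor's note: with the fixture logic moved into DFSTestUtil, the restart test above reduces to a simple shape: populate, shut down, restart without reformatting, verify. A condensed sketch of that shape (every call is taken from the diff; it assumes the surrounding JUnit TestCase, where conf and assertTrue are in scope, and per the test's own comment the extra trailing constructor argument is what skips formatting the namenode):

DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8 * 1024);

// Populate a fresh cluster, then shut it down.
MiniDFSCluster cluster = new MiniDFSCluster(65314, conf, 4, false);
try {
  files.createFiles(cluster.getFileSystem(), "/srcdat");
} finally {
  cluster.shutdown();
}

// Restart on a new port without reformatting the namenode, so the
// on-disk state must survive; then verify against the seeded data.
cluster = new MiniDFSCluster(65320, conf, 4, false, false);
try {
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Filesystem corrupted after restart.", files.checkFiles(fs, "/srcdat"));
  files.cleanup(fs, "/srcdat");
} finally {
  cluster.shutdown();
}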

