hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r732533 - in /hadoop/core/trunk: ./ src/core/org/apache/hadoop/fs/ src/hdfs/org/apache/hadoop/hdfs/ src/test/org/apache/hadoop/fs/ src/test/org/apache/hadoop/hdfs/ src/test/org/apache/hadoop/mapred/ src/test/org/apache/hadoop/mapred/lib/
Date Wed, 07 Jan 2009 22:16:44 GMT
Author: szetszwo
Date: Wed Jan  7 14:16:43 2009
New Revision: 732533

URL: http://svn.apache.org/viewvc?rev=732533&view=rev
Log:
HADOOP-4648. Remove deprecated InMemoryFileSystem and ChecksumDistributedFileSystem.  (cdouglas
via szetszwo)

Removed:
    hadoop/core/trunk/src/core/org/apache/hadoop/fs/InMemoryFileSystem.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/ChecksumDistributedFileSystem.java
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Jan  7 14:16:43 2009
@@ -10,6 +10,9 @@
     HADOOP-4941. Remove deprecated FileSystem methods: getBlockSize(Path f),
     getLength(Path f) and getReplication(Path src).  (szetszwo)
 
+    HADOOP-4648. Remove obsolete, deprecated InMemoryFileSystem and
+    ChecksumDistributedFileSystem.  (cdouglas via szetszwo)
+
   NEW FEATURES
 
     HADOOP-4268. Change fsck to use ClientProtocol methods so that the

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/TestChecksumFileSystem.java Wed Jan  7
14:16:43 2009
@@ -21,7 +21,6 @@
 import java.net.URI;
 
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.InMemoryFileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import junit.framework.TestCase;
@@ -39,34 +38,6 @@
                  ChecksumFileSystem.getChecksumLength(10000000000000L, 10));    
   } 
   
-  // check that the checksum file is deleted for Checksum file system.
-  public void testDeletionOfCheckSum() throws Exception {
-    Configuration conf = new Configuration();
-    URI uri = URI.create("ramfs://mapoutput" + "_tmp");
-    InMemoryFileSystem inMemFs =  (InMemoryFileSystem)FileSystem.get(uri, conf);
-    Path testPath = new Path("/file_1");
-    inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
-    FSDataOutputStream fout = inMemFs.create(testPath);
-    fout.write("testing".getBytes());
-    fout.close();
-    assertTrue("checksum exists", inMemFs.exists(inMemFs.getChecksumFile(testPath)));
-    inMemFs.delete(testPath, true);
-    assertTrue("checksum deleted", !inMemFs.exists(inMemFs.getChecksumFile(testPath)));
-    // check for directories getting deleted.
-    testPath = new Path("/tesdir/file_1");
-    inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
-    fout = inMemFs.create(testPath);
-    fout.write("testing".getBytes());
-    fout.close();
-    testPath = new Path("/testdir/file_2");
-    inMemFs.reserveSpaceWithCheckSum(testPath, 1024);
-    fout = inMemFs.create(testPath);
-    fout.write("testing".getBytes());
-    fout.close();
-    inMemFs.delete(testPath, true);
-    assertTrue("nothing in the namespace", inMemFs.listStatus(new Path("/")).length == 0);
-  }
-  
   public void testVerifyChecksum() throws Exception {
     String TEST_ROOT_DIR
     = System.getProperty("test.build.data","build/test/data/work-dir/localfs");

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSShell.java Wed Jan  7 14:16:43
2009
@@ -581,15 +581,11 @@
 
   public void testCopyToLocal() throws IOException {
     Configuration conf = new Configuration();
-    /* This tests some properties of ChecksumFileSystem as well.
-     * Make sure that we create ChecksumDFS */
-    conf.set("fs.hdfs.impl",
-             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof ChecksumDistributedFileSystem);
-    ChecksumDistributedFileSystem dfs = (ChecksumDistributedFileSystem)fs;
+               fs instanceof DistributedFileSystem);
+    DistributedFileSystem dfs = (DistributedFileSystem)fs;
     FsShell shell = new FsShell();
     shell.setConf(conf);
 
@@ -872,13 +868,11 @@
     Configuration conf = new Configuration();
     /* This tests some properties of ChecksumFileSystem as well.
      * Make sure that we create ChecksumDFS */
-    conf.set("fs.hdfs.impl",
-             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
-            fs instanceof ChecksumDistributedFileSystem);
-    ChecksumDistributedFileSystem fileSys = (ChecksumDistributedFileSystem)fs;
+            fs instanceof DistributedFileSystem);
+    DistributedFileSystem fileSys = (DistributedFileSystem)fs;
     FsShell shell = new FsShell();
     shell.setConf(conf);
 
@@ -937,56 +931,6 @@
       }
       fileSys.delete(myFile2, true);
 
-      // Verify that we can get with and without crc
-      {
-        File testFile = new File(TEST_ROOT_DIR, "mkdirs/myFile");
-        File checksumFile = new File(fileSys.getChecksumFile(
-                                                             new Path(testFile.getAbsolutePath())).toString());
-        testFile.delete();
-        checksumFile.delete();
-          
-        String[] args = new String[3];
-        args[0] = "-get";
-        args[1] = "/test/mkdirs";
-        args[2] = TEST_ROOT_DIR;
-        int val = -1;
-        try {
-          val = shell.run(args);
-        } catch (Exception e) {
-          System.err.println("Exception raised from DFSShell.run " +
-                             e.getLocalizedMessage()); 
-        }
-        assertTrue(val == 0);
-        assertTrue("Copying failed.", testFile.exists());
-        assertTrue("Checksum file " + checksumFile+" is copied.", !checksumFile.exists());
-        testFile.delete();
-      }
-      {
-        File testFile = new File(TEST_ROOT_DIR, "mkdirs/myFile");
-        File checksumFile = new File(fileSys.getChecksumFile(
-                                                             new Path(testFile.getAbsolutePath())).toString());
-        testFile.delete();
-        checksumFile.delete();
-          
-        String[] args = new String[4];
-        args[0] = "-get";
-        args[1] = "-crc";
-        args[2] = "/test/mkdirs";
-        args[3] = TEST_ROOT_DIR;
-        int val = -1;
-        try {
-          val = shell.run(args);
-        } catch (Exception e) {
-          System.err.println("Exception raised from DFSShell.run " +
-                             e.getLocalizedMessage()); 
-        }
-        assertTrue(val == 0);
-          
-        assertTrue("Copying data file failed.", testFile.exists());
-        assertTrue("Checksum file " + checksumFile+" not copied.", checksumFile.exists());
-        testFile.delete();
-        checksumFile.delete();
-      }
       // Verify that we get an error while trying to read an nonexistent file
       {
         String[] args = new String[2];

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java Wed Jan  7 14:16:43
2009
@@ -22,7 +22,6 @@
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
-import org.apache.hadoop.fs.ChecksumFileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -203,13 +202,13 @@
   /**
   * Tests read/seek/getPos/skipped operation for input stream.
    */
-  private void testChecker(ChecksumFileSystem fileSys, boolean readCS)
+  private void testChecker(FileSystem fileSys, boolean readCS)
   throws Exception {
     Path file = new Path("try.dat");
     if( readCS ) {
       writeFile(fileSys, file);
     } else {
-      writeFile(fileSys.getRawFileSystem(), file);
+      writeFile(fileSys, file);
     }
     stm = fileSys.open(file);
     checkReadAndGetPos();
@@ -283,13 +282,11 @@
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
     conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
-    conf.set("fs.hdfs.impl",
-             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
     rand.nextBytes(expected);
 
     // test DFS
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
-    ChecksumFileSystem fileSys = (ChecksumFileSystem)cluster.getFileSystem();
+    FileSystem fileSys = cluster.getFileSystem();
     try {
       testChecker(fileSys, true);
       testChecker(fileSys, false);
@@ -312,7 +309,7 @@
     }
   }
 
-  private void testSeekAndRead(ChecksumFileSystem fileSys)
+  private void testSeekAndRead(FileSystem fileSys)
   throws IOException {
     Path file = new Path("try.dat");
     writeFile(fileSys, file);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java Wed Jan  7 14:16:43
2009
@@ -113,8 +113,6 @@
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", BLOCK_SIZE);
     conf.setInt("io.bytes.per.checksum", BYTES_PER_CHECKSUM);
-    conf.set("fs.hdfs.impl",
-             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");      
     MiniDFSCluster cluster = new MiniDFSCluster(
         conf, NUM_OF_DATANODES, true, null);
     fileSys = cluster.getFileSystem();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java Wed Jan
 7 14:16:43 2009
@@ -36,8 +36,6 @@
     FileSystem fileSys = null;
     try {
       JobConf conf = new JobConf();
-      conf.set("fs.hdfs.impl",
-               "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");      
       dfs = new MiniDFSCluster(conf, 1, true, null);
       fileSys = dfs.getFileSystem();
       mr = new MiniMRCluster(2, fileSys.getName(), 4);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java?rev=732533&r1=732532&r2=732533&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
(original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/lib/TestDelegatingInputFormat.java
Wed Jan  7 14:16:43 2009
@@ -37,8 +37,6 @@
 
   public void testSplitting() throws Exception {
     JobConf conf = new JobConf();
-    conf.set("fs.hdfs.impl",
-       "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
     MiniDFSCluster dfs = null;
     try {
       dfs = new MiniDFSCluster(conf, 4, true, new String[] { "/rack0",



Mime
View raw message