hadoop-hdfs-commits mailing list archives

From hair...@apache.org
Subject svn commit: r1038222 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/name...
Date Tue, 23 Nov 2010 17:42:06 GMT
Author: hairong
Date: Tue Nov 23 17:42:05 2010
New Revision: 1038222

URL: http://svn.apache.org/viewvc?rev=1038222&view=rev
Log:
HDFS-1482. Add listCorruptFileBlocks to DistributedFileSystem. Contributed by Patrick Kling.

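For readers skimming the diff: the change surfaces the NameNode's corrupt-file listing through the public client classes. A minimal usage sketch follows (the "/data" path and the configuration setup are illustrative, not part of this commit; it assumes fs.defaultFS points at an HDFS cluster so the cast holds):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CorruptFileBlocks;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    Configuration conf = new Configuration();
    // The new call is defined on DistributedFileSystem (and on Hdfs/DFSClient below).
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // A null cookie asks for the first batch of corrupt files under the path.
    CorruptFileBlocks corrupt = dfs.listCorruptFileBlocks("/data", null);
    for (String file : corrupt.getFiles()) {
      System.out.println("corrupt: " + file);
    }
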
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_files.jsp

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Nov 23 17:42:05 2010
@@ -6,10 +6,15 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    HDFS-1482. Add listCorruptFileBlocks to DistributedFileSystem.
+    (Patrick Kling via hairong)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
 
+    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -230,8 +235,6 @@ Release 0.22.0 - Unreleased
 
     HDFS-1513. Fix a number of warnings. (eli)
 
-    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
-
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/fs/Hdfs.java Tue Nov 23 17:42:05 2010
@@ -303,6 +303,16 @@ public class Hdfs extends AbstractFileSy
     return listing.toArray(new FileStatus[listing.size()]);
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CorruptFileBlocks listCorruptFileBlocks(String path,
+                                                 String cookie)
+    throws IOException {
+    return dfs.listCorruptFileBlocks(path, cookie);
+  }
+
   @Override
   public void mkdir(Path dir, FsPermission permission, boolean createParent)
     throws IOException, UnresolvedLinkException {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Tue Nov 23 17:42:05 2010
@@ -61,6 +61,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -1116,6 +1117,16 @@ public class DFSClient implements FSCons
     return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
   }
   
+  /**
+   * @return a list in which each entry describes a corrupt file/block
+   * @throws IOException
+   */
+  public CorruptFileBlocks listCorruptFileBlocks(String path,
+                                                 String cookie)
+    throws IOException {
+    return namenode.listCorruptFileBlocks(path, cookie);
+  }
+
   public DatanodeInfo[] datanodeReport(DatanodeReportType type)
   throws IOException {
     return namenode.getDatanodeReport(type);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Tue Nov 23 17:42:05 2010
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -601,6 +602,16 @@ public class DistributedFileSystem exten
     return dfs.getCorruptBlocksCount();
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CorruptFileBlocks listCorruptFileBlocks(String path,
+                                                 String cookie)
+    throws IOException {
+    return dfs.listCorruptFileBlocks(path, cookie);
+  }
+
   /** Return statistics for each datanode. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
     return dfs.datanodeReport(DatanodeReportType.ALL);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Nov 23 17:42:05 2010
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileAlreadyE
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -67,9 +68,9 @@ public interface ClientProtocol extends 
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 64: Remove ClientProtocol changes related to -list-corruptfiles
+   * 65: Add listCorruptFileBlocks to ClientProtocol
    */
-  public static final long versionID = 64L;
+  public static final long versionID = 65L;
   
   ///////////////////////////////////////
   // File contents
@@ -658,6 +659,20 @@ public interface ClientProtocol extends 
       throws IOException;
 
   /**
+   * @return CorruptFileBlocks, containing a list of corrupt files (with
+   *         duplicates if there is more than one corrupt block in a file)
+   *         and a cookie
+   * @throws IOException
+   *
+   * Each call returns a subset of the corrupt files in the system. To obtain
+   * all corrupt files, call this method repeatedly and each time pass in the
+   * cookie returned from the previous call.
+   */
+  public CorruptFileBlocks
+    listCorruptFileBlocks(String path, String cookie)
+    throws IOException;
+  
+  /**
    * Dumps namenode data structures into specified file. If the file
    * already exists, then append.
    *

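The javadoc above defines a cookie-based paging contract. A minimal client-side sketch of that contract, reusing the dfs handle from the sketch near the top, and assuming CorruptFileBlocks exposes the returned cookie through a getCookie() accessor (that accessor is not shown in this diff and is an assumption here):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.fs.CorruptFileBlocks;

    List<String> allCorrupt = new ArrayList<String>();
    String cookie = null;
    while (true) {
      // Each call returns a subset; pass the previous cookie to continue.
      CorruptFileBlocks batch = dfs.listCorruptFileBlocks("/", cookie);
      if (batch.getFiles().length == 0) {
        break;                      // an empty batch means nothing left to report
      }
      allCorrupt.addAll(Arrays.asList(batch.getFiles()));
      cookie = batch.getCookie();   // assumed accessor; resumes after the last block
    }
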
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Nov 23 17:42:05 2010
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1125,19 +1126,23 @@ public class NameNode implements Namenod
   }
 
   /**
-   * 
-   * @param path
-   *          Sub-tree used in querying corrupt files
-   * @param startBlockAfter
-   *          Paging support---pass in the last block returned from the previous
-   *          call and some # of corrupt blocks after that point are returned
-   * @return a list in which each entry describes a corrupt file/block
-   * @throws AccessControlException
-   * @throws IOException
+   * {@inheritDoc}
    */
-  public Collection<FSNamesystem.CorruptFileBlockInfo> listCorruptFileBlocks(String path,
-      String startBlockAfter) throws AccessControlException, IOException {
-    return namesystem.listCorruptFileBlocks(path, startBlockAfter);
+  @Override
+  public CorruptFileBlocks
+    listCorruptFileBlocks(String path, String cookie) 
+    throws IOException {
+    Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
+      namesystem.listCorruptFileBlocks(path, cookie);
+    
+    String[] files = new String[fbs.size()];
+    String lastCookie = "";
+    int i = 0;
+    for(FSNamesystem.CorruptFileBlockInfo fb: fbs) {
+      files[i++] = fb.path;
+      lastCookie = fb.block.getBlockName();
+    }
+    return new CorruptFileBlocks(files, lastCookie);
   }
   
   /** {@inheritDoc} */

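Note the design choice in the conversion above: the reply's cookie is simply the name of the last corrupt block visited, which FSNamesystem can use to resume the scan on the next call. The new org.apache.hadoop.fs.CorruptFileBlocks type itself is not part of this diff; from its usage here and in TestFsck, a plausible minimal shape is the following (the getCookie() accessor is an assumption, and the real class presumably also implements Writable for RPC):

    // Hypothetical sketch of the value type, inferred from this commit's usage:
    // constructed as new CorruptFileBlocks(files, lastCookie) in NameNode,
    // read via getFiles() in TestFsck.
    public class CorruptFileBlocks {
      private final String[] files;  // corrupt file paths, with duplicates if a
                                     // file has more than one corrupt block
      private final String cookie;   // name of the last corrupt block returned

      public CorruptFileBlocks(String[] files, String cookie) {
        this.files = files;
        this.cookie = cookie;
      }

      public String[] getFiles() { return files; }
      public String getCookie() { return cookie; }
    }
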
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Nov 23 17:42:05 2010
@@ -216,8 +216,8 @@ public class NamenodeFsck {
  
   private void listCorruptFileBlocks() throws AccessControlException,
       IOException {
-    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFiles = namenode
-        .listCorruptFileBlocks(path, startBlockAfter);
+    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFiles = namenode.
+      getNamesystem().listCorruptFileBlocks(path, startBlockAfter);
     int numCorruptFiles = corruptFiles.size();
     String filler;
     if (numCorruptFiles > 0) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Tue Nov 23 17:42:05 2010
@@ -65,8 +65,8 @@ public class TestCorruptFilesJsp  {
 
       // verify there are not corrupt files
       final NameNode namenode = cluster.getNameNode();
-      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode
-          .listCorruptFileBlocks("/", null);
+      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
+        getNamesystem().listCorruptFileBlocks("/", null);
       assertTrue("There are " + badFiles.size()
           + " corrupt files, but expecting none", badFiles.size() == 0);
 
@@ -94,7 +94,7 @@ public class TestCorruptFilesJsp  {
       }
 
       // verify if all corrupt files were reported to NN
-      badFiles = namenode.listCorruptFileBlocks("/", null);
+      badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
       assertTrue("Expecting 3 corrupt files, but got " + badFiles.size(),
           badFiles.size() == 3);
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Nov 23 17:42:05 2010
@@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -487,14 +488,14 @@ public class TestFsck extends TestCase {
 
       // wait for the namenode to see the corruption
       final NameNode namenode = cluster.getNameNode();
-      Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = namenode
+      CorruptFileBlocks corruptFileBlocks = namenode
           .listCorruptFileBlocks("/corruptData", null);
-      int numCorrupt = corruptFileBlocks.size();
+      int numCorrupt = corruptFileBlocks.getFiles().length;
       while (numCorrupt == 0) {
         Thread.sleep(1000);
         corruptFileBlocks = namenode
             .listCorruptFileBlocks("/corruptData", null);
-        numCorrupt = corruptFileBlocks.size();
+        numCorrupt = corruptFileBlocks.getFiles().length;
       }
       outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
       System.out.println("2. bad fsck out: " + outStr);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Nov 23 17:42:05 2010
@@ -30,10 +30,12 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 
 /**
  * This class tests the listCorruptFileBlocks API.
@@ -64,8 +66,8 @@ public class TestListCorruptFileBlocks e
 
       // fetch bad file list from namenode. There should be none.
       final NameNode namenode = cluster.getNameNode();
-      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode
-          .listCorruptFileBlocks("/", null);
+      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
+        getNamesystem().listCorruptFileBlocks("/", null);
       assertTrue("Namenode has " + badFiles.size()
           + " corrupt files. Expecting None.", badFiles.size() == 0);
 
@@ -106,7 +108,7 @@ public class TestListCorruptFileBlocks e
       }
 
       // fetch bad file list from namenode. There should be one file.
-      badFiles = namenode.listCorruptFileBlocks("/", null);
+      badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
       LOG.info("Namenode has bad files. " + badFiles.size());
       assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
           badFiles.size() == 1);
@@ -133,8 +135,8 @@ public class TestListCorruptFileBlocks e
       util.createFiles(fs, "/corruptData");
 
       final NameNode namenode = cluster.getNameNode();
-      Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = namenode
-          .listCorruptFileBlocks("/corruptData", null);
+      Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
+        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
       int numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       // delete the blocks
@@ -159,11 +161,12 @@ public class TestListCorruptFileBlocks e
       }
 
       int count = 0;
-      corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
+      corruptFileBlocks = namenode.getNamesystem().
+        listCorruptFileBlocks("/corruptData", null);
       numCorrupt = corruptFileBlocks.size();
       while (numCorrupt < 3) {
         Thread.sleep(1000);
-        corruptFileBlocks = namenode
+        corruptFileBlocks = namenode.getNamesystem()
             .listCorruptFileBlocks("/corruptData", null);
         numCorrupt = corruptFileBlocks.size();
         count++;
@@ -178,7 +181,8 @@ public class TestListCorruptFileBlocks e
       FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
           .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
       // now get the 2nd and 3rd file that is corrupt
-      Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks = namenode
+      Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
+        namenode.getNamesystem()
           .listCorruptFileBlocks("/corruptData", cfb[0].block.getBlockName());
       FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
           .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
@@ -187,14 +191,16 @@ public class TestListCorruptFileBlocks e
       assertTrue(ncfb[0].block.getBlockName()
           .equalsIgnoreCase(cfb[1].block.getBlockName()));
 
-      corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData",
+      corruptFileBlocks = 
+        namenode.getNamesystem().listCorruptFileBlocks("/corruptData",
           ncfb[1].block.getBlockName());
       numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       // Do a listing on a dir which doesn't have any corrupt blocks and
       // validate
       util.createFiles(fs, "/goodData");
-      corruptFileBlocks = namenode.listCorruptFileBlocks("/goodData", null);
+      corruptFileBlocks = 
+        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
       numCorrupt = corruptFileBlocks.size();
       assertTrue(numCorrupt == 0);
       util.cleanup(fs, "/corruptData");
@@ -205,7 +211,76 @@ public class TestListCorruptFileBlocks e
       }
     }
   }
-  
+
+  /**
+   * test listCorruptFileBlocks in DistributedFileSystem
+   */ 
+  public void testlistCorruptFileBlocksDFS() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.blockreport.intervalMsec", 1000);
+    conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans
+                                                           // directories
+    FileSystem fs = null;
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+      DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
+      util.createFiles(fs, "/corruptData");
+
+      final NameNode namenode = cluster.getNameNode();
+      CorruptFileBlocks corruptFileBlocks = 
+        dfs.listCorruptFileBlocks("/corruptData", null);
+      int numCorrupt = corruptFileBlocks.getFiles().length;
+      assertTrue(numCorrupt == 0);
+      // delete the blocks
+      File baseDir = new File(System.getProperty("test.build.data",
+          "build/test/data"), "dfs/data");
+      for (int i = 0; i < 8; i++) {
+        File data_dir = new File(baseDir, "data" + (i + 1)
+            + MiniDFSCluster.FINALIZED_DIR_NAME);
+        File[] blocks = data_dir.listFiles();
+        if (blocks == null)
+          continue;
+        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
+        // (blocks.length > 0));
+        for (int idx = 0; idx < blocks.length; idx++) {
+          if (!blocks[idx].getName().startsWith("blk_")) {
+            continue;
+          }
+          LOG.info("Deliberately removing file " + blocks[idx].getName());
+          assertTrue("Cannot remove file.", blocks[idx].delete());
+          // break;
+        }
+      }
+
+      int count = 0;
+      corruptFileBlocks = dfs.listCorruptFileBlocks("/corruptData", null);
+      numCorrupt = corruptFileBlocks.getFiles().length;
+      while (numCorrupt < 3) {
+        Thread.sleep(1000);
+        corruptFileBlocks = dfs.listCorruptFileBlocks("/corruptData", null);
+        numCorrupt = corruptFileBlocks.getFiles().length;
+        count++;
+        if (count > 30)
+          break;
+      }
+      // Validate we get all the corrupt files
+      LOG.info("Namenode has bad files. " + numCorrupt);
+      assertTrue(numCorrupt == 3);
+
+      util.cleanup(fs, "/corruptData");
+      util.cleanup(fs, "/goodData");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+    
   /** check if NN.listCorruptFiles() returns the right limit */
   public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
@@ -226,8 +301,8 @@ public class TestListCorruptFileBlocks e
 
       // verify that there are no bad blocks.
       final NameNode namenode = cluster.getNameNode();
-      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode
-          .listCorruptFileBlocks("/srcdat2", null);
+      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
+        getNamesystem().listCorruptFileBlocks("/srcdat2", null);
       assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
           badFiles.size() == 0);
 
@@ -248,14 +323,17 @@ public class TestListCorruptFileBlocks e
         }
       }
 
-      badFiles = namenode.listCorruptFileBlocks("/srcdat2", null);
+      badFiles = 
+        namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
         
        while (badFiles.size() < maxCorruptFileBlocks) {
         LOG.info("# of corrupt files is: " + badFiles.size());
         Thread.sleep(10000);
-        badFiles = namenode.listCorruptFileBlocks("/srcdat2", null);
+        badFiles = namenode.getNamesystem().
+          listCorruptFileBlocks("/srcdat2", null);
       }
-      badFiles = namenode.listCorruptFileBlocks("/srcdat2", null); 
+      badFiles = namenode.getNamesystem().
+        listCorruptFileBlocks("/srcdat2", null); 
       LOG.info("Namenode has bad files. " + badFiles.size());
       assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " + 
           maxCorruptFileBlocks + ".",

Modified: hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_files.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_files.jsp?rev=1038222&r1=1038221&r2=1038222&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_files.jsp (original)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_files.jsp Tue Nov 23 17:42:05 2010
@@ -34,7 +34,7 @@
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
       + nn.getNameNodeAddress().getPort();
   Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
-	nn.listCorruptFileBlocks("/", null);
+	fsn.listCorruptFileBlocks("/", null);
   int corruptFileCount = corruptFileBlocks.size();
 %>
 


