hadoop-hdfs-commits mailing list archives

From dhr...@apache.org
Subject svn commit: r920713 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/
Date Tue, 09 Mar 2010 06:52:05 GMT
Author: dhruba
Date: Tue Mar  9 06:52:05 2010
New Revision: 920713

URL: http://svn.apache.org/viewvc?rev=920713&view=rev
Log:
HDFS-729. NameNode API to list files that have missing blocks.
(Rodrigo Schmidt via dhruba)


Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Mar  9 06:52:05 2010
@@ -89,6 +89,9 @@ Trunk (unreleased changes)
 
     HDFS-998. Quote blocks streamed through jsps. (cdouglas)
 
+    HDFS-729. NameNode API to list files that have missing blocks.
+    (Rodrigo Schmidt via dhruba)
+
   OPTIMIZATIONS
 
     HDFS-946. NameNode should not return full path name when listing a

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Tue Mar  9 06:52:05 2010
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -54,9 +55,9 @@ public interface ClientProtocol extends 
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 58: Add symlink APIs.
+   * 59: Add API to retrieve corrupted block list
    */
-  public static final long versionID = 58L;
+  public static final long versionID = 59L;
   
   ///////////////////////////////////////
   // File contents
@@ -531,6 +532,15 @@ public interface ClientProtocol extends 
   public void metaSave(String filename) throws IOException;
 
   /**
+   * @return Array of FileStatus objects referring to corrupted files.
+   *         The server could return all or a few of the files that are corrupt.
+   * @throws AccessControlException
+   * @throws IOException
+   */
+  FileStatus[] getCorruptFiles() 
+    throws AccessControlException, IOException; 
+  
+  /**
    * Get the file info for a specific file or directory.
    * @param src The string representation of the path to the file
    * @throws UnresolvedLinkException if the path contains symlinks;

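For context, a minimal client-side sketch of how the new RPC might be invoked; it is an illustration, not part of this commit. It assumes a running HDFS cluster reachable through the default configuration and a caller with superuser privilege (FSNamesystem.getCorruptFiles() below enforces that), and it reuses DFSClient.createNamenode(), the same helper the updated tests use. The class name ListCorruptFiles is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    // Hypothetical client, not part of this commit.
    public class ListCorruptFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Open a ClientProtocol proxy to the namenode named in the configuration.
        ClientProtocol namenode = DFSClient.createNamenode(conf);
        // May return only a subset of the corrupt files (the list is capped server-side).
        FileStatus[] corrupt = namenode.getCorruptFiles();
        System.out.println("Files with missing/corrupt blocks: " + corrupt.length);
        for (FileStatus stat : corrupt) {
          System.out.println(stat.getPath());
        }
      }
    }
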
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Tue Mar  9 06:52:05 2010
@@ -24,6 +24,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -51,6 +52,7 @@ public class BlockManager {
   // Default initial capacity and load factor of map
   public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16;
   public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
+  public static final int DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
 
   private final FSNamesystem namesystem;
 
@@ -105,7 +107,9 @@ public class BlockManager {
   int minReplication;
   // Default number of replicas
   int defaultReplication;
-
+  // How many entries are returned by getCorruptInodes()
+  int maxCorruptFilesReturned;
+  
   // variable to enable check for enough racks 
   boolean shouldCheckForEnoughRacks = true;
 
@@ -140,6 +144,8 @@ public class BlockManager {
                          namesystem,
                          namesystem.clusterMap);
 
+    this.maxCorruptFilesReturned = conf.getInt("dfs.corruptfilesreturned.max",
+        DEFAULT_MAX_CORRUPT_FILES_RETURNED);
     this.defaultReplication = conf.getInt("dfs.replication", 3);
     this.maxReplication = conf.getInt("dfs.replication.max", 512);
     this.minReplication = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
@@ -1706,4 +1712,25 @@ public class BlockManager {
                                                      startingBlockId);
   }  
   
+  /**
+   * @return inodes of files with corrupt blocks, with a maximum of 
+   * maxCorruptFilesReturned inodes listed in total
+   */
+  INode[] getCorruptInodes() {
+    LinkedHashSet<INode> set = new LinkedHashSet<INode>();
+
+    for (Block blk : 
+            neededReplications.getQueue(
+                UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS)){
+      INode inode = blocksMap.getINode(blk);
+      if (inode != null && countNodes(blk).liveReplicas() == 0) {
+        set.add(inode);
+        if (set.size() >= this.maxCorruptFilesReturned) {
+          break;  
+        }
+      } 
+    }
+    return set.toArray(new INode[set.size()]);
+  }
+  
 }

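The new getCorruptInodes() leans on two properties of LinkedHashSet: duplicates are dropped (several corrupt blocks can belong to the same inode) and insertion order is preserved. A self-contained sketch of that dedupe-and-cap pattern, using plain strings as stand-ins for inodes and an illustrative limit of 3:

    import java.util.LinkedHashSet;

    public class DedupeAndCap {
      public static void main(String[] args) {
        // Each entry stands in for the file owning one corrupt block.
        String[] fileOfCorruptBlock = {"/a", "/b", "/a", "/c", "/b", "/d"};
        int limit = 3;  // plays the role of maxCorruptFilesReturned
        LinkedHashSet<String> files = new LinkedHashSet<String>();
        for (String f : fileOfCorruptBlock) {
          files.add(f);                // duplicates are ignored, order is kept
          if (files.size() >= limit) {
            break;                     // stop once the cap is reached
          }
        }
        System.out.println(files);     // prints [/a, /b, /c]
      }
    }
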
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Mar  9 06:52:05 2010
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -4387,6 +4388,38 @@ public class FSNamesystem implements FSC
                                                   startingBlockId);
   }
 
+  /**
+   * @return Array of FileStatus objects representing files with 
+   * corrupted blocks.
+   * @throws AccessControlException
+   * @throws IOException
+   */
+  synchronized FileStatus[] getCorruptFiles() 
+    throws AccessControlException, IOException {
+    
+    checkSuperuserPrivilege();
+    
+    INode[] inodes = blockManager.getCorruptInodes();
+    FileStatus[] ret = new FileStatus[inodes.length];
+    
+    int i = 0;
+    for (INode inode: inodes) {
+      String src = inode.getFullPathName();
+      ret[i++] = new FileStatus(inode.computeContentSummary().getLength(), 
+          inode.isDirectory(), 
+          ((INodeFile)inode).getReplication(), 
+          ((INodeFile)inode).getPreferredBlockSize(),
+          inode.getModificationTime(),
+          inode.getAccessTime(),
+          inode.getFsPermission(),
+          inode.getUserName(),
+          inode.getGroupName(),
+          new Path(src));
+    }
+
+    return ret;
+  }
+  
   public synchronized ArrayList<DatanodeDescriptor> getDecommissioningNodes() {
     ArrayList<DatanodeDescriptor> decommissioningNodes = 
         new ArrayList<DatanodeDescriptor>();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Mar  9 06:52:05 2010
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
@@ -969,6 +970,14 @@ public class NameNode implements ClientP
   }
 
   /** {@inheritDoc} */
+  public FileStatus[] getCorruptFiles() 
+    throws AccessControlException, IOException {
+    
+    return namesystem.getCorruptFiles();
+    
+  }
+  
+  /** {@inheritDoc} */
   public ContentSummary getContentSummary(String path) throws IOException {
     return namesystem.getContentSummary(path);
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Tue Mar  9 06:52:05 2010
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.B
  */
 class UnderReplicatedBlocks implements Iterable<Block> {
   static final int LEVEL = 4;
+  static public final int QUEUE_WITH_CORRUPT_BLOCKS = 2;
   private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
       
   /* constructor */
@@ -81,7 +82,7 @@ class UnderReplicatedBlocks implements I
       if (decommissionedReplicas > 0) {
         return 0;
       }
-      return 2; // keep these blocks in needed replication.
+      return QUEUE_WITH_CORRUPT_BLOCKS; // keep these blocks in needed replication.
     } else if(curReplicas==1) {
       return 0; // highest priority
     } else if(curReplicas*3<expectedReplicas) {
@@ -183,7 +184,15 @@ class UnderReplicatedBlocks implements I
                                     + " at priority level " + curPri);
     }
   }
-      
+
+  /* returns an Iterable of all blocks in a given priority queue */
+  public synchronized Iterable<Block> getQueue(int priority) {
+    if (priority < 0 || priority >= LEVEL) {
+      return null;
+    }
+    return priorityQueues.get(priority);
+  }
+  
   /* return an iterator of all the under replication blocks */
   public synchronized BlockIterator iterator() {
     return new BlockIterator();

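For illustration only, a hypothetical helper that could sit alongside UnderReplicatedBlocks (the class has package visibility, so this assumes the org.apache.hadoop.hdfs.server.namenode package). It shows how the new getQueue() accessor and the QUEUE_WITH_CORRUPT_BLOCKS constant might be used, including the null return for out-of-range priorities; the class name CorruptQueueProbe is made up for this sketch.

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.hdfs.protocol.Block;

    // Hypothetical helper, not part of this commit.
    class CorruptQueueProbe {
      static int countQueuedCorruptBlocks(UnderReplicatedBlocks neededReplications) {
        Iterable<Block> corrupt =
            neededReplications.getQueue(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
        if (corrupt == null) {    // getQueue() returns null for out-of-range priorities
          return 0;
        }
        int count = 0;
        for (Block b : corrupt) {
          count++;
        }
        return count;
      }
    }
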
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Tue Mar  9 06:52:05 2010
@@ -260,6 +260,11 @@ public class TestDFSClientRetries extend
 
     public void setTimes(String src, long mtime, long atime) throws IOException {}
 
+    public FileStatus[] getCorruptFiles()
+      throws AccessControlException, IOException {
+      return null;
+    }
+
     public void createSymlink(String target, String newpath, 
         FsPermission dirPerm, boolean createPath) 
         throws IOException, UnresolvedLinkException {}

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=920713&r1=920712&r2=920713&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Mar  9 06:52:05 2010
@@ -22,23 +22,43 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.ArrayList;
+import java.util.Random;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.log4j.Level;
 
 /**
  * A JUnit test for corrupted file handling.
  */
 public class TestFileCorruption extends TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+  }
+  static Log LOG = ((Log4JLogger)NameNode.stateChangeLog);
+
   /** check if DFS can handle corrupted blocks properly */
   public void testFileCorruption() throws Exception {
     MiniDFSCluster cluster = null;
@@ -168,4 +188,172 @@ public class TestFileCorruption extends 
     }
     return new Block(blockId, blocks[idx].length(), blockTimeStamp);
   }
+
+  /** check if ClientProtocol.getCorruptFiles() returns a file that has missing blocks */
+  public void testCorruptFilesMissingBlock() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
+      conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      FileSystem fs = cluster.getFileSystem();
+
+      // create two files with one block each
+      DFSTestUtil util = new DFSTestUtil("testCorruptFilesMissingBlock", 2, 1, 512);
+      util.createFiles(fs, "/srcdat");
+
+      // verify that there are no bad blocks.
+      ClientProtocol namenode = DFSClient.createNamenode(conf);
+      FileStatus[] badFiles = namenode.getCorruptFiles();
+      assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
+          badFiles.length == 0);
+
+      // Now deliberately remove one block
+      File data_dir = new File(System.getProperty("test.build.data"),
+      "dfs/data/data1/current/finalized");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length
> 0));
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (!blocks[idx].getName().startsWith("blk_")) {
+          continue;
+        }
+        LOG.info("Deliberately removing file "+blocks[idx].getName());
+        assertTrue("Cannot remove file.", blocks[idx].delete());
+        break;
+      }
+
+      badFiles = namenode.getCorruptFiles();
+      while (badFiles.length == 0) {
+        Thread.sleep(1000);
+        badFiles = namenode.getCorruptFiles();
+      }
+      LOG.info("Namenode has bad files. " + badFiles.length);
+      assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 1.",
+          badFiles.length == 1);
+      util.cleanup(fs, "/srcdat");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  /** check if ClientProtocol.getCorruptFiles() returns the right limit */
+  public void testMaxCorruptFiles() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt("dfs.corruptfilesreturned.max", 2);
+      conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
+      conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      FileSystem fs = cluster.getFileSystem();
+
+      // create two files with one block each
+      DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles", 4, 1, 512);
+      util.createFiles(fs, "/srcdat2");
+
+      // verify that there are no bad blocks.
+      ClientProtocol namenode = DFSClient.createNamenode(conf);
+      FileStatus[] badFiles = namenode.getCorruptFiles();
+      assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
+          badFiles.length == 0);
+
+      // Now deliberately remove one block
+      File data_dir = new File(System.getProperty("test.build.data"),
+      "dfs/data/data1/current/finalized");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length
> 0));
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (!blocks[idx].getName().startsWith("blk_")) {
+          continue;
+        }
+        LOG.info("Deliberately removing file "+blocks[idx].getName());
+        assertTrue("Cannot remove file.", blocks[idx].delete());
+      }
+
+      badFiles = namenode.getCorruptFiles();
+      while (badFiles.length < 2) {
+        badFiles = namenode.getCorruptFiles();
+        Thread.sleep(10000);
+      }
+      badFiles = namenode.getCorruptFiles(); // once more since time has passed
+      LOG.info("Namenode has bad files. " + badFiles.length);
+      assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 2.",
+          badFiles.length == 2);
+      util.cleanup(fs, "/srcdat2");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  /** check if ClientProtocol.getCorruptFiles() returns a file that has corrupted blocks */
+  public void testCorruptFilesCorruptedBlock() throws Exception {
+    MiniDFSCluster cluster = null;
+    Random random = new Random();
+    
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
+      conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      FileSystem fs = cluster.getFileSystem();
+
+      // create two files with one block each
+      DFSTestUtil util = new DFSTestUtil("testCorruptFilesCorruptedBlock", 2, 1, 512);
+      util.createFiles(fs, "/srcdat10");
+
+      // fetch bad file list from namenode. There should be none.
+      ClientProtocol namenode = DFSClient.createNamenode(conf);
+      FileStatus[] badFiles = namenode.getCorruptFiles();
+      assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting None.",
+          badFiles.length == 0);
+
+      // Now deliberately corrupt one block
+      File data_dir = new File(System.getProperty("test.build.data"),
+      "dfs/data/data1/current/finalized");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length
> 0));
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (blocks[idx].getName().startsWith("blk_") &&
+            blocks[idx].getName().endsWith(".meta")) {
+          //
+          // shorten .meta file
+          //
+          RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
+          FileChannel channel = file.getChannel();
+          long position = channel.size() - 2;
+          int length = 2;
+          byte[] buffer = new byte[length];
+          random.nextBytes(buffer);
+          channel.write(ByteBuffer.wrap(buffer), position);
+          file.close();
+          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
+              " at offset " + position + " length " + length);
+
+          // read all files to trigger detection of corrupted replica
+          try {
+            util.checkFiles(fs, "/srcdat10");
+          } catch (BlockMissingException e) {
+            System.out.println("Received BlockMissingException as expected.");
+          } catch (IOException e) {
+            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException
" +
+                " but received IOException " + e, false);
+          }
+          break;
+        }
+      }
+
+      // fetch bad file list from namenode. There should be one file.
+      badFiles = namenode.getCorruptFiles();
+      LOG.info("Namenode has bad files. " + badFiles.length);
+      assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 1.",
+          badFiles.length == 1);
+      util.cleanup(fs, "/srcdat10");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
 }


