hadoop-common-commits mailing list archives

From hair...@apache.org
Subject svn commit: r711734 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/hdfs/org/apache/hadoop/hdfs/tools/ src/test/org/apache/hadoop/hdfs/server/namenode/
Date Wed, 05 Nov 2008 23:03:35 GMT
Author: hairong
Date: Wed Nov  5 15:03:35 2008
New Revision: 711734

URL: http://svn.apache.org/viewvc?rev=711734&view=rev
Log:
HADOOP-4530. In fsck, HttpServletResponse sendError fails with IllegalStateException. Contributed
by Hairong Kuang.
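
The one-line summary is terse, so a note on the failure mode: NamenodeFsck streams its report directly into the servlet response, which commits the response as soon as output is flushed; once committed, the Servlet API rejects HttpServletResponse.sendError() with an IllegalStateException. Below is a minimal standalone sketch of that failure mode (a hypothetical servlet, not Hadoop code; it assumes only the servlet API on the classpath):

import java.io.IOException;
import java.io.PrintWriter;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Hypothetical servlet illustrating the bug HADOOP-4530 fixes: after the
// response has been committed, sendError() throws IllegalStateException
// instead of delivering the error to the client.
public class StreamingReportServlet extends HttpServlet {
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    PrintWriter out = response.getWriter();
    out.println("partial report line");
    out.flush();                                  // response is now committed
    try {
      throw new IOException("simulated mid-report failure");
    } catch (IOException ie) {
      // The pre-patch FsckServlet did the equivalent of this call; on a
      // committed response it fails with IllegalStateException.
      response.sendError(HttpServletResponse.SC_GONE, "report failed");
    }
  }
}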

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=711734&r1=711733&r2=711734&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Nov  5 15:03:35 2008
@@ -88,6 +88,9 @@
 
     HADOOP-4587. Fix a typo in Mapper javadoc.  (Koji Noguchi via szetszwo)
 
+    HADOOP-4530. In fsck, HttpServletResponse sendError fails with
+    IllegalStateException. (hairong)
+
 Release 0.19.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=711734&r1=711733&r2=711734&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Wed Nov  5 15:03:35 2008
@@ -19,7 +19,6 @@
 
 import java.util.*;
 import java.io.*;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.conf.*;
 import org.apache.commons.logging.*;
 import javax.servlet.ServletContext;
@@ -32,26 +31,15 @@
  * This class is used in Namesystem's jetty to do fsck on namenode.
  */
 public class FsckServlet extends HttpServlet {
-
-  private static final Log LOG = LogFactory.getLog(FSNamesystem.class.getName());
-
   @SuppressWarnings("unchecked")
   public void doGet(HttpServletRequest request,
                     HttpServletResponse response
                     ) throws ServletException, IOException {
     Map<String,String[]> pmap = request.getParameterMap();
-    try {
-      ServletContext context = getServletContext();
-      NameNode nn = (NameNode) context.getAttribute("name.node");
-      Configuration conf = (Configuration) context.getAttribute("name.conf");
-      NamenodeFsck fscker = new NamenodeFsck(conf, nn, pmap, response);
-      fscker.fsck();
-    } catch (IOException ie) {
-      StringUtils.stringifyException(ie);
-      LOG.warn(ie);
-      String errMsg = "Fsck on path " + pmap.get("path") + " failed.";
-      response.sendError(HttpServletResponse.SC_GONE, errMsg);
-      throw ie;
-    }
+    ServletContext context = getServletContext();
+    NameNode nn = (NameNode) context.getAttribute("name.node");
+    Configuration conf = (Configuration) context.getAttribute("name.conf");
+    NamenodeFsck fscker = new NamenodeFsck(conf, nn, pmap, response);
+    fscker.fsck();
   }
 }
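
With the try/catch removed, FsckServlet now only looks up the NameNode and configuration from the servlet context and delegates to NamenodeFsck, which owns the response writer and reports failures in-band (see the next hunk). For comparison only, and not what this commit does, a streaming servlet that must keep reporting errors itself usually guards sendError() with isCommitted(); a hypothetical sketch:

import java.io.IOException;

import javax.servlet.http.HttpServletResponse;

// Generic guard pattern (for comparison; this commit instead moves all error
// reporting into NamenodeFsck): call sendError() only while the response is
// still uncommitted, otherwise append the message to the body.
public final class SafeError {
  private SafeError() {
  }

  public static void report(HttpServletResponse response, String msg)
      throws IOException {
    if (!response.isCommitted()) {
      response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, msg);
    } else {
      // Headers are already on the wire; the best that can be done is to
      // append the error to the body, which is what NamenodeFsck now does.
      response.getWriter().println(msg);
    }
  }
}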

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=711734&r1=711733&r2=711734&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Wed Nov  5 15:03:35 2008
@@ -68,6 +68,12 @@
 public class NamenodeFsck {
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   
+  // return string marking fsck status
+  public static final String CORRUPT_STATUS = "is CORRUPT";
+  public static final String HEALTHY_STATUS = "is HEALTHY";
+  public static final String NONEXISTENT_STATUS = "does not exist";
+  public static final String FAILURE_STATUS = "FAILED";
+  
   /** Don't attempt any fixing . */
   public static final int FIXING_NONE = 0;
   /** Move corrupted files to /lost+found . */
@@ -139,13 +145,18 @@
         // of file system and return appropriate code. Changing the output string
         // might break testcases. 
         if (res.isHealthy()) {
-          out.println("\n\nThe filesystem under path '" + path + "' is HEALTHY");
+          out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS);
         }  else {
-          out.println("\n\nThe filesystem under path '" + path + "' is CORRUPT");
+          out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS);
         }
       } else {
-        out.println("\n\nPath '" + path + "' does not exist.");
+        out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS);
       }
+    } catch (Exception e) {
+      String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS;
+      LOG.warn(errMsg, e);
+      out.println(e.getMessage());
+      out.print("\n\n"+errMsg);
     } finally {
       out.close();
     }
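
The four public constants turn the last line of fsck output into a small machine-readable contract: every run ends with exactly one of HEALTHY_STATUS, CORRUPT_STATUS, NONEXISTENT_STATUS, or FAILURE_STATUS, and the new catch-all block guarantees a FAILED marker even when fsck itself fails, rather than rethrowing to the servlet. The DFSck hunk below consumes these markers; here is a condensed sketch of that mapping, with a null guard the patch itself does not add (it assumes at least one line of output):

import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;

// Hypothetical helper, not part of the commit: classify the final line of
// fsck output the same way the updated DFSck does.
public final class FsckStatus {
  private FsckStatus() {
  }

  public static int toExitCode(String lastLine) {
    if (lastLine == null) {
      return -1;                      // no output at all: treat as failure
    }
    if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)
        || lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) {
      return 0;
    }
    if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
      return 1;
    }
    return -1;                        // FAILURE_STATUS or anything unexpected
  }
}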

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java?rev=711734&r1=711733&r2=711734&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java Wed Nov  5 15:03:35 2008
@@ -27,6 +27,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -123,19 +124,23 @@
     BufferedReader input = new BufferedReader(new InputStreamReader(
                                               stream, "UTF-8"));
     String line = null;
-    int errCode = 0;
-    // errCode returned indicating the status of Filesystem (HEALTHY/CORRUPT)
-    // depends on the format of the string. Changing the script might break
-    // fsck related testcases. For now, we scan for "is CORRUPT" as it unique.
+    String lastLine = null;
+    int errCode = -1;
     try {
       while ((line = input.readLine()) != null) {
         System.out.println(line);
-        if (line.contains("is CORRUPT"))
-          errCode = 1;
+        lastLine = line;
       }
     } finally {
       input.close();
     }
+    if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) {
+      errCode = 0;
+    } else if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
+      errCode = 1;
+    } else if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) {
+      errCode = 0;
+    }
     return errCode;
   }
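
The net effect for callers is a three-way exit code: 0 when the filesystem is healthy (or the path does not exist), 1 when it is corrupt, and -1 when fsck itself failed, matching the new testFsckError expectation below. A hedged usage sketch (RunFsck is a hypothetical driver; DFSck is normally invoked via bin/hadoop fsck):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver showing how the new exit codes surface to a caller.
public class RunFsck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    int errCode = ToolRunner.run(conf, new DFSck(conf), new String[] {"/"});
    if (errCode == 1) {
      System.err.println("Filesystem is corrupt");
    } else if (errCode < 0) {
      System.err.println("fsck did not complete");
    }
    System.exit(errCode);
  }
}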
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=711734&r1=711733&r2=711734&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Nov  5 15:03:35 2008
@@ -26,7 +26,6 @@
 import java.lang.Exception;
 import java.io.IOException;
 import java.nio.channels.FileChannel;
-import java.nio.ByteBuffer;
 import java.util.Random;
 
 import junit.framework.TestCase;
@@ -77,7 +76,7 @@
       util.createFiles(fs, "/srcdat");
       util.waitReplication(fs, "/srcdat", (short)3);
       String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(-1 != outStr.indexOf("HEALTHY"));
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       System.out.println(outStr);
       if (fs != null) {try{fs.close();} catch(Exception e){}}
       cluster.shutdown();
@@ -86,7 +85,7 @@
       cluster = new MiniDFSCluster(conf, 0, false, null);
       outStr = runFsck(conf, 1, true, "/");
       // expect the result is corrupt
-      assertTrue(outStr.contains("CORRUPT"));
+      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
       System.out.println(outStr);
       
       // bring up data nodes & cleanup cluster
@@ -112,7 +111,7 @@
       util.createFiles(fs, "/srcdat");
       util.waitReplication(fs, "/srcdat", (short)3);
       String outStr = runFsck(conf, 0, true, "/non-existent");
-      assertEquals(-1, outStr.indexOf("HEALTHY"));
+      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
       System.out.println(outStr);
       util.cleanup(fs, "/srcdat");
     } finally {
@@ -135,7 +134,7 @@
       util.createFiles(fs, topDir);
       util.waitReplication(fs, topDir, (short)3);
       String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains("HEALTHY"));
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       
       // Corrupt a block by deleting it
       String[] fileNames = util.getFileNames(topDir);
@@ -154,7 +153,7 @@
 
       // We expect the filesystem to be corrupted
       outStr = runFsck(conf, 1, false, "/");
-      while (!outStr.contains("CORRUPT")) {
+      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
         try {
           Thread.sleep(100);
         } catch (InterruptedException ignore) {
@@ -164,11 +163,11 @@
       
       // Fix the filesystem by moving corrupted files to lost+found
       outStr = runFsck(conf, 1, true, "/", "-move");
-      assertTrue(outStr.contains("CORRUPT"));
+      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
       
       // Check to make sure we have healthy filesystem
       outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains("HEALTHY")); 
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
       util.cleanup(fs, topDir);
       if (fs != null) {try{fs.close();} catch(Exception e){}}
       cluster.shutdown();
@@ -192,7 +191,7 @@
       util.createFiles(fs, topDir);
       util.waitReplication(fs, topDir, (short)3);
       String outStr = runFsck(conf, 0, true, "/");
-      assertTrue(outStr.contains("HEALTHY"));
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       // Open a file for writing and do not close for now
       Path openFile = new Path(topDir + "/openFile");
       FSDataOutputStream out = fs.create(openFile);
@@ -204,7 +203,7 @@
       // We expect the filesystem to be HEALTHY and show one open file
       outStr = runFsck(conf, 0, true, topDir);
       System.out.println(outStr);
-      assertTrue(outStr.contains("HEALTHY"));
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       assertFalse(outStr.contains("OPENFORWRITE")); 
       // Use -openforwrite option to list open files
       outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
@@ -216,7 +215,7 @@
       // Now, fsck should show HEALTHY fs and should not show any open files
       outStr = runFsck(conf, 0, true, topDir);
       System.out.println(outStr);
-      assertTrue(outStr.contains("HEALTHY"));
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
       assertFalse(outStr.contains("OPENFORWRITE"));
       util.cleanup(fs, topDir);
       if (fs != null) {try{fs.close();} catch(Exception e){}}
@@ -237,7 +236,9 @@
     Random random = new Random();
     String outStr = null;
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = null;
+    try {
+    cluster = new MiniDFSCluster(conf, 3, true, null);
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testCorruptBlock");
@@ -249,7 +250,7 @@
     // Make sure filesystem is in healthy state
     outStr = runFsck(conf, 0, true, "/");
     System.out.println(outStr);
-    assertTrue(outStr.contains("HEALTHY"));
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
     // corrupt replicas 
     File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
@@ -293,9 +294,45 @@
     // Check if fsck reports the same
     outStr = runFsck(conf, 1, true, "/");
     System.out.println(outStr);
-    assertTrue(outStr.contains("CORRUPT"));
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
     assertTrue(outStr.contains("testCorruptBlock"));
-
-    cluster.shutdown();
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+  
+  /** Test if fsck can return -1 in case of failure
+   * 
+   * @throws Exception
+   */
+  public void testFsckError() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      // bring up a one-node cluster
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      String fileName = "/test.txt";
+      Path filePath = new Path(fileName);
+      FileSystem fs = cluster.getFileSystem();
+      
+      // create a one-block file
+      DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
+      DFSTestUtil.waitReplication(fs, filePath, (short)1);
+      
+      // intentionally corrupt NN data structure
+      INodeFile node = (INodeFile)cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
+      assertEquals(node.blocks.length, 1);
+      node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
+      
+      // run fsck and expect a failure with -1 as the error code
+      String outStr = runFsck(conf, -1, true, fileName);
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
+      
+      // clean up file system
+      fs.delete(filePath, true);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
   }
 }


