hadoop-common-commits mailing list archives

From: dhr...@apache.org
Subject: svn commit: r617690 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/FSNamesystem.java src/java/org/apache/hadoop/dfs/NamenodeFsck.java src/test/org/apache/hadoop/dfs/TestFsck.java
Date: Fri, 01 Feb 2008 22:49:07 GMT
Author: dhruba
Date: Fri Feb  1 14:49:01 2008
New Revision: 617690

URL: http://svn.apache.org/viewvc?rev=617690&view=rev
Log:
HADOOP-2755. Fix fsck performance degradation because of permissions
issue.  (Tsz Wo (Nicholas), SZE via dhruba)
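
For context when reading the diffs below: the change hoists two cluster-wide queries (the rack count and the live-datanode count) out of NamenodeFsck's per-file check() loop and adds a lightweight FSNamesystem.getNumberOfDatanodes() accessor, so the expensive report-building call is no longer made once per file. A minimal, self-contained sketch of that pattern follows; the class and method names in it are illustrative stand-ins, not Hadoop APIs.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class HoistedQuerySketch {
      // The list the "namesystem" already maintains.
      static final List<String> liveNodes = Arrays.asList("dn1", "dn2", "dn3", "dn4");

      // Expensive stand-in: copies every entry into a fresh report
      // (analogous to building the full datanode report in the old check() body).
      static List<String> buildFullNodeReport() {
        return new ArrayList<String>(liveNodes);
      }

      // Cheap stand-in: just the size of the already-maintained list
      // (analogous to the new getNumberOfDatanodes accessor in the diff).
      static int countLiveNodes() {
        return liveNodes.size();
      }

      public static void main(String[] args) {
        List<String> paths = Arrays.asList("/a", "/b", "/c");

        // Hoisted out of the loop: queried once per fsck run, not once per file.
        int totalNodes = countLiveNodes();

        for (String path : paths) {
          // Per-file work no longer re-queries the cluster.
          System.out.println(path + ": checked against " + totalNodes + " live nodes");
        }
      }
    }

In the actual patch the counts are cached on the FsckResult before the file loop in NamenodeFsck.fsck(), which is what removes the per-file overhead reported in HADOOP-2755.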


Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=617690&r1=617689&r2=617690&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Feb  1 14:49:01 2008
@@ -656,6 +656,9 @@
     HADOOP-2740. Fix HOD to work with the configuration variables changed in
     HADOOP-2404. (Hemanth Yamijala via omalley)
     
+    HADOOP-2755. Fix fsck performance degradation because of permissions 
+    issue.  (Tsz Wo (Nicholas), SZE via dhruba)
+
 Release 0.15.3 - 2008-01-18
 
   BUG FIXES

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=617690&r1=617689&r2=617690&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Fri Feb  1 14:49:01 2008
@@ -2801,6 +2801,10 @@
     }
   }
 
+  int getNumberOfDatanodes(DatanodeReportType type) {
+    return getDatanodeListForReport(type).size(); 
+  }
+
   private synchronized ArrayList<DatanodeDescriptor> getDatanodeListForReport(
                                                       DatanodeReportType type) {        
         
     

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?rev=617690&r1=617689&r2=617690&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Fri Feb  1 14:49:01 2008
@@ -117,6 +117,9 @@
     try {
       DFSFileInfo[] files = nn.namesystem.dir.getListing(path);
       FsckResult res = new FsckResult();
+      res.totalRacks = nn.getNetworkTopology().getNumOfRacks();
+      res.totalDatanodes = nn.namesystem.getNumberOfDatanodes(
+          DatanodeReportType.LIVE);
       res.setReplication((short) conf.getInt("dfs.replication", 3));
       if (files != null) {
         for (int i = 0; i < files.length; i++) {
@@ -137,9 +140,7 @@
   }
   
   private void check(DFSFileInfo file, FsckResult res) throws IOException {
-    res.totalRacks = nn.getNetworkTopology().getNumOfRacks();
-    res.totalDatanodes = nn.getDatanodeReport(DatanodeReportType.LIVE).length;
-    int minReplication = FSNamesystem.getFSNamesystem().getMinReplication();
+    int minReplication = nn.namesystem.getMinReplication();
     String path = file.getPath().toString();
 
     if (file.isDir()) {

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java?rev=617690&r1=617689&r2=617690&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java Fri Feb  1 14:49:01 2008
@@ -23,6 +23,8 @@
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.Level;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.util.ToolRunner;
@@ -31,6 +33,18 @@
  * A JUnit test for doing fsck
  */
 public class TestFsck extends TestCase {
+  static String runFsck(Configuration conf, String path) throws Exception {
+    PrintStream oldOut = System.out;
+    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+    PrintStream newOut = new PrintStream(bStream, true);
+    System.setOut(newOut);
+    ((Log4JLogger)PermissionChecker.LOG).getLogger().setLevel(Level.ALL);
+    assertEquals(0, ToolRunner.run(new DFSck(conf), new String[] {path}));
+    ((Log4JLogger)PermissionChecker.LOG).getLogger().setLevel(Level.INFO);
+    System.setOut(oldOut);
+    return bStream.toString();
+  }
+
   /** do fsck */
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
@@ -41,13 +55,7 @@
       cluster = new MiniDFSCluster(conf, 4, true, null);
       fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
-      PrintStream oldOut = System.out;
-      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
-      PrintStream newOut = new PrintStream(bStream, true);
-      System.setOut(newOut);
-      assertEquals(0, ToolRunner.run(new DFSck(conf), new String[] {"/"}));
-      System.setOut(oldOut);
-      String outStr = bStream.toString();
+      String outStr = runFsck(conf, "/");
       assertTrue(-1 != outStr.indexOf("HEALTHY"));
       System.out.println(outStr);
       if (fs != null) {try{fs.close();} catch(Exception e){}}
@@ -55,13 +63,7 @@
       
       // restart the cluster; bring up namenode but not the data nodes
       cluster = new MiniDFSCluster(conf, 0, false, null);
-      oldOut = System.out;
-      bStream = new ByteArrayOutputStream();
-      newOut = new PrintStream(bStream, true);
-      System.setOut(newOut);
-      assertEquals(0, ToolRunner.run(new DFSck(conf), new String[] {"/"}));
-      System.setOut(oldOut);
-      outStr = bStream.toString();
+      outStr = runFsck(conf, "/");
       // expect the result is corrupt
       assertTrue(outStr.contains("CORRUPT"));
       System.out.println(outStr);
@@ -86,13 +88,7 @@
       cluster = new MiniDFSCluster(conf, 4, true, null);
       fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
-      PrintStream oldOut = System.out;
-      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
-      PrintStream newOut = new PrintStream(bStream, true);
-      System.setOut(newOut);
-      assertEquals(0, ToolRunner.run(new DFSck(conf), new String[] {"/non-existent"}));
-      System.setOut(oldOut);
-      String outStr = bStream.toString();
+      String outStr = runFsck(conf, "/non-existent");
       assertEquals(-1, outStr.indexOf("HEALTHY"));
       System.out.println(outStr);
       util.cleanup(fs, "/srcdat");


