hadoop-common-commits mailing list archives

From: s..@apache.org
Subject: svn commit: r614771 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java src/java/org/apache/hadoop/dfs/FSNamesystem.java src/java/org/apache/hadoop/dfs/NamenodeFsck.java src/test/org/apache/hadoop/dfs/TestFsck.java
Date: Thu, 24 Jan 2008 02:43:36 GMT
Author: shv
Date: Wed Jan 23 18:43:27 2008
New Revision: 614771

URL: http://svn.apache.org/viewvc?rev=614771&view=rev
Log:
HADOOP-2633. Fsck should call name-node methods directly rather than through rpc. Contributed
by Tsz Wo (Nicholas), SZE.

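In outline, the change drops the fsck servlet's loop-back RPC and calls the
name-node's own objects in-process. A minimal before/after sketch, using only
names that appear in the diffs below (setup and error handling elided):

    // Before: fsck, although it runs inside the name-node, created an RPC
    // proxy back to its own server and went through ClientProtocol.
    ClientProtocol namenode =
        DFSClient.createNamenode(nn.getNameNodeAddress(), conf);
    LocatedBlocks blocks = namenode.getBlockLocations(path, 0, fileLen);

    // After: invoke FSNamesystem directly, avoiding serialization and a
    // trip through the RPC server's handler queue.
    LocatedBlocks blocks = nn.namesystem.getBlockLocations(path, 0, fileLen);
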
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=614771&r1=614770&r2=614771&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Jan 23 18:43:27 2008
@@ -538,6 +538,9 @@
     HADOOP-2549. Correct disk size computation so that data-nodes could switch 
     to other local drives if current is full. (Hairong Kuang via shv)
 
+    HADOOP-2633. Fsck should call name-node methods directly rather than 
+    through rpc. (Tsz Wo (Nicholas), SZE via shv)
+
 Release 0.15.3 - 2008-01-18
 
   BUG FIXES

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java?rev=614771&r1=614770&r2=614771&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java Wed Jan 23 18:43:27 2008
@@ -2083,7 +2083,7 @@
         // Get the all the blocks.
         LocatedBlocks blockLoc = null;
         try {
-          blockLoc = getFSNamesystem().getBlockLocationsInternal(null,
+          blockLoc = getFSNamesystem().getBlockLocations(
               file.getPath().toString(), 0, file.getLen());
           int numBlocks = blockLoc.locatedBlockCount();
           for (int i=0; i<numBlocks; i++) {

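The upgrade code previously passed null as the clientMachine argument; with
the FSNamesystem overloads introduced below, in-process callers drop that
parameter, and the replica distance sort, entirely:

    // No client machine: locations come back unsorted, which is fine for
    // an in-process pass that visits every replica of every block.
    LocatedBlocks blockLoc = getFSNamesystem().getBlockLocations(
        file.getPath().toString(), 0, file.getLen());
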
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=614771&r1=614770&r2=614771&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Jan 23 18:43:27 2008
@@ -724,46 +724,45 @@
   /**
    * Get block locations within the specified range.
    * 
-   * @see ClientProtocol#open(String, long, long)
-   * @see ClientProtocol#getBlockLocations(String, long, long)
+   * @see #getBlockLocations(String, long, long)
    */
   LocatedBlocks getBlockLocations(String clientMachine, String src,
       long offset, long length) throws IOException {
     if (isPermissionEnabled) {
       checkPathAccess(src, FsAction.READ);
     }
-    return getBlockLocationsInternal(clientMachine, src, offset, length);
+
+    LocatedBlocks blocks = getBlockLocations(src, offset, length);
+    if (blocks != null) {
+      //sort the blocks
+      DatanodeDescriptor client = host2DataNodeMap.getDatanodeByHost(
+          clientMachine);
+      for (LocatedBlock b : blocks.getLocatedBlocks()) {
+        clusterMap.pseudoSortByDistance(client, b.getLocations());
+      }
+    }
+    return blocks;
   }
-  LocatedBlocks getBlockLocationsInternal(String clientMachine,
-                                  String src, 
-                                  long offset, 
-                                  long length
-                                  ) throws IOException {
+
+  /**
+   * Get block locations within the specified range.
+   * 
+   * @see ClientProtocol#open(String, long, long)
+   * @see ClientProtocol#getBlockLocations(String, long, long)
+   */
+  LocatedBlocks getBlockLocations(String src, long offset, long length
+      ) throws IOException {
     if (offset < 0) {
       throw new IOException("Negative offset is not supported. File: " + src );
     }
     if (length < 0) {
       throw new IOException("Negative length is not supported. File: " + src );
     }
-
-    DatanodeDescriptor client = null;
-    LocatedBlocks blocks =  getBlockLocationInternal(dir.getFileINode(src),
-                                              offset, length, 
-                                              Integer.MAX_VALUE);
-    if (blocks == null) {
-      return null;
-    }
-    client = host2DataNodeMap.getDatanodeByHost(clientMachine);
-    for (Iterator<LocatedBlock> it = blocks.getLocatedBlocks().iterator();
-         it.hasNext();) {
-      LocatedBlock block = it.next();
-      clusterMap.pseudoSortByDistance(client, 
-                                (DatanodeDescriptor[])(block.getLocations()));
-    }
-    return blocks;
+    return getBlockLocationsInternal(dir.getFileINode(src), offset, length,
+        Integer.MAX_VALUE);  
   }
-  
-  private synchronized LocatedBlocks getBlockLocationInternal(INodeFile inode,
+
+  private synchronized LocatedBlocks getBlockLocationsInternal(INodeFile inode,
                                                        long offset, 
                                                        long length,
                                                        int nrBlocksToReturn) {
@@ -1433,7 +1432,7 @@
    * Remove the indicated filename from the namespace.  This may
    * invalidate some blocks that make up the file.
    */
-  private synchronized boolean deleteInternal(String src, 
+  synchronized boolean deleteInternal(String src, 
       boolean enforceSafeMode, boolean enforcePermission) throws IOException {
     NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
     if (enforceSafeMode && isInSafeMode())

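After this hunk, block-location lookup is layered in three methods; a sketch
of the resulting shape (signatures as in the diff, bodies abbreviated):

    // 1. Client-facing overload: checks READ permission, then sorts each
    //    block's replicas by network distance to the calling machine via
    //    clusterMap.pseudoSortByDistance().
    LocatedBlocks getBlockLocations(String clientMachine, String src,
        long offset, long length) throws IOException { ... }

    // 2. Overload for in-process callers such as fsck and the block-CRC
    //    upgrade: validates offset/length only; no permission check, no
    //    replica sorting.
    LocatedBlocks getBlockLocations(String src, long offset, long length
        ) throws IOException { ... }

    // 3. Private worker: resolves the INodeFile and builds the block list
    //    while holding the namesystem lock.
    private synchronized LocatedBlocks getBlockLocationsInternal(
        INodeFile inode, long offset, long length, int nrBlocksToReturn) { ... }
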
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?rev=614771&r1=614770&r2=614771&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Wed Jan 23 18:43:27 2008
@@ -34,6 +34,7 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 
 /**
  * This class provides rudimentary checking of DFS volumes for errors and
@@ -68,7 +69,6 @@
   public static final int FIXING_DELETE = 2;
   
   private NameNode nn;
-  private ClientProtocol namenodeproxy;
   private String lostFound = null;
   private boolean lfInited = false;
   private boolean lfInitedOk = false;
@@ -96,7 +96,6 @@
                       HttpServletResponse response) throws IOException {
     this.conf = conf;
     this.nn = nn;
-    this.namenodeproxy =DFSClient.createNamenode(nn.getNameNodeAddress(),conf);
     this.out = response.getWriter();
     for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
       String key = it.next();
@@ -116,7 +115,7 @@
    */
   public void fsck() throws IOException {
     try {
-      DFSFileInfo[] files = namenodeproxy.getListing(path);
+      DFSFileInfo[] files = nn.namesystem.dir.getListing(path);
       FsckResult res = new FsckResult();
       res.setReplication((short) conf.getInt("dfs.replication", 3));
       if (files != null) {
@@ -141,12 +140,14 @@
     res.totalRacks = nn.getNetworkTopology().getNumOfRacks();
     res.totalDatanodes = nn.getDatanodeReport(DatanodeReportType.LIVE).length;
     int minReplication = FSNamesystem.getFSNamesystem().getMinReplication();
+    String path = file.getPath().toString();
+
     if (file.isDir()) {
       if (showFiles) {
-        out.println(file.getPath().toString() + " <dir>");
+        out.println(path + " <dir>");
       }
       res.totalDirs++;
-      DFSFileInfo[] files =namenodeproxy.getListing(file.getPath().toString());
+      DFSFileInfo[] files = nn.namesystem.dir.getListing(path);
       for (int i = 0; i < files.length; i++) {
         check(files[i], res);
       }
@@ -155,11 +156,10 @@
     res.totalFiles++;
     long fileLen = file.getLen();
     res.totalSize += fileLen;
-    LocatedBlocks blocks = namenodeproxy.getBlockLocations(
-        file.getPath().toString(), 0, fileLen);
+    LocatedBlocks blocks = nn.namesystem.getBlockLocations(path, 0, fileLen);
     res.totalBlocks += blocks.locatedBlockCount();
     if (showFiles) {
-      out.print(file.getPath().toString() + " " + fileLen + " bytes, " +
+      out.print(path + " " + fileLen + " bytes, " +
           blocks.locatedBlockCount() + " block(s): ");
     }  else {
       out.print('.');
@@ -189,7 +189,7 @@
         res.numUnderReplicatedBlocks += 1;
         underReplicatedPerFile++;
         if (!showFiles) {
-          out.print("\n" + file.getPath().toString() + ": ");
+          out.print("\n" + path + ": ");
         }
         out.println(" Under replicated " + block.getBlockName() +
                     ". Target Replicas is " +
@@ -205,7 +205,7 @@
         if (!showFiles) {
           if(underReplicatedPerFile == 0)
             out.println();
-          out.print(file.getPath().toString() + ": ");
+          out.print(path + ": ");
         }
         out.println(" Replica placement policy is violated for " + 
                     block.getBlockName() +
@@ -238,9 +238,8 @@
     }
     if (missing > 0) {
       if (!showFiles) {
-        out.println("\n" + file.getPath().toString() + ": " +
-                    "MISSING " + missing + " blocks of total size " + 
-                    missize + " B.");
+        out.println("\n" + path + ": MISSING " + missing
+            + " blocks of total size " + missize + " B.");
       }
       res.corruptFiles++;
       switch(fixing) {
@@ -250,7 +249,7 @@
         lostFoundMove(file, blocks);
         break;
       case FIXING_DELETE:
-        namenodeproxy.delete(file.getPath().toString());
+        nn.namesystem.deleteInternal(path, true, false);
       }
     }
     if (showFiles) {
@@ -278,7 +277,9 @@
     String target = lostFound + file.getPath();
     String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
     try {
-      if (!namenodeproxy.mkdirs(target, file.getPermission())) {
+      PermissionStatus ps = new PermissionStatus(
+          file.getOwner(), file.getGroup(), file.getPermission()); 
+      if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
         LOG.warn(errmsg);
         return;
       }

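Two of the replacements above deserve a note. The delete call maps onto the
deleteInternal(src, enforceSafeMode, enforcePermission) signature changed in
the FSNamesystem diff, so the literals read as:

    // Enforce safe mode (true) but skip the permission check (false):
    // fsck already runs with the name-node's own authority.
    nn.namesystem.deleteInternal(path, true, false);

And since the in-process mkdirs takes a full PermissionStatus rather than the
bare FsPermission the RPC method accepted, lostFoundMove() now assembles one
from the file's own owner, group, and mode before creating the target
directory.
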
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java?rev=614771&r1=614770&r2=614771&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java Wed Jan 23 18:43:27 2008
@@ -31,29 +31,15 @@
  * A JUnit test for doing fsck
  */
 public class TestFsck extends TestCase {
- 
-  public TestFsck(String testName) {
-    super(testName);
-  }
-
-  
-  
-  @Override
-  protected void setUp() throws Exception {
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-  }
-  
   /** do fsck */
   public void testFsck() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
+    FileSystem fs = null;
     try {
       Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(conf, 4, true, null);
-      FileSystem fs = cluster.getFileSystem();
+      fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       PrintStream oldOut = System.out;
       ByteArrayOutputStream bStream = new ByteArrayOutputStream();
@@ -64,6 +50,7 @@
       String outStr = bStream.toString();
       assertTrue(-1 != outStr.indexOf("HEALTHY"));
       System.out.println(outStr);
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
       cluster.shutdown();
       
       // restart the cluster; bring up namenode but not the data nodes
@@ -82,20 +69,22 @@
       // bring up data nodes & cleanup cluster
       cluster.startDataNodes(conf, 4, true, null, null);
       cluster.waitActive();
-      util.cleanup(cluster.getFileSystem(), "/srcdat");
+      fs = cluster.getFileSystem();
+      util.cleanup(fs, "/srcdat");
     } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
       if (cluster != null) { cluster.shutdown(); }
     }
   }
-  
-  /** do fsck on non-existent path*/
+
   public void testFsckNonExistent() throws Exception {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
     MiniDFSCluster cluster = null;
+    FileSystem fs = null;
     try {
       Configuration conf = new Configuration();
       cluster = new MiniDFSCluster(conf, 4, true, null);
-      FileSystem fs = cluster.getFileSystem();
+      fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       PrintStream oldOut = System.out;
       ByteArrayOutputStream bStream = new ByteArrayOutputStream();
@@ -108,7 +97,8 @@
       System.out.println(outStr);
       util.cleanup(fs, "/srcdat");
     } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
       if (cluster != null) { cluster.shutdown(); }
     }
   }
-}
+}
\ No newline at end of file

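The recurring cleanup idiom introduced in both tests, shown once in isolation
(a sketch; the real tests interleave fsck runs and cluster restarts):

    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
      cluster = new MiniDFSCluster(conf, 4, true, null);
      fs = cluster.getFileSystem();
      // ... run fsck against files created on fs ...
    } finally {
      // Close the client handle before (and independently of) the cluster,
      // swallowing close() failures so they cannot mask a test failure.
      if (fs != null) { try { fs.close(); } catch (Exception e) {} }
      if (cluster != null) { cluster.shutdown(); }
    }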

