hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dhr...@apache.org
Subject svn commit: r943306 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/
Date Tue, 11 May 2010 21:47:14 GMT
Author: dhruba
Date: Tue May 11 21:47:13 2010
New Revision: 943306

URL: http://svn.apache.org/viewvc?rev=943306&view=rev
Log:
HDFS-1138. Prevent erroneous update of the modification time of a directory
when the fsimage is loaded. (Dmytro Molkov via dhruba)

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=943306&r1=943305&r2=943306&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue May 11 21:47:13 2010
@@ -878,6 +878,9 @@ Release 0.21.0 - Unreleased
     HDFS-1141. Closing a file is successful only if the client still has a
     valid lease. (Todd Lipcon via dhruba)
 
+    HDFS-1138. Prevent erroneous updation of modification time of a directory
+    when fsimage loads. (Dmytro Molkov via dhruba)
+
 Release 0.20.3 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=943306&r1=943305&r2=943306&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue
May 11 21:47:13 2010
@@ -239,7 +239,8 @@ class FSDirectory implements Closeable {
                               long atime,
                               long nsQuota,
                               long dsQuota,
-                              long preferredBlockSize) 
+                              long preferredBlockSize,
+                              boolean propagateModTime) 
                               throws UnresolvedLinkException {
     // NOTE: This does not update space counts for parents
     // create new inode
@@ -264,7 +265,8 @@ class FSDirectory implements Closeable {
     INodeDirectory newParent = null;
     synchronized (rootDir) {
       try {
-        newParent = rootDir.addToParent(src, newNode, parentINode, false);
+        newParent = rootDir.addToParent(src, newNode, parentINode,
+                                        false, propagateModTime);
       } catch (FileNotFoundException e) {
         return null;
       }
@@ -1507,7 +1509,7 @@ class FSDirectory implements Closeable {
       throw new NullPointerException("Panic: parent does not exist");
     }
     T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
-        child, inheritPermission);
+        child, inheritPermission, true);
     if (addedNode == null) {
       updateCount(pathComponents, pos, -counts.getNsCount(), 
           -childDiskspace, true);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=943306&r1=943305&r2=943306&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Tue May
11 21:47:13 2010
@@ -1157,9 +1157,10 @@ public class FSImage extends Storage {
           parentPath = getParent(path);
         }
         // add new inode
+        // without propagating modification time to parent
         parentINode = fsDir.addToParent(path, parentINode, permissions,
                                         blocks, symlink, replication, modificationTime, 
-                                        atime, nsQuota, dsQuota, blockSize);
+                                        atime, nsQuota, dsQuota, blockSize, false);
       }
       
       // load datanode info

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=943306&r1=943305&r2=943306&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
(original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
Tue May 11 21:47:13 2010
@@ -260,10 +260,14 @@ class INodeDirectory extends INode {
    * 
    * @param node INode to insert
    * @param inheritPermission inherit permission from parent?
+   * @param setModTime set modification time for the parent node
+   *                   not needed when replaying the addition and 
+   *                   the parent already has the proper mod time
    * @return  null if the child with this name already exists; 
    *          node, otherwise
    */
-  <T extends INode> T addChild(final T node, boolean inheritPermission) {
+  <T extends INode> T addChild(final T node, boolean inheritPermission,
+                                              boolean setModTime) {
     if (inheritPermission) {
       FsPermission p = getFsPermission();
       //make sure the  permission has wx for the user
@@ -283,7 +287,8 @@ class INodeDirectory extends INode {
     node.parent = this;
     children.add(-low - 1, node);
     // update modification time of the parent directory
-    setModificationTime(node.getModificationTime());
+    if (setModTime)
+      setModificationTime(node.getModificationTime());
     if (node.getGroupName() == null) {
       node.setGroup(getGroupName());
     }
@@ -312,7 +317,7 @@ class INodeDirectory extends INode {
    */
   <T extends INode> T addNode(String path, T newNode, boolean inheritPermission
       ) throws FileNotFoundException, UnresolvedLinkException  {
-    if(addToParent(path, newNode, null, inheritPermission) == null)
+    if(addToParent(path, newNode, null, inheritPermission, true) == null)
       return null;
     return newNode;
   }
@@ -333,6 +338,16 @@ class INodeDirectory extends INode {
                                       boolean inheritPermission
                                     ) throws FileNotFoundException, 
                                              UnresolvedLinkException {
+    return addToParent(path, newNode, parent, inheritPermission, true);
+  }
+  <T extends INode> INodeDirectory addToParent(
+                                      String path,
+                                      T newNode,
+                                      INodeDirectory parent,
+                                      boolean inheritPermission,
+                                      boolean propagateModTime
+                                    ) throws FileNotFoundException, 
+                                             UnresolvedLinkException {
     byte[][] pathComponents = getPathComponents(path);
     assert pathComponents != null : "Incorrect path " + path;
     int pathLen = pathComponents.length;
@@ -353,7 +368,7 @@ class INodeDirectory extends INode {
     }
     // insert into the parent children list
     newNode.name = pathComponents[pathLen-1];
-    if(parent.addChild(newNode, inheritPermission) == null)
+    if(parent.addChild(newNode, inheritPermission, propagateModTime) == null)
       return null;
     return parent;
   }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=943306&r1=943305&r2=943306&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java Tue May 11
21:47:13 2010
@@ -72,6 +72,26 @@ public class TestRestartDFS extends Test
       final FileStatus newdirstatus = fs.getFileStatus(dirpath);
       assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
       assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+      rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+    try {
+      // This is a second restart to check that after the first restart
+      // the image written in parallel to both places did not get corrupted
+      cluster = new MiniDFSCluster(conf, 4, false, null);
+      FileSystem fs = cluster.getFileSystem();
+      assertTrue("Filesystem corrupted after restart.",
+                 files.checkFiles(fs, dir));
+
+      final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+      assertEquals(rootmtime, newrootstatus.getModificationTime());
+      assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+      assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+      final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+      assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+      assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
 
       files.cleanup(fs, dir);
     } finally {



Mime
View raw message