hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From t...@apache.org
Subject svn commit: r1134397 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Fri, 10 Jun 2011 17:57:20 GMT
Author: todd
Date: Fri Jun 10 17:57:20 2011
New Revision: 1134397

URL: http://svn.apache.org/viewvc?rev=1134397&view=rev
Log:
HDFS-2041. OP_CONCAT_DELETE doesn't properly restore modification time of the concatenated
file when edit logs are replayed. Contributed by Todd Lipcon.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun 10 17:57:20 2011
@@ -716,6 +716,9 @@ Trunk (unreleased changes)
     HDFS-1998. Federation: Make refresh-namenodes.sh refresh all the
     namenode. (Tanping Wang via suresh)
 
+    HDFS-2041. OP_CONCAT_DELETE doesn't properly restore modification time
+    of the concatenated file when edit logs are replayed. (todd)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Jun 10 17:57:20 2011
@@ -930,10 +930,10 @@ class FSDirectory implements Closeable {
     try {
       // actual move
       waitForReady();
-
-      unprotectedConcat(target, srcs);
+      long timestamp = now();
+      unprotectedConcat(target, srcs, timestamp);
       // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, now());
+      fsImage.getEditLog().logConcat(target, srcs, timestamp);
     } finally {
       writeUnlock();
     }
@@ -948,7 +948,7 @@ class FSDirectory implements Closeable {
    * Must be public because also called from EditLogs
    * NOTE: - it does not update quota (not needed for concat)
    */
-  public void unprotectedConcat(String target, String [] srcs) 
+  public void unprotectedConcat(String target, String [] srcs, long timestamp) 
       throws UnresolvedLinkException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "+target);
@@ -979,9 +979,8 @@ class FSDirectory implements Closeable {
       count++;
     }
     
-    long now = now();
-    trgInode.setModificationTimeForce(now);
-    trgParent.setModificationTime(now);
+    trgInode.setModificationTimeForce(timestamp);
+    trgParent.setModificationTime(timestamp);
     // update quota on the parent directory ('count' files removed, 0 space)
     unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0);
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Jun 10 17:57:20 2011
@@ -238,7 +238,8 @@ public class FSEditLogLoader {
             numOpConcatDelete++;
 
             ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
-            fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs);
+            fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs,
+                concatDeleteOp.timestamp);
             break;
           }
           case OP_RENAME_OLD: {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri Jun 10 17:57:20 2011
@@ -257,7 +257,6 @@ public abstract class FSEditLogOp {
     int length;
     String path;
     long timestamp;
-    long atime;
     PermissionStatus permissions;
 
     private MkdirOp() {
@@ -280,9 +279,7 @@ public abstract class FSEditLogOp {
       // However, currently this is not being updated/used because of
       // performance reasons.
       if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
-        this.atime = readLong(in);
-      } else {
-        this.atime = 0;
+        /*unused this.atime = */readLong(in);
       }
 
       if (logVersion <= -11) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Fri Jun 10 17:57:20 2011
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -86,16 +87,6 @@ public class TestHDFSConcat {
     }
   }
   
-  private void runCommand(DFSAdmin admin, String args[], boolean expectEror)
-  throws Exception {
-    int val = admin.run(args);
-    if (expectEror) {
-      assertEquals(val, -1);
-    } else {
-      assertTrue(val>=0);
-    }
-  }
-
   /**
    * Concatenates 10 files into one
    * Verifies the final size, deletion of the file, number of blocks
@@ -221,6 +212,46 @@ public class TestHDFSConcat {
     assertEquals(trgLen, totalLen+sFileLen);
     
   }
+  
+  /**
+   * Test that the concat operation is properly persisted in the
+   * edit log, and properly replayed on restart.
+   */
+  @Test
+  public void testConcatInEditLog() throws Exception {
+    final Path TEST_DIR = new Path("/testConcatInEditLog");
+    final long FILE_LEN = blockSize;
+    
+    // 1. Concat some files
+    Path[] srcFiles = new Path[3];
+    for (int i = 0; i < srcFiles.length; i++) {
+      Path path = new Path(TEST_DIR, "src-" + i);
+      DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
+      srcFiles[i] = path;
+    }    
+    Path targetFile = new Path(TEST_DIR, "target");
+    DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);
+    
+    dfs.concat(targetFile, srcFiles);
+    
+    // 2. Verify the concat operation basically worked, and record
+    // file status.
+    assertTrue(dfs.exists(targetFile));
+    FileStatus origStatus = dfs.getFileStatus(targetFile);
+
+    // 3. Restart NN to force replay from edit log
+    cluster.restartNameNode(true);
+    
+    // 4. Verify concat operation was replayed correctly and file status
+    // did not change.
+    assertTrue(dfs.exists(targetFile));
+    assertFalse(dfs.exists(srcFiles[0]));
+
+    FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);
+
+    assertEquals(origStatus.getModificationTime(),
+        statusAfterRestart.getModificationTime());
+  }
 
   // compare content
   private void checkFileContent(byte[] concat, byte[][] bytes ) {



Mime
View raw message