Subject: svn commit: r1134397 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date: Fri, 10 Jun 2011 17:57:20 -0000
To: hdfs-commits@hadoop.apache.org
From: todd@apache.org

Author: todd
Date: Fri Jun 10 17:57:20 2011
New Revision: 1134397

URL: http://svn.apache.org/viewvc?rev=1134397&view=rev
Log:
HDFS-2041. OP_CONCAT_DELETE doesn't properly restore modification time of
the concatenated file when edit logs are replayed. Contributed by Todd Lipcon.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun 10 17:57:20 2011
@@ -716,6 +716,9 @@ Trunk (unreleased changes)
     HDFS-1998. Federation: Make refresh-namenodes.sh refresh all the
     namenode. (Tanping Wang via suresh)
 
+    HDFS-2041. OP_CONCAT_DELETE doesn't properly restore modification time
+    of the concatenated file when edit logs are replayed. (todd)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
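For context, a minimal standalone sketch of the bug this change fixes (toy
Java with illustrative names, not the actual NameNode code): if the apply
path reads the wall clock itself, replaying the same operation after a
restart stamps the file with the replay time instead of the original time.
Capturing one timestamp and threading it through both the in-memory update
and the logged op, as the diffs below do, keeps replay deterministic.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ReplayTimestampDemo {
      // Toy op; 'target' and 'timestamp' mirror the fields carried by
      // the real ConcatDeleteOp.
      static class Op {
        final String target;
        final long timestamp;
        Op(String target, long timestamp) {
          this.target = target;
          this.timestamp = timestamp;
        }
      }

      static final Map<String, Long> mtimes = new HashMap<String, Long>();
      static final List<Op> editLog = new ArrayList<Op>();

      // Buggy pattern this commit removes: the apply path reads the clock
      // itself, so a replay after restart yields a different mtime.
      static void applyBuggy(String target) {
        mtimes.put(target, System.currentTimeMillis());
      }

      // Fixed pattern: capture the timestamp once, use it for both the
      // in-memory update and the logged op, then reuse it on replay.
      static void concat(String target) {
        long timestamp = System.currentTimeMillis();
        apply(target, timestamp);                // actual move
        editLog.add(new Op(target, timestamp));  // do the commit
      }

      static void apply(String target, long timestamp) {
        mtimes.put(target, timestamp);
      }

      public static void main(String[] args) throws InterruptedException {
        concat("/target");
        long before = mtimes.get("/target");
        Thread.sleep(50);                        // pretend the NN restarted later
        for (Op op : editLog) {
          apply(op.target, op.timestamp);        // replay from the edit log
        }
        System.out.println("mtime preserved across replay: "
            + (before == mtimes.get("/target"))); // prints true
      }
    }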
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Jun 10 17:57:20 2011
@@ -930,10 +930,10 @@ class FSDirectory implements Closeable {
     try {
       // actual move
       waitForReady();
-
-      unprotectedConcat(target, srcs);
+      long timestamp = now();
+      unprotectedConcat(target, srcs, timestamp);
       // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, now());
+      fsImage.getEditLog().logConcat(target, srcs, timestamp);
     } finally {
       writeUnlock();
     }
@@ -948,7 +948,7 @@ class FSDirectory implements Closeable {
    * Must be public because also called from EditLogs
    * NOTE: - it does not update quota (not needed for concat)
    */
-  public void unprotectedConcat(String target, String [] srcs)
+  public void unprotectedConcat(String target, String [] srcs, long timestamp)
     throws UnresolvedLinkException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "+target);
@@ -979,9 +979,8 @@ class FSDirectory implements Closeable {
       count++;
     }
 
-    long now = now();
-    trgInode.setModificationTimeForce(now);
-    trgParent.setModificationTime(now);
+    trgInode.setModificationTimeForce(timestamp);
+    trgParent.setModificationTime(timestamp);
     // update quota on the parent directory ('count' files removed, 0 space)
     unprotectedUpdateCount(trgINodes, trgINodes.length-1, - count, 0);
   }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Jun 10 17:57:20 2011
@@ -238,7 +238,8 @@ public class FSEditLogLoader {
         numOpConcatDelete++;
 
         ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
-        fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs);
+        fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs,
+            concatDeleteOp.timestamp);
         break;
       }
       case OP_RENAME_OLD: {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri Jun 10 17:57:20 2011
@@ -257,7 +257,6 @@ public abstract class FSEditLogOp {
     int length;
     String path;
     long timestamp;
-    long atime;
    PermissionStatus permissions;
 
     private MkdirOp() {
@@ -280,9 +279,7 @@ public abstract class FSEditLogOp {
       // However, currently this is not being updated/used because of
       // performance reasons.
       if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
-        this.atime = readLong(in);
-      } else {
-        this.atime = 0;
+        /*unused this.atime = */readLong(in);
       }
 
       if (logVersion <= -11) {
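A small self-contained sketch (illustrative only, not HDFS code) of the
read-and-discard idiom the MkdirOp change above relies on: when a serialized
field becomes unused, its bytes must still be consumed from log layouts that
contain it, or every later field would be parsed from the wrong offset.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ReadAndDiscardDemo {
      public static void main(String[] args) throws IOException {
        // Write a record in an "old" layout: mtime, atime, mode.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeLong(1307728640000L);  // mtime
        out.writeLong(0L);              // atime, no longer used by the reader
        out.writeInt(0755);             // mode (octal literal)

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()));
        long mtime = in.readLong();
        /*unused atime = */in.readLong();  // still consume the bytes; skipping
                                           // this read would misalign 'mode'
        int mode = in.readInt();
        System.out.println("mtime=" + mtime
            + " mode=" + Integer.toOctalString(mode));  // mode=755
      }
    }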
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java?rev=1134397&r1=1134396&r2=1134397&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java Fri Jun 10 17:57:20 2011
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -86,16 +87,6 @@ public class TestHDFSConcat {
     }
   }
 
-  private void runCommand(DFSAdmin admin, String args[], boolean expectEror)
-      throws Exception {
-    int val = admin.run(args);
-    if (expectEror) {
-      assertEquals(val, -1);
-    } else {
-      assertTrue(val>=0);
-    }
-  }
-
   /**
    * Concatenates 10 files into one
    * Verifies the final size, deletion of the file, number of blocks
@@ -221,6 +212,46 @@ public class TestHDFSConcat {
 
     assertEquals(trgLen, totalLen+sFileLen);
   }
+
+  /**
+   * Test that the concat operation is properly persisted in the
+   * edit log, and properly replayed on restart.
+   */
+  @Test
+  public void testConcatInEditLog() throws Exception {
+    final Path TEST_DIR = new Path("/testConcatInEditLog");
+    final long FILE_LEN = blockSize;
+
+    // 1. Concat some files
+    Path[] srcFiles = new Path[3];
+    for (int i = 0; i < srcFiles.length; i++) {
+      Path path = new Path(TEST_DIR, "src-" + i);
+      DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
+      srcFiles[i] = path;
+    }
+    Path targetFile = new Path(TEST_DIR, "target");
+    DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);
+
+    dfs.concat(targetFile, srcFiles);
+
+    // 2. Verify the concat operation basically worked, and record
+    // file status.
+    assertTrue(dfs.exists(targetFile));
+    FileStatus origStatus = dfs.getFileStatus(targetFile);
+
+    // 3. Restart NN to force replay from edit log
+    cluster.restartNameNode(true);
+
+    // 4. Verify concat operation was replayed correctly and file status
+    // did not change.
+    assertTrue(dfs.exists(targetFile));
+    assertFalse(dfs.exists(srcFiles[0]));
+
+    FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);
+
+    assertEquals(origStatus.getModificationTime(),
+        statusAfterRestart.getModificationTime());
+  }
 
   // compare content
   private void checkFileContent(byte[] concat, byte[][] bytes ) {
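A hypothetical companion test, sketched under the assumption that the
TestHDFSConcat fixture above (dfs, cluster, blockSize, REPL_FACTOR) is in
scope; it is not part of this commit. Since unprotectedConcat now stamps the
target's parent directory with the same logged timestamp (via
trgParent.setModificationTime), the directory's modification time should
also survive an edit-log replay:

      /**
       * Hypothetical companion test (not part of this commit): verify the
       * parent directory's mtime, which unprotectedConcat also sets from
       * the logged timestamp, survives an edit-log replay.
       */
      @Test
      public void testConcatParentDirMtimeInEditLog() throws Exception {
        final Path TEST_DIR = new Path("/testConcatParentDirMtime");
        Path[] srcFiles = new Path[2];
        for (int i = 0; i < srcFiles.length; i++) {
          srcFiles[i] = new Path(TEST_DIR, "src-" + i);
          DFSTestUtil.createFile(dfs, srcFiles[i], blockSize, REPL_FACTOR, 1);
        }
        Path targetFile = new Path(TEST_DIR, "target");
        DFSTestUtil.createFile(dfs, targetFile, blockSize, REPL_FACTOR, 1);

        dfs.concat(targetFile, srcFiles);
        long dirMtimeBefore = dfs.getFileStatus(TEST_DIR).getModificationTime();

        // Restart forces the NameNode to rebuild the namespace from the edit log.
        cluster.restartNameNode(true);

        assertEquals(dirMtimeBefore,
            dfs.getFileStatus(TEST_DIR).getModificationTime());
      }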