From: kihwal@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Date: Thu, 20 Feb 2014 16:18:18 -0000
Subject: svn commit: r1570256 - in /hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ src/main/proto/ src/test/java/org/apac...
Message-Id: <20140220161819.2024423888E2@eris.apache.org>

Author: kihwal
Date: Thu Feb 20 16:18:18 2014
New Revision: 1570256

URL: http://svn.apache.org/r1570256
Log:
svn merge -c 1570255 merging from branch-2 to branch-2.4 to fix:HDFS-5962. Mtime and atime are not persisted for symbolic links.

Modified:
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
    hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Feb 20 16:18:18 2014
@@ -196,6 +196,9 @@ Release 2.4.0 - UNRELEASED

     HDFS-5979. Typo and logger fix for fsimage PB code. (wang)

+    HDFS-5962. Mtime and atime are not persisted for symbolic links. (Akira
+    Ajisaka via kihwal)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS

     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java Thu Feb 20 16:18:18 2014
@@ -227,8 +227,10 @@ public final class FSImageFormatPBINode
       INodeSection.INodeSymlink s = n.getSymlink();
       final PermissionStatus permissions = loadPermission(s.getPermission(),
           parent.getLoaderContext().getStringTable());
-      return new INodeSymlink(n.getId(), n.getName().toByteArray(), permissions,
-          0, 0, s.getTarget().toStringUtf8());
+      INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
+          permissions, s.getModificationTime(), s.getAccessTime(),
+          s.getTarget().toStringUtf8());
+      return sym;
     }

     private void loadRootINode(INodeSection.INode p) {
@@ -408,7 +410,9 @@ public final class FSImageFormatPBINode
       INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
           .newBuilder()
           .setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap()))
-          .setTarget(ByteString.copyFrom(n.getSymlink()));
+          .setTarget(ByteString.copyFrom(n.getSymlink()))
+          .setModificationTime(n.getModificationTime())
+          .setAccessTime(n.getAccessTime());
       INodeSection.INode r = buildINodeCommon(n)
           .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
       r.writeDelimitedTo(out);

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java Thu Feb 20 16:18:18 2014
@@ -170,8 +170,9 @@ final class LsrPBImage {
       PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
           d.getPermission(), stringTable);
       out.print(String.format("-%s - %8s %10s %10s %10d %s%s -> %s\n", p
-          .getPermission().toString(), p.getUserName(), p.getGroupName(), 0, 0,
-          parent, inode.getName().toStringUtf8(), d.getTarget().toStringUtf8()));
+          .getPermission().toString(), p.getUserName(), p.getGroupName(), d
+          .getModificationTime(), 0, parent, inode.getName().toStringUtf8(),
+          d.getTarget().toStringUtf8()));
     }
       break;
     default:

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java Thu Feb 20 16:18:18 2014
@@ -289,8 +289,9 @@ public final class PBImageXmlWriter {
   }

   private void dumpINodeSymlink(INodeSymlink s) {
-    o("permission", dumpPermission(s.getPermission())).o("target",
-        s.getTarget().toStringUtf8());
+    o("permission", dumpPermission(s.getPermission()))
+        .o("target", s.getTarget().toStringUtf8())
+        .o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
   }

   private void dumpNameSection(InputStream in) throws IOException {

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto Thu Feb 20 16:18:18 2014
@@ -110,6 +110,8 @@ message INodeSection {
   message INodeSymlink {
     optional fixed64 permission = 1;
     optional bytes target = 2;
+    optional uint64 modificationTime = 3;
+    optional uint64 accessTime = 4;
   }

   message INode {
@@ -281,4 +283,3 @@ message CacheManagerSection {
   // repeated CachePoolInfoProto pools
   // repeated CacheDirectiveInfoProto directives
 }
-

Modified: hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java?rev=1570256&r1=1570255&r2=1570256&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java (original)
+++ hadoop/common/branches/branch-2.4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java Thu Feb 20 16:18:18 2014
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSOutputS
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -135,4 +136,51 @@ public class TestFSImage {
       }
     }
   }
+
+  /**
+   * Ensure mtime and atime can be loaded from fsimage.
+   */
+  @Test(timeout=60000)
+  public void testLoadMtimeAtime() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      String userDir = hdfs.getHomeDirectory().toUri().getPath().toString();
+      Path file = new Path(userDir, "file");
+      Path dir = new Path(userDir, "/dir");
+      Path link = new Path(userDir, "/link");
+      hdfs.createNewFile(file);
+      hdfs.mkdirs(dir);
+      hdfs.createSymlink(file, link, false);
+
+      long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
+      long atimeFile = hdfs.getFileStatus(file).getAccessTime();
+      long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
+      long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
+      long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();
+
+      // save namespace and restart cluster
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      hdfs.saveNamespace();
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+      cluster.shutdown();
+      cluster = new MiniDFSCluster.Builder(conf).format(false)
+          .numDataNodes(1).build();
+      cluster.waitActive();
+      hdfs = cluster.getFileSystem();
+
+      assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
+      assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
+      assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
+      assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
+      assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
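
The new fields are backward compatible with images written before this change: modificationTime and accessTime are optional proto2 scalars with fresh tag numbers, so an old symlink record still parses and the absent fields read back as the uint64 default of 0, which is exactly what the loader previously hard-coded. A minimal sketch of that behavior, assuming the FsImageProto classes generated from this fsimage.proto (the class and field names below follow the generated-code naming convention):

// Sketch only: demonstrates proto2 default values for the new optional
// symlink fields; it is not part of the committed HDFS code paths.
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
import com.google.protobuf.ByteString;

public class SymlinkTimeDefaultsSketch {
  public static void main(String[] args) throws Exception {
    // A record as a pre-HDFS-5962 saver would have written it:
    // only permission and target are set, mtime/atime are absent.
    byte[] oldBytes = INodeSymlink.newBuilder()
        .setPermission(0L)
        .setTarget(ByteString.copyFromUtf8("/user/foo/file"))
        .build().toByteArray();

    INodeSymlink old = INodeSymlink.parseFrom(oldBytes);
    System.out.println(old.hasModificationTime()); // false
    System.out.println(old.getModificationTime()); // 0 (proto2 default)
    System.out.println(old.getAccessTime());       // 0 (proto2 default)

    // A record written after this change round-trips the real times.
    byte[] newBytes = INodeSymlink.newBuilder()
        .setPermission(0L)
        .setTarget(ByteString.copyFromUtf8("/user/foo/file"))
        .setModificationTime(1392913098000L)
        .setAccessTime(1392913098000L)
        .build().toByteArray();
    System.out.println(INodeSymlink.parseFrom(newBytes).getModificationTime());
  }
}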