From: wheat9@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 05 Jan 2016 19:52:16 -0000
X-Mailer: ASF-Git Admin Mailer
Subject: [16/50] [abbrv] hadoop git commit: [partial-ns] Implement SetTime().

[partial-ns] Implement SetTime().

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36cdcd77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36cdcd77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36cdcd77

Branch: refs/heads/feature-HDFS-8286
Commit: 36cdcd77e60f66135e72b27b4a32e6af6bdb8abe
Parents: 7f09c48
Author: Haohui Mai
Authored: Tue May 26 14:50:56 2015 -0700
Committer: Haohui Mai
Committed: Fri Jun 12 13:56:59 2015 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirAttrOp.java       | 97 +++++++++-----------
 .../hdfs/server/namenode/FSEditLogLoader.java   |  9 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 23 ++---
 .../hdfs/server/namenode/RWTransaction.java     |  4 +
 4 files changed, 63 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cdcd77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index f16183f..4221f80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -108,31 +108,27 @@ public class FSDirAttrOp {
     }
 
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-
-    INodesInPath iip;
-    fsd.writeLock();
-    try {
-      src = fsd.resolvePath(pc, src, pathComponents);
-      iip = fsd.getINodesInPath4Write(src);
-      // Write access is required to set access and modification times
+    try (RWTransaction tx = fsd.newRWTransaction().begin()) {
+      Resolver.Result paths = Resolver.resolve(tx, src);
+      if (paths.invalidPath()) {
+        throw new InvalidPathException(src);
+      } else if (paths.notFound()) {
+        throw new FileNotFoundException(src);
+      }
+      FlatINodesInPath iip = paths.inodesInPath();
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
       }
-      final INode inode = iip.getLastINode();
-      if (inode == null) {
-        throw new FileNotFoundException("File/Directory " + src +
-            " does not exist.");
-      }
-      boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
-          iip.getLatestSnapshotId());
+      FlatINode.Builder b = new FlatINode.Builder()
+          .mergeFrom(iip.getLastINode());
+      boolean changed = unprotectedSetTimes(fsd, b, mtime, atime, true);
       if (changed) {
-        fsd.getEditLog().logTimes(src, mtime, atime);
+        tx.putINode(b.id(), b.build());
+        tx.logTimes(src, mtime, atime);
+        tx.commit();
       }
-    } finally {
-      fsd.writeUnlock();
+      return fsd.getAuditFileInfo(iip);
     }
-    return fsd.getAuditFileInfo(iip);
   }
 
   static boolean setReplication(
@@ -280,25 +276,23 @@
     }
   }
 
-  static boolean setTimes(
-      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
-      int latestSnapshotId) throws QuotaExceededException {
-    fsd.writeLock();
-    try {
-      return unprotectedSetTimes(fsd, inode, mtime, atime, force,
-          latestSnapshotId);
-    } finally {
-      fsd.writeUnlock();
-    }
-  }
-
-  static boolean unprotectedSetTimes(
-      FSDirectory fsd, String src, long mtime, long atime, boolean force)
-      throws UnresolvedLinkException, QuotaExceededException {
+  static void unprotectedSetTimes(
+      FSDirectory fsd, RWTransaction tx, String src, long mtime, long atime)
+      throws IOException {
     assert fsd.hasWriteLock();
-    final INodesInPath i = fsd.getINodesInPath(src, true);
-    return unprotectedSetTimes(fsd, i.getLastINode(), mtime, atime,
-        force, i.getLatestSnapshotId());
+    Resolver.Result paths = Resolver.resolve(tx, src);
+    if (paths.invalidPath()) {
+      throw new InvalidPathException(src);
+    } else if (paths.notFound()) {
+      throw new FileNotFoundException(src);
+    }
+    FlatINodesInPath iip = paths.inodesInPath();
+    FlatINode.Builder b = new FlatINode.Builder()
+        .mergeFrom(iip.getLastINode());
+    boolean changed = unprotectedSetTimes(fsd, b, mtime, atime, true);
+    if (changed) {
+      tx.putINode(b.id(), b.build());
+    }
   }
 
   /**
@@ -450,26 +444,23 @@
   }
 
   private static boolean unprotectedSetTimes(
-      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
-      int latest) throws QuotaExceededException {
-    assert fsd.hasWriteLock();
-    boolean status = false;
+      FSDirectory fsd, FlatINode.Builder builder, long mtime, long atime,
+      boolean force) {
+    boolean changed = false;
     if (mtime != -1) {
-      inode = inode.setModificationTime(mtime, latest);
-      status = true;
+      builder.mtime(mtime);
+      changed = true;
     }
+
     if (atime != -1) {
-      long inodeTime = inode.getAccessTime();
-
-      // if the last access time update was within the last precision interval, then
-      // no need to store access time
-      if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
-        status = false;
-      } else {
-        inode.setAccessTime(atime, latest);
-        status = true;
+      long inodeTime = builder.atime();
+      // if the last access time update was within the last precision
+      // interval, then no need to store access time
+      if (force || atime > inodeTime + fsd.getAccessTimePrecision()) {
+        builder.atime(atime);
+        changed = true;
       }
     }
-    return status;
+    return changed;
  }
 }
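
A note on the last hunk above: the rewritten private unprotectedSetTimes() records a new access time only when it advances past the stored atime by more than fsd.getAccessTimePrecision(), or when the caller passes force. The self-contained sketch below illustrates just that rule; the class and method names are hypothetical, and plain longs stand in for the FlatINode.Builder state.

// Standalone illustration of the atime-precision rule in the hunk above.
// Hypothetical names; not the Hadoop API.
public class AtimePrecisionSketch {

  // Returns the atime to persist, or -1 when the update can be skipped.
  // Mirrors the new condition: force || atime > inodeTime + precision.
  static long updatedAtime(long storedAtime, long requestedAtime,
                           long precisionMs, boolean force) {
    if (force || requestedAtime > storedAtime + precisionMs) {
      return requestedAtime;
    }
    return -1;  // within the precision window; nothing to record
  }

  public static void main(String[] args) {
    long hour = 3_600_000L;
    long stored = 1_000_000L;
    // A read 10 minutes later falls inside a 1-hour precision window:
    System.out.println(updatedAtime(stored, stored + 600_000L, hour, false));  // -1
    // A read two hours later is persisted:
    System.out.println(updatedAtime(stored, stored + 2 * hour, hour, false));  // 8200000
    // A forced update (e.g. an explicit setTimes call) is always persisted:
    System.out.println(updatedAtime(stored, stored + 600_000L, hour, true));   // 1600000
  }
}

In the patch, both the explicit setTimes() path and the edit-log replay path pass force=true, while the FSNamesystem caller in the next file applies the same precision check itself before calling in.
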
http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cdcd77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 4c399cb..6238771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -630,9 +630,12 @@ public class FSEditLogLoader {
     case OP_TIMES: {
       TimesOp timesOp = (TimesOp)op;
-      FSDirAttrOp.unprotectedSetTimes(
-          fsDir, renameReservedPathsOnUpgrade(timesOp.path, logVersion),
-          timesOp.mtime, timesOp.atime, true);
+      try (ReplayTransaction tx = fsDir.newReplayTransaction().begin()) {
+        FSDirAttrOp.unprotectedSetTimes(fsDir, tx, renameReservedPathsOnUpgrade(
+            timesOp.path, logVersion),
+            timesOp.mtime, timesOp.atime);
+        tx.commit();
+      }
       break;
     }
     case OP_SYMLINK: {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cdcd77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9432bc3..47fbe7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1722,9 +1722,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
         srcArg);
     String src = srcArg;
-    writeLock();
     final long now = now();
-    try {
+    try (RWTransaction tx = dir.newRWTransaction().begin()) {
       checkOperation(OperationCategory.WRITE);
       /**
        * Resolve the path again and update the atime only when the file
        * HDFS-7463. A better fix is to change the edit log of SetTime to
        * use inode id instead of a path.
        */
-      src = dir.resolvePath(pc, srcArg, pathComponents);
-      final INodesInPath iip = dir.getINodesInPath(src, true);
-      INode inode = iip.getLastINode();
-      boolean updateAccessTime = inode != null &&
-          now > inode.getAccessTime() + dir.getAccessTimePrecision();
-      if (!isInSafeMode() && updateAccessTime) {
-        boolean changed = FSDirAttrOp.setTimes(dir,
-            inode, -1, now, false, iip.getLatestSnapshotId());
-        if (changed) {
-          getEditLog().logTimes(src, -1, now);
+      Resolver.Result paths = Resolver.resolve(tx, srcArg);
+      if (paths.ok()) {
+        FlatINode inode = paths.inodesInPath().getLastINode();
+        boolean updateAccessTime = now > inode.atime() +
+            dir.getAccessTimePrecision();
+        if (!isInSafeMode() && updateAccessTime) {
+          FSDirAttrOp.unprotectedSetTimes(dir, tx, src, -1, now);
         }
+        tx.commit();
       }
     } catch (Throwable e) {
       LOG.warn("Failed to update the access time of " + src, e);
-    } finally {
-      writeUnlock();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36cdcd77/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
index 9b28625..14171c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RWTransaction.java
@@ -174,6 +174,10 @@ class RWTransaction extends Transaction {
     fsd.getEditLog().logSetPermissions(src, permission);
   }
 
+  public void logTimes(String src, long mtime, long atime) {
+    fsd.getEditLog().logTimes(src, mtime, atime);
+  }
+
   public void logUpdateBlocks(String path, FlatINodeFileFeature file) {
     Block[] blocks = new Block[file.numBlocks()];
     int i = 0;
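
Taken together, the three call sites above share one shape: open an RWTransaction (or a ReplayTransaction during edit-log replay) in try-with-resources, resolve the path, stage the updated inode with putINode(), log the edit through the transaction (the new logTimes() wrapper above), and call commit() on the success path; falling out of the block without commit() drops the staged work. The sketch below is a rough illustration of that commit-or-discard contract; the classes are hypothetical stand-ins, not the real RWTransaction/FSDirectory API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical in-memory "namespace" the transaction publishes into.
class SketchStore {
  final Map<Long, String> inodes = new HashMap<>();

  SketchTx newTx() {
    return new SketchTx(this);
  }
}

// Hypothetical transaction: stages writes, publishes them only on commit().
class SketchTx implements AutoCloseable {
  private final SketchStore store;
  private final Map<Long, String> staged = new HashMap<>();
  private boolean committed;

  SketchTx(SketchStore store) {
    this.store = store;
  }

  // Stage an inode update; readers see nothing until commit().
  void putINode(long id, String inode) {
    staged.put(id, inode);
  }

  // Publish everything staged so far.
  void commit() {
    store.inodes.putAll(staged);
    committed = true;
  }

  @Override
  public void close() {
    if (!committed) {
      staged.clear();  // left without commit(): the staged update is dropped
    }
  }
}

class SketchDemo {
  public static void main(String[] args) {
    SketchStore store = new SketchStore();
    try (SketchTx tx = store.newTx()) {
      tx.putINode(42L, "mtime=123,atime=456");
      tx.commit();  // remove this line and the store stays empty
    }
    System.out.println(store.inodes);  // {42=mtime=123,atime=456}
  }
}

The demo's success path mirrors setTimes() above, while the exception and nothing-changed paths in the patch leave the block before commit() is reached, so no inode update or edit-log entry is made for them.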