hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From whe...@apache.org
Subject hadoop git commit: HDFS-7543. Avoid path resolution when getting FileStatus for audit logs. Contributed by Haohui Mai.
Date Thu, 18 Dec 2014 19:25:22 GMT
Repository: hadoop
Updated Branches:
  refs/heads/trunk 07619aa51 -> 65f2a4ee6


HDFS-7543. Avoid path resolution when getting FileStatus for audit logs. Contributed by Haohui
Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65f2a4ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65f2a4ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65f2a4ee

Branch: refs/heads/trunk
Commit: 65f2a4ee600dfffa5203450261da3c1989de25a9
Parents: 07619aa
Author: Haohui Mai <wheat9@apache.org>
Authored: Thu Dec 18 11:24:57 2014 -0800
Committer: Haohui Mai <wheat9@apache.org>
Committed: Thu Dec 18 11:25:14 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java | 28 +++++++-------
 .../hdfs/server/namenode/FSDirAttrOp.java       | 20 ++++++----
 .../hdfs/server/namenode/FSDirConcatOp.java     |  2 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  7 ++--
 .../hdfs/server/namenode/FSDirRenameOp.java     | 15 +++++---
 .../server/namenode/FSDirStatAndListingOp.java  | 39 ++++++++++++++------
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |  5 ++-
 .../hdfs/server/namenode/FSDirXAttrOp.java      | 20 +++++-----
 .../hdfs/server/namenode/FSDirectory.java       |  7 ++--
 .../hdfs/server/namenode/FSNamesystem.java      | 10 ++---
 11 files changed, 91 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8150a54..95da136 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -471,6 +471,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7373. Clean up temporary files after fsimage transfer failures.
     (kihwal)
 
+    HDFS-7543. Avoid path resolution when getting FileStatus for audit logs.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0d2b34c..7aaa21c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -41,10 +41,10 @@ class FSDirAclOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -56,7 +56,7 @@ class FSDirAclOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeAclEntries(
@@ -67,10 +67,10 @@ class FSDirAclOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -82,7 +82,7 @@ class FSDirAclOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
@@ -92,10 +92,10 @@ class FSDirAclOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -107,7 +107,7 @@ class FSDirAclOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
@@ -117,16 +117,17 @@ class FSDirAclOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedRemoveAcl(fsd, iip);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setAcl(
@@ -137,16 +138,17 @@ class FSDirAclOp {
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       List<AclEntry> newAcl = unprotectedSetAcl(fsd, src, aclSpec);
       fsd.getEditLog().logSetAcl(src, newAcl);
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static AclStatus getAclStatus(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 1e3c401..6c1890e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -50,17 +50,18 @@ public class FSDirAttrOp {
     String src = srcArg;
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedSetPermission(fsd, src, permission);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetPermissions(src, permission);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setOwner(
@@ -68,10 +69,11 @@ public class FSDirAttrOp {
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -86,7 +88,7 @@ public class FSDirAttrOp {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetOwner(src, username, group);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setTimes(
@@ -102,10 +104,11 @@ public class FSDirAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -123,7 +126,7 @@ public class FSDirAttrOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static boolean setReplication(
@@ -165,10 +168,11 @@ public class FSDirAttrOp {
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = FSDirectory.resolvePath(src, pathComponents, fsd);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
 
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -185,7 +189,7 @@ public class FSDirAttrOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index f7e57be..43d3b20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -168,7 +168,7 @@ class FSDirConcatOp {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
-    return fsd.getAuditFileInfo(target, false);
+    return fsd.getAuditFileInfo(trgIip);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 7e62d2c..4ea77e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -42,7 +42,6 @@ class FSDirMkdirOp {
       FSNamesystem fsn, String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
-    final String srcArg = src;
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
     }
@@ -70,12 +69,12 @@ class FSDirMkdirOp {
       // heuristic because the mkdirs() operation might need to
       // create multiple inodes.
       fsn.checkFsObjectLimit();
-
-      if (mkdirsRecursively(fsd, iip, permissions, false, now()) == null) {
+      iip = mkdirsRecursively(fsd, iip, permissions, false, now());
+      if (iip == null) {
         throw new IOException("Failed to create directory: " + src);
       }
     }
-    return fsd.getAuditFileInfo(srcArg, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static INode unprotectedMkdir(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 4b4dc8c..4239f46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -70,7 +70,8 @@ class FSDirRenameOp {
     @SuppressWarnings("deprecation")
     final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
-      resultingStat = fsd.getAuditFileInfo(dst, false);
+      INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
+      resultingStat = fsd.getAuditFileInfo(dstIIP);
     }
     return new RenameOldResult(status, resultingStat);
   }
@@ -122,6 +123,7 @@ class FSDirRenameOp {
    * <br>
    */
   @Deprecated
+  @SuppressWarnings("deprecation")
   static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
       long timestamp) throws IOException {
     if (fsd.isDir(dst)) {
@@ -246,10 +248,11 @@ class FSDirRenameOp {
     src = fsd.resolvePath(pc, src, srcComponents);
     dst = fsd.resolvePath(pc, dst, dstComponents);
     renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
-    HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dst, false);
+    INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
+    HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
 
-    return new AbstractMap.SimpleImmutableEntry<BlocksMapUpdateInfo,
-        HdfsFileStatus>(collectedBlocks, resultingStat);
+    return new AbstractMap.SimpleImmutableEntry<>(
+        collectedBlocks, resultingStat);
   }
 
   /**
@@ -357,7 +360,7 @@ class FSDirRenameOp {
 
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     final INode dstInode = dstIIP.getLastINode();
-    List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
+    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
     if (dstInode != null) { // Destination exists
       validateOverwrite(src, dst, overwrite, srcInode, dstInode);
       FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
@@ -419,7 +422,7 @@ class FSDirRenameOp {
         if (removedDst != null) {
           undoRemoveDst = false;
           if (removedNum > 0) {
-            List<INode> removedINodes = new ChunkedArrayList<INode>();
+            List<INode> removedINodes = new ChunkedArrayList<>();
             if (!removedDst.isInLatestSnapshot(tx.dstIIP.getLatestSnapshotId())) {
               removedDst.destroyAndCollectBlocks(collectedBlocks,
                   removedINodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 6ca30ad..dc0fe1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -264,29 +264,46 @@ class FSDirStatAndListingOp {
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
    * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @param includeStoragePolicy whether to include storage policy
    * @return object containing information regarding the file
    *         or null if file not found
    */
   static HdfsFileStatus getFileInfo(
+      FSDirectory fsd, INodesInPath src, boolean isRawPath,
+      boolean includeStoragePolicy)
+      throws IOException {
+    fsd.readLock();
+    try {
+      final INode i = src.getLastINode();
+      byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
+          i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+      return i == null ? null : createFileStatus(
+          fsd, HdfsFileStatus.EMPTY_NAME, i, policyId,
+          src.getPathSnapshotId(), isRawPath, src);
+    } finally {
+      fsd.readUnlock();
+    }
+  }
+
+  static HdfsFileStatus getFileInfo(
       FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
       boolean includeStoragePolicy)
     throws IOException {
     String srcs = FSDirectory.normalizePath(src);
+    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
+      if (fsd.getINode4DotSnapshot(srcs) != null) {
+        return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+            HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+            BlockStoragePolicySuite.ID_UNSPECIFIED);
+      }
+      return null;
+    }
+
     fsd.readLock();
     try {
-      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
-        return getFileInfo4DotSnapshot(fsd, srcs);
-      }
-      final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, resolveLink);
-      final INode i = inodesInPath.getLastINode();
-      byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
-          i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
-      return i == null ? null : createFileStatus(fsd,
-          HdfsFileStatus.EMPTY_NAME, i, policyId,
-          inodesInPath.getPathSnapshotId(), isRawPath, inodesInPath);
+      final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
+      return getFileInfo(fsd, iip, isRawPath, includeStoragePolicy);
     } finally {
       fsd.readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 3380d0a..d232b87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -52,10 +52,11 @@ class FSDirSymlinkOp {
 
     FSPermissionChecker pc = fsn.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       link = fsd.resolvePath(pc, link, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(link, false);
+      iip = fsd.getINodesInPath4Write(link, false);
       if (!createParent) {
         fsd.verifyParentDir(iip, link);
       }
@@ -76,7 +77,7 @@ class FSDirSymlinkOp {
       fsd.writeUnlock();
     }
     NameNode.getNameNodeMetrics().incrCreateSymlinkOps();
-    return fsd.getAuditFileInfo(link, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static INodeSymlink unprotectedAddSymlink(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 47a995d..45e63f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -66,21 +66,21 @@ class FSDirXAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-        src);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
-    final INodesInPath iip = fsd.getINodesInPath4Write(src);
-    checkXAttrChangeAccess(fsd, iip, xAttr, pc);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
+    INodesInPath iip;
     fsd.writeLock();
     try {
+      iip = fsd.getINodesInPath4Write(src);
+      checkXAttrChangeAccess(fsd, iip, xAttr, pc);
       unprotectedSetXAttrs(fsd, src, xAttrs, flag);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg,
@@ -164,14 +164,16 @@ class FSDirXAttrOp {
         pc, xAttr, FSDirectory.isReservedRawName(src));
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
         src);
-    src = fsd.resolvePath(pc, src, pathComponents);
-    final INodesInPath iip = fsd.getINodesInPath4Write(src);
-    checkXAttrChangeAccess(fsd, iip, xAttr, pc);
 
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
+    INodesInPath iip;
     fsd.writeLock();
     try {
+      src = fsd.resolvePath(pc, src, pathComponents);
+      iip = fsd.getINodesInPath4Write(src);
+      checkXAttrChangeAccess(fsd, iip, xAttr, pc);
+
       List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
         fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
@@ -182,7 +184,7 @@ class FSDirXAttrOp {
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static List<XAttr> unprotectedRemoveXAttrs(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ddc5c0..c025e01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1647,11 +1647,10 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
-    throws IOException {
+  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+      throws IOException {
     return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-      ? FSDirStatAndListingOp.getFileInfo(this, path, resolveSymlink, false,
-        false) : null;
+        ? FSDirStatAndListingOp.getFileInfo(this, iip, false, false) : null;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f2a4ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b411c67..bb1c4ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -337,11 +337,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return !isDefaultAuditLogger || auditLog.isInfoEnabled();
   }
 
-  private HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
-      throws IOException {
-    return dir.getAuditFileInfo(path, resolveSymlink);
-  }
-  
   private void logAuditEvent(boolean succeeded, String cmd, String src)
       throws IOException {
     logAuditEvent(succeeded, cmd, src, null, null);
@@ -7669,7 +7664,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
       xAttrs.add(ezXAttr);
       getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-      resultingStat = getAuditFileInfo(src, false);
+      final INodesInPath iip = dir.getINodesInPath4Write(src, false);
+      resultingStat = dir.getAuditFileInfo(iip);
     } finally {
       writeUnlock();
     }
@@ -7703,7 +7699,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         dir.checkPathAccess(pc, iip, FsAction.READ);
       }
       final EncryptionZone ret = dir.getEZForPath(iip);
-      resultingStat = getAuditFileInfo(src, false);
+      resultingStat = dir.getAuditFileInfo(iip);
       success = true;
       return ret;
     } finally {


Mime
View raw message