hadoop-common-commits mailing list archives

From: kih...@apache.org
Subject: hadoop git commit: HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.
Date: Fri, 07 Oct 2016 19:35:44 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2de2d0df1 -> 5ea87e4ae


HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

(cherry picked from commit 3565c9af17ab05bf9e7f68b71b6c6850df772bb9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ea87e4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ea87e4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ea87e4a

Branch: refs/heads/branch-2
Commit: 5ea87e4aebab066aa777c2593764f6b59fa78091
Parents: 2de2d0d
Author: Kihwal Lee <kihwal@apache.org>
Authored: Fri Oct 7 14:35:28 2016 -0500
Committer: Kihwal Lee <kihwal@apache.org>
Committed: Fri Oct 7 14:35:28 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirDeleteOp.java     | 63 ++++++++++----------
 .../hdfs/server/namenode/FSEditLogLoader.java   | 11 ++--
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
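
[Editor's note, not part of the original message: the common theme of this patch is that callers resolve a path String to an INodesInPath (IIP) once at the entry point, then pass the resolved object through FSDirDeleteOp instead of the raw String, so helpers such as deleteAllowed() and deleteInternal() no longer re-resolve the path. A minimal, self-contained sketch of the pattern follows; ResolvedPath and its methods are simplified stand-ins, not Hadoop's actual INodesInPath API.]

// Minimal sketch (simplified stand-ins, not Hadoop's real classes) of the
// pattern this patch applies: resolve the path once, pass the result down.
import java.util.List;

class IipSketch {
  // Stand-in for INodesInPath: a path plus its resolved inode chain.
  record ResolvedPath(String path, List<String> inodes) {
    String lastINode() {
      return inodes.isEmpty() ? null : inodes.get(inodes.size() - 1);
    }
  }

  // Old style: the helper re-resolves the String it is given.
  static boolean deleteAllowedOld(String src) {
    return resolve(src).lastINode() != null;  // second, redundant resolution
  }

  // New style: the caller resolves once and threads the IIP through.
  static boolean deleteAllowed(ResolvedPath iip) {
    return iip.lastINode() != null;           // no re-resolution
  }

  // Toy resolver; the real one walks the namesystem's inode tree.
  static ResolvedPath resolve(String src) {
    return new ResolvedPath(src, List.of(src.split("/")));
  }

  public static void main(String[] args) {
    ResolvedPath iip = resolve("/user/data"); // resolved once at the entry point
    System.out.println(deleteAllowed(iip));   // prints: true
  }
}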


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ea87e4a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ce..328ce79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
     FSNamesystem fsn = fsd.getFSNamesystem();
     fsd.writeLock();
     try {
-      if (deleteAllowed(iip, iip.getPath()) ) {
+      if (deleteAllowed(iip)) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
         FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-    src = iip.getPath();
-    if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
     }
+
+    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                           FsAction.ALL, true);
     }
-    if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, src);
+    if (fsd.isNonEmptyDirectory(iip)) {
+      if (!recursive) {
+        throw new PathIsNotEmptyDirectoryException(
+            iip.getPath() + " is non empty");
+      }
+      checkProtectedDescendants(fsd, iip);
     }
 
-    return deleteInternal(fsn, src, iip, logRetryCache);
+    return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
@@ -126,17 +130,14 @@ class FSDirDeleteOp {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    */
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
       throws IOException {
     assert fsd.hasWriteLock();
     FSNamesystem fsn = fsd.getFSNamesystem();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<Long> removedUCFiles = new ChunkedArrayList<>();
-
-    final INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), false);
-    if (!deleteAllowed(iip, src)) {
+    if (!deleteAllowed(iip)) {
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
    * <p>
    * For small directory or file the deletion is done in one shot.
    * @param fsn namespace
-   * @param src path name to be deleted
    * @param iip the INodesInPath instance containing all the INodes for the path
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
    * @throws IOException
    */
   static BlocksMapUpdateInfo deleteInternal(
-      FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+      FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     assert fsn.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -193,14 +189,14 @@ class FSDirDeleteOp {
     if (filesRemoved < 0) {
       return null;
     }
-    fsd.getEditLog().logDelete(src, mtime, logRetryCache);
+    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
     incrDeletedFileCount(filesRemoved);
 
     fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
 
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
-                                        + src +" is removed");
+      NameNode.stateChangeLog.debug(
+          "DIR* Namesystem.delete: " + iip.getPath() +" is removed");
     }
     return collectedBlocks;
   }
@@ -209,19 +205,18 @@ class FSDirDeleteOp {
     NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
 
-  private static boolean deleteAllowed(final INodesInPath iip,
-      final String src) {
+  private static boolean deleteAllowed(final INodesInPath iip) {
     if (iip.length() < 1 || iip.getLastINode() == null) {
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "DIR* FSDirectory.unprotectedDelete: failed to remove "
-                + src + " because it does not exist");
+                + iip.getPath() + " because it does not exist");
       }
       return false;
     } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn(
-          "DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
-              " because the root is not allowed to be deleted");
+          "DIR* FSDirectory.unprotectedDelete: failed to remove " +
+              iip.getPath() + " because the root is not allowed to be deleted");
       return false;
     }
     return true;
@@ -278,15 +273,19 @@ class FSDirDeleteOp {
    * Throw if the given directory has any non-empty protected descendants
    * (including itself).
    *
-   * @param src directory whose descendants are to be checked. The caller
-   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @param iip directory whose descendants are to be checked.
    * @throws AccessControlException if a non-empty protected descendant
    *                                was found.
    */
-  private static void checkProtectedDescendants(FSDirectory fsd, String src)
-      throws AccessControlException, UnresolvedLinkException {
+  private static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+          throws AccessControlException, UnresolvedLinkException {
     final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+    if (protectedDirs.isEmpty()) {
+      return;
+    }
 
+    String src = iip.getPath();
     // Is src protected? Caller has already checked it is non-empty.
     if (protectedDirs.contains(src)) {
       throw new AccessControlException(

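[Editor's note: besides threading the IIP through, the hunks above reorder delete(): the reserved-name check moves ahead of resolution, the non-empty-directory test is made once with the recursive flag decided inside it, and checkProtectedDescendants() gains an early return when no protected directories are configured. A hedged sketch of that last guard, with simplified names and a generic exception standing in for AccessControlException:]

import java.util.SortedSet;
import java.util.TreeSet;

class ProtectedDirsSketch {
  // Sketch of the early-return guard: with no protected directories
  // configured (the common case), skip materializing the path string
  // and scanning descendants entirely.
  static void checkProtectedDescendants(SortedSet<String> protectedDirs,
                                        String src) {
    if (protectedDirs.isEmpty()) {
      return;                                  // fast path: nothing to check
    }
    if (protectedDirs.contains(src)) {         // caller ensured src is non-empty
      throw new IllegalStateException(src + " is protected");
    }
    // ... the descendant scan would follow here, as in the real method ...
  }

  public static void main(String[] args) {
    checkProtectedDescendants(new TreeSet<>(), "/user/data");  // no-op
    System.out.println("empty protected set short-circuits");
  }
}
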
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ea87e4a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 21c94f8..8015a29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -352,7 +352,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -552,10 +552,11 @@ public class FSEditLogLoader {
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      FSDirDeleteOp.deleteForEditLog(
-          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
-          deleteOp.timestamp);
-      
+      final String src = renameReservedPathsOnUpgrade(
+          deleteOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
+      FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
+
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
       }

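[Editor's note: the FSEditLogLoader change above shows the payoff on replay. The OP_ADD overwrite path already holds a resolved iip, and OP_DELETE now resolves once via getINodesInPath4Write() before calling deleteForEditLog(), which no longer resolves internally. A toy illustration of that hoisting, with a HashMap standing in for the namespace and hypothetical names throughout:]

import java.util.HashMap;
import java.util.Map;

class ReplaySketch {
  static final Map<String, String> NAMESPACE = new HashMap<>();

  // Toy resolver; returns null when the path does not exist.
  static String resolve(String path) {
    return NAMESPACE.get(path);
  }

  // New-style delete: consumes an already-resolved node instead of a path
  // String, mirroring deleteForEditLog(fsd, iip, mtime).
  static void deleteForEditLog(String path, String resolvedNode) {
    if (resolvedNode == null) {
      return;                         // mirrors deleteAllowed() returning false
    }
    NAMESPACE.remove(path);
  }

  public static void main(String[] args) {
    NAMESPACE.put("/f", "inode-1");
    String node = resolve("/f");        // resolved once by the replay loop...
    deleteForEditLog("/f", node);       // ...and reused by the delete
    System.out.println(NAMESPACE.containsKey("/f"));  // prints: false
  }
}
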
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ea87e4a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4f2548a..1ff1748 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3784,7 +3784,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
           BlocksMapUpdateInfo toRemoveBlocks =
               FSDirDeleteOp.deleteInternal(
-                  FSNamesystem.this, bc.getName(),
+                  FSNamesystem.this,
                   INodesInPath.fromINode((INodeFile) bc), false);
           changed |= toRemoveBlocks != null;
           if (toRemoveBlocks != null) {


