hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1408923 - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/main/java/org/apache/hadoop/hdfs/ut...
Date: Tue, 13 Nov 2012 19:59:58 GMT
Author: szetszwo
Date: Tue Nov 13 19:59:55 2012
New Revision: 1408923

URL: http://svn.apache.org/viewvc?rev=1408923&view=rev
Log:
HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for selecting particular
snapshot children list views.
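
[Editor's note] For orientation, a minimal self-contained model of the contract this change introduces (illustrative stand-in types only, not the Hadoop sources): callers pass the Snapshot selected while resolving the path, null selects the current view, and the returned children list is read-only and never null.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative model; the real types are INodeDirectory, Snapshot and
// ReadOnlyList in the diffs below.
final class DirModel {
  static final class Snapshot {}

  private List<String> children;  // null until the first child is added

  void addChild(String name) {
    if (children == null) {
      children = new ArrayList<String>();
    }
    children.add(name);
  }

  /**
   * @param snapshot null selects the current children; a non-null value
   *        would select that snapshot's saved view (still a TODO in this
   *        commit, see INodeDirectory.getChildrenList below).
   * @return a read-only list; never null.
   */
  List<String> getChildrenList(Snapshot snapshot) {
    return children == null
        ? Collections.<String>emptyList()
        : Collections.unmodifiableList(children);
  }
}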

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Tue Nov 13 19:59:55 2012
@@ -64,3 +64,6 @@ Branch-2802 Snapshot (Unreleased)
   and has snapshots. (Jing Zhao via szetszwo)
 
   HDFS-4170. Add snapshot information to INodesInPath.  (szetszwo)
+
+  HDFS-4177. Add a snapshot parameter to INodeDirectory.getChildrenList() for
+  selecting particular snapshot children list views.  (szetszwo)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Tue Nov 13 19:59:55 2012
@@ -23,7 +23,6 @@ import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -60,7 +59,9 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ByteArray;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
 
@@ -696,14 +697,15 @@ public class FSDirectory implements Clos
             + error);
         throw new FileAlreadyExistsException(error);
       }
-      List<INode> children = dstInode.isDirectory() ? 
-          ((INodeDirectory) dstInode).getChildren() : null;
-      if (children != null && children.size() != 0) {
-        error = "rename cannot overwrite non empty destination directory "
-            + dst;
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-            + error);
-        throw new IOException(error);
+      if (dstInode.isDirectory()) {
+        final ReadOnlyList<INode> children = ((INodeDirectory) dstInode
+            ).getChildrenList(dstInodesInPath.getPathSnapshot());
+        if (!children.isEmpty()) {
+          error = "rename destination directory is not empty: " + dst;
+          NameNode.stateChangeLog.warn(
+              "DIR* FSDirectory.unprotectedRenameTo: " + error);
+          throw new IOException(error);
+        }
       }
       INode snapshotNode = hasSnapshot(dstInode);
       if (snapshotNode != null) {
@@ -1072,12 +1074,14 @@ public class FSDirectory implements Clos
   boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
     readLock();
     try {
-      final INode inode = rootDir.getNode(path, false);
+      final INodesInPath inodesInPath = rootDir.getINodesInPath(path, false);
+      final INode inode = inodesInPath.getINode(0);
       if (inode == null || !inode.isDirectory()) {
         //not found or not a directory
         return false;
       }
-      return ((INodeDirectory)inode).getChildrenList().size() != 0;
+      final Snapshot s = inodesInPath.getPathSnapshot();
+      return !((INodeDirectory)inode).getChildrenList(s).isEmpty();
     } finally {
       readUnlock();
     }
@@ -1155,13 +1159,10 @@ public class FSDirectory implements Clos
           && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) {
         return target;
       }
-      List<INode> children = targetDir.getChildren();
-      if (children != null) {
-        for (INode child : children) {
-          INode snapshotDir = hasSnapshot(child);
-          if (snapshotDir != null) {
-            return snapshotDir;
-          }
+      for (INode child : targetDir.getChildrenList(null)) {
+        INode snapshotDir = hasSnapshot(child);
+        if (snapshotDir != null) {
+          return snapshotDir;
         }
       }
     }
@@ -1195,7 +1196,7 @@ public class FSDirectory implements Clos
       replaceINodeUnsynced(path, oldnode, newnode);
 
       //update children's parent directory
-      for(INode i : newnode.getChildrenList()) {
+      for(INode i : newnode.getChildrenList(null)) {
         i.parent = newnode;
       }
     } finally {
@@ -1239,7 +1240,8 @@ public class FSDirectory implements Clos
 
     readLock();
     try {
-      INode targetNode = rootDir.getNode(srcs, true);
+      final INodesInPath inodesInPath = rootDir.getINodesInPath(srcs, true);
+      final INode targetNode = inodesInPath.getINode(0);
       if (targetNode == null)
         return null;
       
@@ -1248,8 +1250,10 @@ public class FSDirectory implements Clos
             new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
                 targetNode, needLocation)}, 0);
       }
+
       INodeDirectory dirInode = (INodeDirectory)targetNode;
-      List<INode> contents = dirInode.getChildrenList();
+      final ReadOnlyList<INode> contents = dirInode.getChildrenList(
+          inodesInPath.getPathSnapshot());
       int startChild = dirInode.nextChild(startAfter);
       int totalNumChildren = contents.size();
       int numOfListing = Math.min(totalNumChildren-startChild, this.lsLimit);
@@ -1738,7 +1742,7 @@ public class FSDirectory implements Clos
       }
       if (maxDirItems != 0) {
         INodeDirectory parent = (INodeDirectory)pathComponents[pos-1];
-        int count = parent.getChildrenList().size();
+        int count = parent.getChildrenList(null).size();
         if (count >= maxDirItems) {
           throw new MaxDirectoryItemsExceededException(maxDirItems, count);
         }
@@ -1881,7 +1885,7 @@ public class FSDirectory implements Clos
      * INode. using 'parent' is not currently recommended. */
     nodesInPath.add(dir);
 
-    for (INode child : dir.getChildrenList()) {
+    for (INode child : dir.getChildrenList(null)) {
       if (child.isDirectory()) {
         updateCountForINodeWithQuota((INodeDirectory)child, 
                                      counts, nodesInPath);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Nov 13 19:59:55 2012
@@ -30,7 +30,6 @@ import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.util.Arrays;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
 
@@ -530,9 +530,10 @@ class FSImageFormat {
     private void saveImage(ByteBuffer currentDirName,
                                   INodeDirectory current,
                                   DataOutputStream out) throws IOException {
-      List<INode> children = current.getChildren();
-      if (children == null || children.isEmpty())
+      final ReadOnlyList<INode> children = current.getChildrenList(null);
+      if (children.isEmpty()) {
         return;
+      }
       // print prefix (parent directory name)
       int prefixLen = currentDirName.position();
       if (prefixLen == 0) {  // root

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java Tue Nov 13 19:59:55 2012
@@ -28,6 +28,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -121,7 +123,8 @@ class FSPermissionChecker {
     }
     // check if (parentAccess != null) && file exists, then check sb
       // Resolve symlinks, the check is performed on the link target.
-      final INode[] inodes = root.getExistingPathINodes(path, true).getINodes();
+      final INodesInPath inodesInPath = root.getExistingPathINodes(path, true); 
+      final INode[] inodes = inodesInPath.getINodes();
       int ancestorIndex = inodes.length - 2;
       for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
           ancestorIndex--);
@@ -141,7 +144,8 @@ class FSPermissionChecker {
         check(inodes[inodes.length - 1], access);
       }
       if (subAccess != null) {
-        checkSubAccess(inodes[inodes.length - 1], subAccess);
+        final Snapshot s = inodesInPath.getPathSnapshot();
+        checkSubAccess(inodes[inodes.length - 1], s, subAccess);
       }
       if (doCheckOwner) {
         checkOwner(inodes[inodes.length - 1]);
@@ -162,7 +166,7 @@ class FSPermissionChecker {
     }
   }
 
-  private void checkSubAccess(INode inode, FsAction access
+  private void checkSubAccess(INode inode, Snapshot snapshot, FsAction access
       ) throws AccessControlException {
     if (inode == null || !inode.isDirectory()) {
       return;
@@ -173,7 +177,7 @@ class FSPermissionChecker {
       INodeDirectory d = directories.pop();
       check(d, access);
 
-      for(INode child : d.getChildrenList()) {
+      for(INode child : d.getChildrenList(snapshot)) {
         if (child.isDirectory()) {
           directories.push((INodeDirectory)child);
         }
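
[Editor's note] A self-contained sketch of the traversal checkSubAccess now performs (stand-in types; the real code also runs check(d, access) at each directory and throws AccessControlException on failure): every level is listed through the same Snapshot, so the whole permission check observes one consistent view.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

final class SubtreeWalkSketch {
  // Stand-in for INode/INodeDirectory; getChildrenList mirrors the new
  // snapshot-aware signature.
  interface Node {
    boolean isDirectory();
    List<Node> getChildrenList(Object snapshot);
  }

  static void walk(Node root, Object snapshot) {
    if (root == null || !root.isDirectory()) {
      return;
    }
    final Deque<Node> directories = new ArrayDeque<Node>();
    directories.push(root);
    while (!directories.isEmpty()) {
      final Node d = directories.pop();
      // check(d, access) would run here in the real code.
      for (Node child : d.getChildrenList(snapshot)) {
        if (child.isDirectory()) {
          directories.push(child);
        }
      }
    }
  }
}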

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Tue Nov 13 19:59:55 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -48,7 +49,11 @@ import com.google.common.primitives.Sign
  */
 @InterfaceAudience.Private
 public abstract class INode implements Comparable<byte[]> {
-  static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>());
+  static final List<INode> EMPTY_LIST
+      = Collections.unmodifiableList(new ArrayList<INode>());
+  static final ReadOnlyList<INode> EMPTY_READ_ONLY_LIST
+      = ReadOnlyList.Util.asReadOnlyList(EMPTY_LIST);
+
   /**
    *  The inode name is in java UTF8 encoding; 
    *  The name in HdfsFileStatus should keep the same encoding as this.

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Nov 13 19:59:55 2012
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.U
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -55,16 +56,14 @@ public class INodeDirectory extends INod
   protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
   final static String ROOT_NAME = "";
 
-  private List<INode> children;
+  private List<INode> children = null;
 
   public INodeDirectory(String name, PermissionStatus permissions) {
     super(name, permissions);
-    this.children = null;
   }
 
   public INodeDirectory(PermissionStatus permissions, long mTime) {
     super(permissions, mTime, 0);
-    this.children = null;
   }
 
   /** constructor */
@@ -79,7 +78,7 @@ public class INodeDirectory extends INod
    */
   public INodeDirectory(INodeDirectory other) {
     super(other);
-    this.children = other.getChildren();
+    this.children = other.children;
   }
   
   /** @return true unconditionally. */
@@ -118,39 +117,23 @@ public class INodeDirectory extends INod
       throw new IllegalArgumentException("No child exists to be replaced");
     }
   }
-  
-  INode getChild(String name) {
-    return getChildINode(DFSUtil.string2Bytes(name));
-  }
 
-  private INode getChildINode(byte[] name) {
-    if (children == null) {
-      return null;
-    }
-    int low = Collections.binarySearch(children, name);
-    if (low >= 0) {
-      return children.get(low);
-    }
-    return null;
+  private INode getChild(byte[] name, Snapshot snapshot) {
+    final ReadOnlyList<INode> c = getChildrenList(snapshot);
+    final int i = ReadOnlyList.Util.binarySearch(c, name);
+    return i < 0? null: c.get(i);
   }
 
-  /**
-   * @return the INode of the last component in components, or null if the last
-   * component does not exist.
-   */
-  private INode getNode(byte[][] components, boolean resolveLink
+  /** @return the {@link INodesInPath} containing only the last inode. */
+  INodesInPath getINodesInPath(String path, boolean resolveLink
       ) throws UnresolvedLinkException {
-    INodesInPath inodesInPath = getExistingPathINodes(components, 1,
-        resolveLink);
-    return inodesInPath.inodes[0];
+    return getExistingPathINodes(getPathComponents(path), 1, resolveLink);
   }
 
-  /**
-   * This is the external interface
-   */
+  /** @return the last inode in the path. */
   INode getNode(String path, boolean resolveLink) 
     throws UnresolvedLinkException {
-    return getNode(getPathComponents(path), resolveLink);
+    return getINodesInPath(path, resolveLink).getINode(0);
   }
 
   /**
@@ -269,7 +252,8 @@ public class INodeDirectory extends INod
         }
       } else {
         // normal case, and also for resolving file/dir under snapshot root
-        curNode = parentDir.getChildINode(components[count + 1]);
+        curNode = parentDir.getChild(components[count + 1],
+            existing.getPathSnapshot());
       }
       count++;
       index++;
@@ -470,16 +454,14 @@ public class INodeDirectory extends INod
   }
 
   /**
-   * @return an empty list if the children list is null;
-   *         otherwise, return the children list.
-   *         The returned list should not be modified.
-   */
-  public List<INode> getChildrenList() {
-    return children==null ? EMPTY_LIST : children;
-  }
-  /** @return the children list which is possibly null. */
-  public List<INode> getChildren() {
-    return children;
+   * @return the current children list if the specified snapshot is null;
+   *         otherwise, return the children list corresponding to the snapshot.
+   *         Note that the returned list is never null.
+   */
+  public ReadOnlyList<INode> getChildrenList(final Snapshot snapshot) {
+    //TODO: use snapshot to select children list
+    return children == null ? EMPTY_READ_ONLY_LIST
+        : ReadOnlyList.Util.asReadOnlyList(children);
   }
   /** Set the children list. */
   public void setChildren(List<INode> children) {
@@ -545,11 +527,19 @@ public class INodeDirectory extends INod
     }
 
     /**
-     * @return the snapshot associated to the path.
-     * @see #snapshot
+     * For non-snapshot paths, return the latest snapshot found in the path.
+     * For snapshot paths, return null.
+     */
+    public Snapshot getLatestSnapshot() {
+      return isSnapshot? null: snapshot;
+    }
+    
+    /**
+     * For snapshot paths, return the snapshot specified in the path.
+     * For non-snapshot paths, return null.
      */
-    public Snapshot getSnapshot() {
-      return snapshot;
+    public Snapshot getPathSnapshot() {
+      return isSnapshot? snapshot: null;
     }
 
     private void setSnapshot(Snapshot s) {
@@ -576,6 +566,11 @@ public class INodeDirectory extends INod
       return inodes;
     }
     
+    /** @return the i-th inode. */
+    INode getINode(int i) {
+      return inodes[i];
+    }
+    
     /**
      * @return index of the {@link INodeDirectoryWithSnapshot} in
      *         {@link #inodes} for snapshot path, else -1.
@@ -626,7 +621,7 @@ public class INodeDirectory extends INod
         for(int i = 1; i < inodes.length; i++) {
           b.append(", ").append(toString(inodes[i]));
         }
-        b.append("]");
+        b.append("], length=").append(inodes.length);
       }
       b.append("\n  numNonNull = ").append(numNonNull)
        .append("\n  capacity   = ").append(capacity)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Tue Nov 13 19:59:55 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 /** Manage snapshottable directories and their snapshots. */
 public class SnapshotManager implements SnapshotStats {
@@ -124,10 +125,10 @@ public class SnapshotManager implements 
     /** Process snapshot creation recursively. */
     private void processRecursively(final INodeDirectory srcDir,
         final INodeDirectory dstDir) throws IOException {
-      final List<INode> children = srcDir.getChildren();
-      if (children != null) {
+      final ReadOnlyList<INode> children = srcDir.getChildrenList(null);
+      if (!children.isEmpty()) {
         final List<INode> inodes = new ArrayList<INode>(children.size());
-        for(final INode c : new ArrayList<INode>(children)) {
+        for(final INode c : new ArrayList<INode>(ReadOnlyList.Util.asList(children))) {
           final INode i;
           if (c == null) {
             i = null;

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java?rev=1408923&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java (added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java Tue Nov 13 19:59:55 2012
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A {@link ReadOnlyList} is an unmodifiable list,
+ * which supports read-only operations.
+ * 
+ * @param <E> The type of the list elements.
+ */
+@InterfaceAudience.Private
+public interface ReadOnlyList<E> extends Iterable<E> {
+  /**
+   * Is this an empty list?
+   */
+  boolean isEmpty();
+
+  /**
+   * @return the size of this list.
+   */
+  int size();
+
+  /**
+   * @return the i-th element.
+   */
+  E get(int i);
+  
+  /**
+   * Utilities for {@link ReadOnlyList}
+   */
+  public static class Util {
+    /**
+     * The same as {@link Collections#binarySearch(List, Object)}
+     * except that the list is a {@link ReadOnlyList}.
+     *
+     * @return the insertion point defined
+     *         in {@link Collections#binarySearch(List, Object)}.
+     */
+    public static <K, E extends Comparable<K>> int binarySearch(
+        final ReadOnlyList<E> list, final K key) {
+      return Collections.binarySearch(asList(list), key);
+    }
+
+    /**
+     * @return a {@link ReadOnlyList} view of the given list.
+     */
+    public static <E> ReadOnlyList<E> asReadOnlyList(final List<E> list) {
+      return new ReadOnlyList<E>() {
+        @Override
+        public Iterator<E> iterator() {
+          return list.iterator();
+        }
+
+        @Override
+        public boolean isEmpty() {
+          return list.isEmpty();
+        }
+
+        @Override
+        public int size() {
+          return list.size();
+        }
+
+        @Override
+        public E get(int i) {
+          return list.get(i);
+        }
+      };
+    }
+
+    /**
+     * @return a {@link List} view of the given list.
+     */
+    public static <E> List<E> asList(final ReadOnlyList<E> list) {
+      return new List<E>() {
+        @Override
+        public Iterator<E> iterator() {
+          return list.iterator();
+        }
+
+        @Override
+        public boolean isEmpty() {
+          return list.isEmpty();
+        }
+
+        @Override
+        public int size() {
+          return list.size();
+        }
+
+        @Override
+        public E get(int i) {
+          return list.get(i);
+        }
+
+        @Override
+        public Object[] toArray() {
+          final Object[] a = new Object[size()];
+          for(int i = 0; i < a.length; i++) {
+            a[i] = get(i);
+          }
+          return a;
+        }
+
+        //All methods below are not supported.
+
+        @Override
+        public boolean add(E e) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void add(int index, E element) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean addAll(Collection<? extends E> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean addAll(int index, Collection<? extends E> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public void clear() {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean contains(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean containsAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int indexOf(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int lastIndexOf(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ListIterator<E> listIterator() {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public ListIterator<E> listIterator(int index) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean remove(Object o) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public E remove(int index) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean removeAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean retainAll(Collection<?> c) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public E set(int index, E element) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public List<E> subList(int fromIndex, int toIndex) {
+          throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public <T> T[] toArray(T[] a) {
+          throw new UnsupportedOperationException();
+        }
+      };
+    }
+  }
+}
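
[Editor's note] A quick caller-side usage sketch for the new utility (not part of the commit; assumes the ReadOnlyList class above is on the classpath): wrap a sorted List in a read-only view, then binary-search it.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.util.ReadOnlyList;

class ReadOnlyListExample {
  public static void main(String[] args) {
    final List<String> names = Arrays.asList("a", "b", "d");  // already sorted
    final ReadOnlyList<String> view = ReadOnlyList.Util.asReadOnlyList(names);

    System.out.println(view.size());                                // 3
    System.out.println(ReadOnlyList.Util.binarySearch(view, "b"));  // 1
    System.out.println(ReadOnlyList.Util.binarySearch(view, "c"));  // -3, i.e.
        // -(insertion point) - 1, as in Collections.binarySearch

    // Mutators on the List view are unsupported:
    // ReadOnlyList.Util.asList(view).add("x");  // throws UnsupportedOperationException
  }
}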

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1408923&r1=1408922&r2=1408923&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Tue Nov 13 19:59:55 2012
@@ -118,7 +118,8 @@ public class TestSnapshotPathINodes {
       final Snapshot snapshot, int index) {
     assertEquals(isSnapshot, inodesInPath.isSnapshot());
     assertEquals(index, inodesInPath.getSnapshotRootIndex());
-    assertEquals(snapshot, inodesInPath.getSnapshot());
+    assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
+    assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
   }
 
   /** 


