hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From szets...@apache.org
Subject svn commit: r1424782 [2/2] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/h...
Date Fri, 21 Dec 2012 01:30:50 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Fri Dec 21 01:30:49 2012
@@ -19,12 +19,16 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /** The directory with snapshots. */
@@ -182,12 +186,12 @@ public class INodeDirectoryWithSnapshot 
       INode previous = null;
       Integer d = null;
       if (c >= 0) {
-        // inode is already in c-list,
+        // Case 1.1.3: inode is already in c-list,
         previous = created.set(c, newinode);
       } else {
         d = search(deleted, oldinode);
         if (d < 0) {
-          // neither in c-list nor d-list
+          // Case 2.3: neither in c-list nor d-list
           insertCreated(newinode, c);
           insertDeleted(oldinode, d);
         }
@@ -302,8 +306,356 @@ public class INodeDirectoryWithSnapshot 
           + ", deleted=" + toString(deleted) + "}";
     }
   }
+  
+  /**
+   * The difference between two snapshots. {@link INodeDirectoryWithSnapshot}
+   * maintains a list of snapshot diffs,
+   * <pre>
+   *   d_1 -> d_2 -> ... -> d_n -> null,
+   * </pre>
+   * where -> denotes the {@link SnapshotDiff#posteriorDiff} reference. The
+   * current directory state is stored in the field of {@link INodeDirectory}.
+   * The snapshot state can be obtained by applying the diffs one-by-one in
+   * reversed chronological order.  Let s_1, s_2, ..., s_n be the corresponding
+   * snapshots.  Then,
+   * <pre>
+   *   s_n                     = (current state) - d_n;
+   *   s_{n-1} = s_n - d_{n-1} = (current state) - d_n - d_{n-1};
+   *   ...
+   *   s_k     = s_{k+1} - d_k = (current state) - d_n - d_{n-1} - ... - d_k.
+   * </pre>
+   */
+  class SnapshotDiff implements Comparable<Snapshot> {
+    /** The snapshot will be obtained after this diff is applied. */
+    final Snapshot snapshot;
+    /** The size of the children list at snapshot creation time. */
+    final int childrenSize;
+    /**
+     * Posterior diff is the diff that happened after this diff.
+     * The posterior diff should be first applied to obtain the posterior
+     * snapshot and then apply this diff in order to obtain this snapshot.
+     * If the posterior diff is null, the posterior state is the current state. 
+     */
+    private SnapshotDiff posteriorDiff;
+    /** The children list diff. */
+    private final Diff diff = new Diff();
+    /** The snapshot inode data.  It is null when there is no change. */
+    private INodeDirectory snapshotINode = null;
+
+    private SnapshotDiff(Snapshot snapshot, INodeDirectory dir) {
+      Preconditions.checkNotNull(snapshot, "snapshot is null");
+
+      this.snapshot = snapshot;
+      this.childrenSize = dir.getChildrenList(null).size();
+    }
 
-  public INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt) {
+    /** Compare diffs with snapshot ID. */
+    @Override
+    public int compareTo(final Snapshot that_snapshot) {
+      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that_snapshot);
+    }
+    
+    /** Is the inode the root of the snapshot? */
+    boolean isSnapshotRoot() {
+      return snapshotINode == snapshot.getRoot();
+    }
+
+    /** Copy the INode state to the snapshot if it is not done already. */
+    private Pair<INodeDirectory, INodeDirectory> checkAndInitINode(
+        INodeDirectory snapshotCopy) {
+      if (snapshotINode != null) {
+        // already initialized.
+        return null;
+      }
+      final INodeDirectoryWithSnapshot dir = INodeDirectoryWithSnapshot.this;
+      if (snapshotCopy == null) {
+        snapshotCopy = new INodeDirectory(dir, false);
+      }
+      return new Pair<INodeDirectory, INodeDirectory>(dir, snapshotCopy);
+    }
+
+    /** @return the snapshot object of this diff. */
+    Snapshot getSnapshot() {
+      return snapshot;
+    }
+
+    private INodeDirectory getSnapshotINode() {
+      // get from this diff, then the posterior diff and then the current inode
+      return snapshotINode != null? snapshotINode
+          : posteriorDiff != null? posteriorDiff.getSnapshotINode()
+              : INodeDirectoryWithSnapshot.this; 
+    }
+
+    /**
+     * @return The children list of a directory in a snapshot.
+     *         Since the snapshot is read-only, the logical view of the list is
+     *         never changed although the internal data structure may mutate.
+     */
+    ReadOnlyList<INode> getChildrenList() {
+      return new ReadOnlyList<INode>() {
+        private List<INode> children = null;
+
+        private List<INode> initChildren() {
+          if (children == null) {
+            final ReadOnlyList<INode> posterior = posteriorDiff != null?
+                posteriorDiff.getChildrenList()
+                : INodeDirectoryWithSnapshot.this.getChildrenList(null);
+            children = diff.apply2Current(ReadOnlyList.Util.asList(posterior));
+          }
+          return children;
+        }
+
+        @Override
+        public Iterator<INode> iterator() {
+          return initChildren().iterator();
+        }
+    
+        @Override
+        public boolean isEmpty() {
+          return childrenSize == 0;
+        }
+    
+        @Override
+        public int size() {
+          return childrenSize;
+        }
+    
+        @Override
+        public INode get(int i) {
+          return initChildren().get(i);
+        }
+      };
+    }
+
+    /** @return the child with the given name. */
+    INode getChild(byte[] name, boolean checkPosterior) {
+      final INode[] array = diff.accessPrevious(name);
+      if (array != null) {
+        // this diff is able to find it
+        return array[0]; 
+      } else if (!checkPosterior) {
+        // Since checkPosterior is false, return null, i.e. not found.   
+        return null;
+      } else {
+        // return the posterior INode.
+        return posteriorDiff != null? posteriorDiff.getChild(name, true)
+            : INodeDirectoryWithSnapshot.this.getChild(name, null);
+      }
+    }
+    
+    @Override
+    public String toString() {
+      return "\n  " + snapshot + " (-> "
+          + (posteriorDiff == null? null: posteriorDiff.snapshot)
+          + ") childrenSize=" + childrenSize + ", " + diff;
+    }
+  }
+  
+  /** Create an {@link INodeDirectoryWithSnapshot} with the given snapshot.*/
+  public static INodeDirectoryWithSnapshot newInstance(INodeDirectory dir,
+      Snapshot latest) {
+    final INodeDirectoryWithSnapshot withSnapshot
+        = new INodeDirectoryWithSnapshot(dir, true, null);
+    if (latest != null) {
+      // add a diff for the latest snapshot
+      withSnapshot.addSnapshotDiff(latest, dir, false);
+    }
+    return withSnapshot;
+  }
+
+  /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
+  private final List<SnapshotDiff> diffs;
+
+  INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
+      List<SnapshotDiff> diffs) {
     super(that, adopt, that.getNsQuota(), that.getDsQuota());
+    this.diffs = diffs != null? diffs: new ArrayList<SnapshotDiff>();
+  }
+
+  /** Add a {@link SnapshotDiff} for the given snapshot and directory. */
+  SnapshotDiff addSnapshotDiff(Snapshot snapshot, INodeDirectory dir,
+      boolean isSnapshotCreation) {
+    final SnapshotDiff last = getLastSnapshotDiff();
+    final SnapshotDiff d = new SnapshotDiff(snapshot, dir); 
+
+    if (isSnapshotCreation) {
+      //for snapshot creation, snapshotINode is the same as the snapshot root
+      d.snapshotINode = snapshot.getRoot();
+    }
+    diffs.add(d);
+    if (last != null) {
+      last.posteriorDiff = d;
+    }
+    return d;
+  }
+
+  SnapshotDiff getLastSnapshotDiff() {
+    final int n = diffs.size();
+    return n == 0? null: diffs.get(n - 1);
+  }
+
+  /** @return the last snapshot. */
+  public Snapshot getLastSnapshot() {
+    final SnapshotDiff last = getLastSnapshotDiff();
+    return last == null? null: last.getSnapshot();
+  }
+
+  /**
+   * Check if the latest snapshot diff exists.  If not, add it.
+   * @return the latest snapshot diff, which is never null.
+   */
+  private SnapshotDiff checkAndAddLatestSnapshotDiff(Snapshot latest) {
+    final SnapshotDiff last = getLastSnapshotDiff();
+    return last != null && last.snapshot.equals(latest)? last
+        : addSnapshotDiff(latest, this, false);
+  }
+  
+  /**
+   * Check if the latest {@link Diff} exists.  If not, add it.
+   * @return the latest {@link Diff}, which is never null.
+   */
+  Diff checkAndAddLatestDiff(Snapshot latest) {
+    return checkAndAddLatestSnapshotDiff(latest).diff;
+  }
+
+  /**
+   * @return {@link #diffs}
+   */
+  @VisibleForTesting
+  List<SnapshotDiff> getSnapshotDiffs() {
+    return diffs;
+  }
+
+  /**
+   * @return the diff corresponding to the given snapshot.
+   *         When the diff is null, it means that the current state and
+   *         the corresponding snapshot state are the same. 
+   */
+  SnapshotDiff getSnapshotDiff(Snapshot snapshot) {
+    if (snapshot == null) {
+      // snapshot == null means the current state, therefore, return null.
+      return null;
+    }
+    final int i = Collections.binarySearch(diffs, snapshot);
+    if (i >= 0) {
+      // exact match
+      return diffs.get(i);
+    } else {
+      // Exact match not found means that there were no changes between
+      // given snapshot and the next state so that the diff for the given
+      // snapshot was not recorded.  Thus, return the next state.
+      final int j = -i - 1;
+      return j < diffs.size()? diffs.get(j): null;
+    }
+  }
+
+  @Override
+  public Pair<INodeDirectory, INodeDirectory> recordModification(Snapshot latest) {
+    return save2Snapshot(latest, null);
+  }
+
+  public Pair<INodeDirectory, INodeDirectory> save2Snapshot(Snapshot latest,
+      INodeDirectory snapshotCopy) {
+    return latest == null? null
+        : checkAndAddLatestSnapshotDiff(latest).checkAndInitINode(snapshotCopy);
+  }
+
+  @Override
+  public Pair<? extends INode, ? extends INode> saveChild2Snapshot(
+      INode child, Snapshot latest) {
+    Preconditions.checkArgument(!child.isDirectory(),
+        "child is a directory, child=%s", child);
+
+    final SnapshotDiff diff = checkAndAddLatestSnapshotDiff(latest);
+    if (diff.getChild(child.getLocalNameBytes(), false) != null) {
+      // it was already saved in the latest snapshot earlier.  
+      return null;
+    }
+
+    final Pair<? extends INode, ? extends INode> p = child.createSnapshotCopy();
+    diff.diff.modify(p.right, p.left);
+    return p;
+  }
+
+  @Override
+  public boolean addChild(INode inode, boolean setModTime, Snapshot latest) {
+    Diff diff = null;
+    Integer undoInfo = null;
+    if (latest != null) {
+      diff = checkAndAddLatestDiff(latest);
+      undoInfo = diff.create(inode);
+    }
+    final boolean added = super.addChild(inode, setModTime, null);
+    if (!added && undoInfo != null) {
+      diff.undoCreate(inode, undoInfo);
+    }
+    return added; 
+  }
+
+  @Override
+  public INode removeChild(INode child, Snapshot latest) {
+    Diff diff = null;
+    Triple<Integer, INode, Integer> undoInfo = null;
+    if (latest != null) {
+      diff = checkAndAddLatestDiff(latest);
+      undoInfo = diff.delete(child);
+    }
+    final INode removed = super.removeChild(child, null);
+    if (removed == null && undoInfo != null) {
+      diff.undoDelete(child, undoInfo);
+    }
+    return removed;
+  }
+
+  @Override
+  public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getChildrenList(): super.getChildrenList(null);
+  }
+
+  @Override
+  public INode getChild(byte[] name, Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getChild(name, true): super.getChild(name, null);
+  }
+
+  @Override
+  public String getUserName(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getSnapshotINode().getUserName()
+        : super.getUserName(null);
+  }
+
+  @Override
+  public String getGroupName(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getSnapshotINode().getGroupName()
+        : super.getGroupName(null);
+  }
+
+  @Override
+  public FsPermission getFsPermission(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getSnapshotINode().getFsPermission()
+        : super.getFsPermission(null);
+  }
+
+  @Override
+  public long getAccessTime(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getSnapshotINode().getAccessTime()
+        : super.getAccessTime(null);
+  }
+
+  @Override
+  public long getModificationTime(Snapshot snapshot) {
+    final SnapshotDiff diff = getSnapshotDiff(snapshot);
+    return diff != null? diff.getSnapshotINode().getModificationTime()
+        : super.getModificationTime(null);
+  }
+  
+  @Override
+  public String toString() {
+    return super.toString() + ", diffs=" + getSnapshotDiffs();
   }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java Fri Dec 21 01:30:49 2012
@@ -52,7 +52,7 @@ public class INodeFileWithLink extends I
   }
   
   /** Insert inode to the circular linked list. */
-  public void insert(INodeFileWithLink inode) {
+  void insert(INodeFileWithLink inode) {
     inode.setNext(this.getNext());
     this.setNext(inode);
   }
@@ -112,10 +112,10 @@ public class INodeFileWithLink extends I
       // linked INodes, so that in case the current INode is retrieved from the
       // blocksMap before it is removed or updated, the correct replication
       // number can be retrieved.
-      this.setFileReplication(maxReplication);
+      this.setFileReplication(maxReplication, null);
       this.next = null;
       // clear parent
-      parent = null;
+      setParent(null);
     }
     return 1;
   }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Fri Dec 21 01:30:49 2012
@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.server.na
 import java.util.Comparator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -37,19 +40,52 @@ public class Snapshot implements Compara
     }
   };
 
+  /** @return the latest snapshot taken on the given inode. */
+  public static Snapshot findLatestSnapshot(INode inode) {
+    Snapshot latest = null;
+    for(; inode != null; inode = inode.getParent()) {
+      if (inode instanceof INodeDirectorySnapshottable) {
+        final Snapshot s = ((INodeDirectorySnapshottable)inode).getLastSnapshot();
+        if (ID_COMPARATOR.compare(latest, s) < 0) {
+          latest = s;
+        }
+      }
+    }
+    return latest;
+  }
+
+  /** The root directory of the snapshot. */
+  public class Root extends INodeDirectory {
+    Root(INodeDirectory other) {
+      super(other, false);
+    }
+
+    @Override
+    public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
+      return getParent().getChildrenList(snapshot);
+    }
+
+    @Override
+    public INode getChild(byte[] name, Snapshot snapshot) {
+      return getParent().getChild(name, snapshot);
+    }
+  }
+
   /** Snapshot ID. */
   private final int id;
   /** The root directory of the snapshot. */
-  private final INodeDirectoryWithSnapshot root;
+  private final Root root;
 
   Snapshot(int id, String name, INodeDirectorySnapshottable dir) {
     this.id = id;
-    this.root = new INodeDirectoryWithSnapshot(dir, false);
+    this.root = new Root(dir);
+
     this.root.setLocalName(name);
+    this.root.setParent(dir);
   }
 
   /** @return the root directory of the snapshot. */
-  public INodeDirectoryWithSnapshot getRoot() {
+  public Root getRoot() {
     return root;
   }
 
@@ -59,7 +95,22 @@ public class Snapshot implements Compara
   }
   
   @Override
+  public boolean equals(Object that) {
+    if (this == that) {
+      return true;
+    } else if (that == null || !(that instanceof Snapshot)) {
+      return false;
+    }
+    return this.id == ((Snapshot)that).id;
+  }
+  
+  @Override
+  public int hashCode() {
+    return id;
+  }
+  
+  @Override
   public String toString() {
-    return getClass().getSimpleName() + ":" + root.getLocalName();
+    return getClass().getSimpleName() + "." + root.getLocalName();
   }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Fri Dec 21 01:30:49 2012
@@ -24,12 +24,8 @@ import java.util.concurrent.atomic.Atomi
 
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
-import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;
-import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 
 /**
  * Manage snapshottable directories and their snapshots.
@@ -44,7 +40,6 @@ import org.apache.hadoop.hdfs.util.ReadO
  * if necessary.
  */
 public class SnapshotManager implements SnapshotStats {
-  private final FSNamesystem namesystem;
   private final FSDirectory fsdir;
 
   private final AtomicInteger numSnapshottableDirs = new AtomicInteger();
@@ -56,9 +51,7 @@ public class SnapshotManager implements 
   private final List<INodeDirectorySnapshottable> snapshottables
       = new ArrayList<INodeDirectorySnapshottable>();
 
-  public SnapshotManager(final FSNamesystem namesystem,
-      final FSDirectory fsdir) {
-    this.namesystem = namesystem;
+  public SnapshotManager(final FSDirectory fsdir) {
     this.fsdir = fsdir;
   }
 
@@ -66,20 +59,19 @@ public class SnapshotManager implements 
    * Set the given directory as a snapshottable directory.
    * If the path is already a snapshottable directory, update the quota.
    */
-  public void setSnapshottable(final String path, final int snapshotQuota
-      ) throws IOException {
-    final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path);
+  public void setSnapshottable(final String path) throws IOException {
+    final INodesInPath iip = fsdir.getINodesInPath(path);
+    final INodeDirectory d = INodeDirectory.valueOf(iip.getINode(0), path);
     if (d.isSnapshottable()) {
       //The directory is already a snapshottable directory.
-      ((INodeDirectorySnapshottable)d).setSnapshotQuota(snapshotQuota);
+      ((INodeDirectorySnapshottable)d).setSnapshotQuota(
+          INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
       return;
     }
 
     final INodeDirectorySnapshottable s
-        = INodeDirectorySnapshottable.newInstance(d, snapshotQuota);
-    fsdir.replaceINodeDirectory(path, d, s);
+        = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot());
     snapshottables.add(s);
-
     numSnapshottableDirs.getAndIncrement();
   }
 
@@ -90,15 +82,15 @@ public class SnapshotManager implements 
    */
   public void resetSnapshottable(final String path
       ) throws IOException {
+    final INodesInPath iip = fsdir.getINodesInPath(path);
     final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
-        fsdir.getINode(path), path);
+        iip.getINode(0), path);
     if (s.getNumSnapshots() > 0) {
       throw new SnapshotException("The directory " + path + " has snapshot(s). "
           + "Please redo the operation after removing all the snapshots.");
     }
 
-    final INodeDirectory d = new INodeDirectory(s, true);
-    fsdir.replaceINodeDirectory(path, s, d);
+    s.replaceSelf(iip.getLatestSnapshot());
     snapshottables.remove(s);
 
     numSnapshottableDirs.getAndDecrement();
@@ -119,10 +111,10 @@ public class SnapshotManager implements 
   public void createSnapshot(final String snapshotName, final String path
       ) throws IOException {
     // Find the source root directory path where the snapshot is taken.
+    final INodesInPath i = fsdir.getMutableINodesInPath(path);
     final INodeDirectorySnapshottable srcRoot
-        = INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
-    final Snapshot s = srcRoot.addSnapshot(snapshotID, snapshotName);
-    new SnapshotCreation().processRecursively(srcRoot, s.getRoot());
+        = INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
+    srcRoot.addSnapshot(snapshotID, snapshotName);
       
     //create success, update id
     snapshotID++;
@@ -154,83 +146,6 @@ public class SnapshotManager implements 
     srcRoot.renameSnapshot(path, oldSnapshotName, newSnapshotName);
   }
   
-  /**
-   * Create a snapshot of subtrees by recursively coping the directory
-   * structure from the source directory to the snapshot destination directory.
-   * This creation algorithm requires O(N) running time and O(N) memory,
-   * where N = # files + # directories + # symlinks. 
-   */
-  class SnapshotCreation {
-    /** Process snapshot creation recursively. */
-    private void processRecursively(final INodeDirectory srcDir,
-        final INodeDirectory dstDir) throws IOException {
-      final ReadOnlyList<INode> children = srcDir.getChildrenList(null);
-      if (!children.isEmpty()) {
-        final List<INode> inodes = new ArrayList<INode>(children.size());
-        for(final INode c : new ArrayList<INode>(ReadOnlyList.Util.asList(children))) {
-          final INode i;
-          if (c == null) {
-            i = null;
-          } else if (c instanceof INodeDirectory) {
-            //also handle INodeDirectoryWithQuota
-            i = processINodeDirectory((INodeDirectory)c);
-          } else if (c instanceof INodeFileUnderConstruction) {
-            //TODO: support INodeFileUnderConstruction
-            throw new IOException("Not yet supported.");
-          } else if (c instanceof INodeFile) {
-            i = processINodeFile(srcDir, (INodeFile)c);
-          } else if (c instanceof INodeSymlink) {
-            i = new INodeSymlink((INodeSymlink)c);
-          } else {
-            throw new AssertionError("Unknow INode type: " + c.getClass()
-                + ", inode = " + c);
-          }
-          i.setParent(dstDir);
-          inodes.add(i);
-        }
-        dstDir.setChildren(inodes);
-      }
-    }
-    
-    /**
-     * Create destination INodeDirectory and make the recursive call. 
-     * @return destination INodeDirectory.
-     */
-    private INodeDirectory processINodeDirectory(final INodeDirectory srcChild
-        ) throws IOException {
-      final INodeDirectory dstChild = new INodeDirectory(srcChild, false);
-      dstChild.setChildren(null);
-      processRecursively(srcChild, dstChild);
-      return dstChild;
-    }
-
-    /**
-     * Create destination INodeFileSnapshot and update source INode type.
-     * @return destination INodeFileSnapshot.
-     */
-    private INodeFileSnapshot processINodeFile(final INodeDirectory parent,
-        final INodeFile file) {
-      final INodeFileWithLink srcWithLink;
-      //check source INode type
-      if (file instanceof INodeFileWithLink) {
-        srcWithLink = (INodeFileWithLink)file;
-      } else {
-        //source is an INodeFile, replace the source.
-        srcWithLink = new INodeFileWithLink(file);
-        file.removeNode();
-        parent.addChild(srcWithLink, false);
-
-        //update block map
-        namesystem.getBlockManager().addBlockCollection(srcWithLink);
-      }
-      
-      //insert the snapshot to src's linked list.
-      final INodeFileSnapshot snapshot = new INodeFileSnapshot(srcWithLink); 
-      srcWithLink.insert(snapshot);
-      return snapshot;
-    }
-  }
-
   @Override
   public long getNumSnapshottableDirs() {
     return numSnapshottableDirs.get();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Fri Dec 21 01:30:49 2012
@@ -158,7 +158,7 @@ public class TestFsLimits {
     Class<?> generated = null;
     try {
       fs.verifyFsLimits(inodes, 1, child);
-      rootInode.addChild(child, false);
+      rootInode.addChild(child, false, null);
     } catch (QuotaExceededException e) {
       generated = e.getClass();
     }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Fri Dec 21 01:30:49 2012
@@ -149,11 +149,11 @@ public class TestINodeFile {
     assertEquals("f", inf.getFullPathName());
     assertEquals("", inf.getLocalParentDir());
 
-    dir.addChild(inf, false);
+    dir.addChild(inf, false, null);
     assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
     assertEquals("d", inf.getLocalParentDir());
     
-    root.addChild(dir, false);
+    root.addChild(dir, false, null);
     assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
     assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Fri Dec 21 01:30:49 2012
@@ -120,6 +120,14 @@ public class TestSnapshotPathINodes {
     assertEquals(index, inodesInPath.getSnapshotRootIndex());
     assertEquals(isSnapshot? snapshot: null, inodesInPath.getPathSnapshot());
     assertEquals(isSnapshot? null: snapshot, inodesInPath.getLatestSnapshot());
+    if (isSnapshot && index >= 0) {
+      assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass());
+    }
+  }
+
+  static void assertINodeFile(INode inode, Path path) {
+    assertEquals(path.getName(), inode.getLocalName());
+    assertEquals(INodeFile.class, inode.getClass());
   }
 
   /** 
@@ -140,6 +148,8 @@ public class TestSnapshotPathINodes {
     assertSnapshot(nodesInPath, false, null, -1);
 
     // The last INode should be associated with file1
+    assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
+        inodes[components.length - 1] != null);
     assertEquals(inodes[components.length - 1].getFullPathName(),
         file1.toString());
     assertEquals(inodes[components.length - 2].getFullPathName(),
@@ -189,12 +199,9 @@ public class TestSnapshotPathINodes {
     // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
     final Snapshot snapshot = getSnapshot(nodesInPath, "s1");
     assertSnapshot(nodesInPath, true, snapshot, 3);
-    assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
-        INodeDirectoryWithSnapshot);
     // Check the INode for file1 (snapshot file)
     INode snapshotFileNode = inodes[inodes.length - 1]; 
-    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
-    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertINodeFile(snapshotFileNode, file1);
     assertTrue(snapshotFileNode.getParent() instanceof 
         INodeDirectoryWithSnapshot);
     
@@ -206,9 +213,7 @@ public class TestSnapshotPathINodes {
     // snapshotRootIndex should be -1.
     assertSnapshot(nodesInPath, true, snapshot, -1);
     // Check the INode for file1 (snapshot file)
-    snapshotFileNode = inodes[inodes.length - 1]; 
-    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
-    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertINodeFile(nodesInPath.getLastINode(), file1);
     
     // Call getExistingPathINodes and request 2 INodes.
     nodesInPath = fsdir.rootDir.getExistingPathINodes(components, 2, false);
@@ -217,10 +222,7 @@ public class TestSnapshotPathINodes {
     // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
     // SnapshotRootIndex should be 0.
     assertSnapshot(nodesInPath, true, snapshot, 0);
-    snapshotFileNode = inodes[inodes.length - 1];
-    // Check the INode for snapshot of file1
-    assertEquals(snapshotFileNode.getLocalName(), file1.getName());
-    assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
+    assertINodeFile(nodesInPath.getLastINode(), file1);
     
     // Resolve the path "/TestSnapshot/sub1/.snapshot"  
     String dotSnapshotPath = sub1.toString() + "/.snapshot";
@@ -271,14 +273,8 @@ public class TestSnapshotPathINodes {
       snapshot = getSnapshot(nodesInPath, "s2");
       assertSnapshot(nodesInPath, true, snapshot, 3);
   
-      assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
-          INodeDirectoryWithSnapshot);
       // Check the INode for file1 (snapshot file)
-      INode snapshotFileNode = inodes[inodes.length - 1]; 
-      assertEquals(snapshotFileNode.getLocalName(), file1.getName());
-      assertTrue(snapshotFileNode instanceof INodeFileSnapshot);
-      assertTrue(snapshotFileNode.getParent() instanceof 
-          INodeDirectoryWithSnapshot);
+      assertINodeFile(inodes[inodes.length - 1], file1);
     }
 
     // Check the INodes for path /TestSnapshot/sub1/file1
@@ -339,12 +335,8 @@ public class TestSnapshotPathINodes {
       // SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s4, null}
       assertSnapshot(nodesInPath, true, s4, 3);
   
-      assertTrue(inodes[nodesInPath.getSnapshotRootIndex()] instanceof 
-          INodeDirectoryWithSnapshot);
       // Check the last INode in inodes, which should be null
       assertNull(inodes[inodes.length - 1]);
-      assertTrue(inodes[inodes.length - 2] instanceof 
-          INodeDirectoryWithSnapshot);
     }
 
     // Check the inodes for /TestSnapshot/sub1/file3
@@ -372,7 +364,8 @@ public class TestSnapshotPathINodes {
    * Test {@link INodeDirectory#getExistingPathINodes(byte[][], int, boolean)} 
    * for snapshot file while modifying file after snapshot.
    */
-  @Test
+//  TODO: disable it temporarily since it uses append.
+//  @Test
   public void testSnapshotPathINodesAfterModification() throws Exception {
     //file1 was deleted, create it again.
     DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
@@ -430,10 +423,10 @@ public class TestSnapshotPathINodes {
     // The number of inodes should be equal to components.length
     assertEquals(newInodes.length, components.length);
     // The last INode should be associated with file1
-    assertEquals(newInodes[components.length - 1].getFullPathName(),
-        file1.toString());
+    final int last = components.length - 1;
+    assertEquals(newInodes[last].getFullPathName(), file1.toString());
     // The modification time of the INode for file3 should have been changed
-    Assert.assertFalse(inodes[components.length - 1].getModificationTime() ==
-        newInodes[components.length - 1].getModificationTime());
+    Assert.assertFalse(inodes[last].getModificationTime()
+        == newInodes[last].getModificationTime());
   }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java Fri Dec 21 01:30:49 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
@@ -31,6 +32,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 
 /**
  * Helper for writing snapshot related tests
@@ -283,4 +285,11 @@ public class SnapshotTestHelper {
       }
     }
   }
+
+  static void dumpTreeRecursively(INode inode) {
+    if (INode.LOG.isDebugEnabled()) {
+      inode.dumpTreeRecursively(
+          new PrintWriter(System.out, true), new StringBuilder(), null);
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeDirectoryWithSnapshot.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeDirectoryWithSnapshot.java Fri Dec 21 01:30:49 2012
@@ -268,8 +268,8 @@ public class TestINodeDirectoryWithSnaps
     final int i = Diff.search(current, inode);
     Assert.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
-    final INodeDirectory newinode = oldinode.createSnapshotCopy().right;
-    newinode.updateModificationTime(oldinode.getModificationTime() + 1);
+    final INodeDirectory newinode = new INodeDirectory(oldinode, false);
+    newinode.updateModificationTime(oldinode.getModificationTime() + 1, null);
 
     current.set(i, newinode);
     if (diff != null) {
@@ -305,7 +305,7 @@ public class TestINodeDirectoryWithSnaps
   public void testIdCmp() {
     final INodeDirectory dir = new INodeDirectory("foo", PERM);
     final INodeDirectorySnapshottable snapshottable
-        = INodeDirectorySnapshottable.newInstance(dir, 100);
+        = new INodeDirectorySnapshottable(dir);
     final Snapshot[] snapshots = {
       new Snapshot(1, "s1", snapshottable),
       new Snapshot(1, "s1", snapshottable),

Added: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1424782&view=auto
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (added)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Fri Dec 21 01:30:49 2012
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/** Testing nested snapshots. */
+public class TestNestedSnapshots {
+  {
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(UserGroupInformation.class)).getLogger().setLevel(Level.OFF);
+  }
+
+  private static final long SEED = 0;
+  private static final short REPLICATION = 3;
+  private static final long BLOCKSIZE = 1024;
+  
+  private static Configuration conf = new Configuration();
+  private static MiniDFSCluster cluster;
+  private static FSNamesystem fsn;
+  private static DistributedFileSystem hdfs;
+  
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+        .build();
+    cluster.waitActive();
+
+    fsn = cluster.getNamesystem();
+    hdfs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Create a snapshot for /test/foo and create another snapshot for
+   * /test/foo/bar.  Files created before the snapshots should appear in both
+   * snapshots and the files created after the snapshots should not appear in
+   * any of the snapshots.  
+   */
+  @Test
+  public void testNestedSnapshots() throws Exception {
+    final Path foo = new Path("/test/foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file1 = new Path(bar, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
+    print("create file " + file1);
+
+    final String s1name = "foo-s1";
+    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, s1name); 
+    hdfs.allowSnapshot(foo.toString());
+    print("allow snapshot " + foo);
+    hdfs.createSnapshot(s1name, foo.toString());
+    print("create snapshot " + s1name);
+
+    final String s2name = "bar-s2";
+    final Path s2path = SnapshotTestHelper.getSnapshotRoot(bar, s2name); 
+    hdfs.allowSnapshot(bar.toString());
+    print("allow snapshot " + bar);
+    hdfs.createSnapshot(s2name, bar.toString());
+    print("create snapshot " + s2name);
+
+    final Path file2 = new Path(bar, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, SEED);
+    print("create file " + file2);
+    
+    assertFile(s1path, s2path, file1, true, true, true);
+    assertFile(s1path, s2path, file2, true, false, false);
+  }
+
+  private static void print(String mess) throws UnresolvedLinkException {
+    System.out.println("XXX " + mess);
+    SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
+  }
+
+  private static void assertFile(Path s1, Path s2, Path file,
+      Boolean... expected) throws IOException {
+    final Path[] paths = {
+        file,
+        new Path(s1, "bar/" + file.getName()),
+        new Path(s2, file.getName())
+    };
+    Assert.assertEquals(expected.length, paths.length);
+    for(int i = 0; i < paths.length; i++) {
+      final boolean computed = hdfs.exists(paths[i]);
+      Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
+    }
+  }
+}

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Fri Dec 21 01:30:49 2012
@@ -51,7 +51,7 @@ import org.junit.rules.ExpectedException
  * ensure snapshots remain unchanges.
  */
 public class TestSnapshot {
-  protected static final long seed = 0;
+  private static final long seed = Time.now();
   protected static final short REPLICATION = 3;
   protected static final long BLOCKSIZE = 1024;
   /** The number of times snapshots are created for a snapshottable directory  */
@@ -64,7 +64,7 @@ public class TestSnapshot {
   protected static FSNamesystem fsn;
   protected DistributedFileSystem hdfs;
 
-  private static Random random = new Random(Time.now());
+  private static Random random = new Random(seed);
   
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -124,7 +124,7 @@ public class TestSnapshot {
     TestDirectoryTree.Node[] nodes = new TestDirectoryTree.Node[2];
     // Each time we will create a snapshot for the top level dir
     Path root = SnapshotTestHelper.createSnapshot(hdfs,
-        dirTree.topNode.nodePath, this.genSnapshotName());
+        dirTree.topNode.nodePath, genSnapshotName());
     snapshotList.add(root);
     nodes[0] = dirTree.topNode; 
     SnapshotTestHelper.checkSnapshotCreation(hdfs, root, nodes[0].nodePath);
@@ -136,7 +136,7 @@ public class TestSnapshot {
     excludedList.add(nodes[0]);
     nodes[1] = dirTree.getRandomDirNode(random, excludedList);
     root = SnapshotTestHelper.createSnapshot(hdfs, nodes[1].nodePath,
-        this.genSnapshotName());
+        genSnapshotName());
     snapshotList.add(root);
     SnapshotTestHelper.checkSnapshotCreation(hdfs, root, nodes[1].nodePath);
     return nodes;
@@ -172,6 +172,8 @@ public class TestSnapshot {
       // make changes to the current directory
       modifyCurrentDirAndCheckSnapshots(mods);
     }
+    System.out.println("XXX done:");
+    SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
   }
   
   /**
@@ -231,9 +233,10 @@ public class TestSnapshot {
       Modification delete = new FileDeletion(
           node.fileList.get((node.nullFileIndex + 1) % node.fileList.size()),
           hdfs);
-      Modification append = new FileAppend(
-          node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
-          hdfs, (int) BLOCKSIZE);
+//      TODO: fix append for snapshots
+//      Modification append = new FileAppend(
+//          node.fileList.get((node.nullFileIndex + 2) % node.fileList.size()),
+//          hdfs, (int) BLOCKSIZE);
       Modification chmod = new FileChangePermission(
           node.fileList.get((node.nullFileIndex + 3) % node.fileList.size()),
           hdfs, genRandomPermission());
@@ -314,6 +317,11 @@ public class TestSnapshot {
     abstract void modify() throws Exception;
 
     abstract void checkSnapshots() throws Exception;
+    
+    @Override
+    public String toString() {
+      return type + " " + file;
+    }
   }
 
   /**
@@ -497,8 +505,6 @@ public class TestSnapshot {
 
     @Override
     void modify() throws Exception {
-      System.out.println("BEFORE create " + file + "\n"
-              + fsn.getFSDirectory().getINode("/").dumpTreeRecursively());
       DFSTestUtil.createFile(fs, file, fileLen, fileLen, BLOCKSIZE,
           REPLICATION, seed);
     }
@@ -511,9 +517,7 @@ public class TestSnapshot {
         if (snapshotFile != null) {
           boolean computed = fs.exists(snapshotFile);
           boolean expected = fileStatusMap.get(snapshotFile) != null;
-          assertEquals("snapshotFile=" + snapshotFile + "\n"
-              + fsn.getFSDirectory().getINode("/").dumpTreeRecursively(),
-              expected, computed);
+          assertEquals(expected, computed);
           if (computed) {
             FileStatus currentSnapshotStatus = fs.getFileStatus(snapshotFile);
             FileStatus originalStatus = fileStatusMap.get(snapshotFile);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Fri Dec 21 01:30:49 2012
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -140,9 +139,6 @@ public class TestSnapshotBlocksMap {
     // Check the INode information
     BlockCollection bcAfterDeletion = blockInfoAfterDeletion
         .getBlockCollection();
-    // The INode in the blocksMap should be no longer the original INode for
-    // file0
-    assertFalse(bcAfterDeletion == inode);
     
     // Compare the INode in the blocksMap with INodes for snapshots
     Path snapshot1File0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Fri Dec 21 01:30:49 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.SnapshotDiff;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.After;
 import org.junit.Before;
@@ -89,10 +90,10 @@ public class TestSnapshotRename {
     for (int i = 0; i < listByName.size(); i++) {
       assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
     }
-    List<Snapshot> listByTime = srcRoot.getSnapshots();
+    List<SnapshotDiff> listByTime = srcRoot.getSnapshotDiffs();
     assertEquals(names.length, listByTime.size());
     for (int i = 0; i < listByTime.size(); i++) {
-      assertEquals(names[i], listByTime.get(i).getRoot().getLocalName());
+      assertEquals(names[i], listByTime.get(i).getSnapshot().getRoot().getLocalName());
     }
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java?rev=1424782&r1=1424781&r2=1424782&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java Fri Dec 21 01:30:49 2012
@@ -116,6 +116,11 @@ public class TestSnapshotReplication {
         (short) (REPLICATION - 1));
   }
   
+  INodeFile getINodeFile(Path p) throws Exception {
+    final String s = p.toString();
+    return INodeFile.valueOf(fsdir.getINode(s), s);
+  }
+ 
   /**
    * Check the replication for both the current file and all its prior snapshots
    * 
@@ -133,13 +138,11 @@ public class TestSnapshotReplication {
   private void checkSnapshotFileReplication(Path currentFile,
       Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
     // First check the getBlockReplication for the INode of the currentFile
-    INodeFileWithLink inodeOfCurrentFile = (INodeFileWithLink) fsdir
-        .getINode(currentFile.toString());
+    final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
     assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
     // Then check replication for every snapshot
     for (Path ss : snapshotRepMap.keySet()) {
-      INodeFileWithLink ssInode = (INodeFileWithLink) fsdir.getINode(ss
-          .toString());
+      final INodeFile ssInode = getINodeFile(ss);
       // The replication number derived from the
       // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
       assertEquals(expectedBlockRep, ssInode.getBlockReplication());
@@ -167,9 +170,7 @@ public class TestSnapshotReplication {
       Path snapshot = new Path(snapshotRoot, file1.getName());
       
       // Check the replication stored in the INode of the snapshot of file1
-      INode inode = fsdir.getINode(snapshot.toString());
-      assertTrue(inode instanceof INodeFileWithLink);
-      assertEquals(fileRep, ((INodeFileWithLink) inode).getFileReplication());
+      assertEquals(fileRep, getINodeFile(snapshot).getFileReplication());
       snapshotRepMap.put(snapshot, fileRep);
       
       // Increase the replication factor by 1
@@ -215,8 +216,7 @@ public class TestSnapshotReplication {
     hdfs.delete(file1, true);
     // Check replication of snapshots
     for (Path ss : snapshotRepMap.keySet()) {
-      INodeFileWithLink ssInode = (INodeFileWithLink) fsdir.getINode(ss
-          .toString());
+      final INodeFile ssInode = getINodeFile(ss);
       // The replication number derived from the
       // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
       assertEquals(REPLICATION, ssInode.getBlockReplication());



Mime
View raw message