hadoop-hdfs-commits mailing list archives

From: sur...@apache.org
Subject: svn commit: r1437256 [1/2] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snaps...
Date: Wed, 23 Jan 2013 02:48:02 GMT
Author: suresh
Date: Wed Jan 23 02:48:01 2013
New Revision: 1437256

URL: http://svn.apache.org/viewvc?rev=1437256&view=rev
Log:
HDFS-4126. Add reading/writing snapshot information to FSImage. Contributed by Jing Zhao.

Added:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
Modified:
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
    hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt Wed Jan 23 02:48:01 2013
@@ -113,3 +113,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4098. Add FileWithSnapshot, INodeFileUnderConstructionWithSnapshot and
   INodeFileUnderConstructionSnapshot for supporting append to snapshotted files.
   (szetszwo)
+
+  HDFS-4126. Add reading/writing snapshot information to FSImage.
+  (Jing Zhao via suresh)

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Wed Jan 23 02:48:01 2013
@@ -211,8 +211,21 @@ public class DFSUtil {
    * Converts a byte array to a string using UTF8 encoding.
    */
   public static String bytes2String(byte[] bytes) {
+    return bytes2String(bytes, 0, bytes.length);
+  }
+  
+  /**
+   * Decode a specific range of bytes of the given byte array to a string
+   * using UTF8.
+   * 
+   * @param bytes The bytes to be decoded into characters
+   * @param offset The index of the first byte to decode
+   * @param length The number of bytes to decode
+   * @return The decoded string
+   */
+  public static String bytes2String(byte[] bytes, int offset, int length) {
     try {
-      return new String(bytes, "UTF8");
+      return new String(bytes, offset, length, "UTF8");
     } catch(UnsupportedEncodingException e) {
       assert false : "UTF8 encoding is not supported ";
     }

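The new overload makes it possible to decode a slice of a shared byte buffer
without first copying it into a smaller array. A minimal usage sketch (the
path and offsets below are made up for illustration):

    byte[] path = DFSUtil.string2Bytes("/test/foo/bar");
    // Decode only the "foo" component (bytes 6..8) straight from the shared
    // buffer, without allocating an intermediate byte[].
    String component = DFSUtil.bytes2String(path, 6, 3);  // "foo"
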
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Jan 23 02:48:01 2013
@@ -1060,13 +1060,13 @@ public class FSDirectory implements Clos
     } finally {
       writeUnlock();
     }
+    fsImage.getEditLog().logDelete(src, now);
     if (filesRemoved <= 0) {
       return false;
     }
     incrDeletedFileCount(filesRemoved);
     // Blocks will be deleted later by the caller of this method
     getFSNamesystem().removePathAndBlocks(src, null);
-    fsImage.getEditLog().logDelete(src, now);
     return true;
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Wed Jan 23 02:48:01 2013
@@ -36,16 +36,21 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CreateSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp;
@@ -57,6 +62,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
+import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.Holder;
@@ -489,6 +495,41 @@ public class FSEditLogLoader {
       // no data in here currently.
       break;
     }
+    case OP_CREATE_SNAPSHOT: {
+      CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op;
+      fsNamesys.getSnapshotManager().createSnapshot(
+          createSnapshotOp.snapshotRoot, createSnapshotOp.snapshotName);
+      break;
+    }
+    case OP_DELETE_SNAPSHOT: {
+      DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
+      BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+      fsNamesys.getSnapshotManager().deleteSnapshot(
+          deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
+          collectedBlocks);
+      fsNamesys.removeBlocks(collectedBlocks);
+      collectedBlocks.clear();
+      break;
+    }
+    case OP_RENAME_SNAPSHOT: {
+      RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op;
+      fsNamesys.getSnapshotManager().renameSnapshot(
+          renameSnapshotOp.snapshotRoot, renameSnapshotOp.snapshotOldName,
+          renameSnapshotOp.snapshotNewName);
+      break;
+    }
+    case OP_ALLOW_SNAPSHOT: {
+      AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op;
+      fsNamesys.getSnapshotManager().setSnapshottable(
+          allowSnapshotOp.snapshotRoot);
+      break;
+    }
+    case OP_DISALLOW_SNAPSHOT: {
+      DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op;
+      fsNamesys.getSnapshotManager().resetSnapshottable(
+          disallowSnapshotOp.snapshotRoot);
+      break;
+    }
     default:
       throw new IOException("Invalid operation read " + op.opCode);
     }

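For context, the new cases are reached from the loader's replay loop, which
reads serialized ops from the edit log and applies them in order. A simplified
sketch, assuming the switch lives in an applyEditLogOp-style helper (the real
loop also tracks transaction IDs, recovery state, and progress reporting):

    FSEditLogOp op;
    while ((op = in.readOp()) != null) {
      // Each op lands in the switch shown above; e.g. OP_CREATE_SNAPSHOT
      // replays into SnapshotManager.createSnapshot().
      applyEditLogOp(op, fsDir, in.getVersion());
    }
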
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Wed Jan 23 02:48:01 2013
@@ -31,6 +31,9 @@ import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -48,6 +51,10 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
@@ -59,13 +66,14 @@ import org.apache.hadoop.io.Text;
  * In particular, the format of the FSImage looks like:
  * <pre>
  * FSImage {
- *   LayoutVersion: int, NamespaceID: int, NumberItemsInFSDirectoryTree: long,
- *   NamesystemGenerationStamp: long, TransactionID: long
+ *   layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
+ *   namesystemGenerationStamp: long, transactionID: long, 
+ *   snapshotCounter: int, numberOfSnapshots: int, numOfSnapshottableDirs: int,
  *   {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
  * }
  * 
  * FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
- *   INodeInfo of root, NumberOfChildren of root: int
+ *   INodeInfo of root, numberOfChildren of root: int
  *   [list of INodeInfo of root's children],
  *   [list of INodeDirectoryInfo of root's directory children]
  * }
@@ -76,38 +84,76 @@ import org.apache.hadoop.io.Text;
  * 
  * INodeInfo {
  *   {
- *     LocalName: short + byte[]
+ *     localName: short + byte[]
  *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
  *   or 
  *   {
- *     FullPath: byte[]
+ *     fullPath: byte[]
  *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
- *   ReplicationFactor: short, ModificationTime: long,
- *   AccessTime: long, PreferredBlockSize: long,
- *   NumberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
+ *   replicationFactor: short, modificationTime: long,
+ *   accessTime: long, preferredBlockSize: long,
+ *   numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
  *   { 
- *     NsQuota: long, DsQuota: long, FsPermission: short, PermissionStatus
+ *     nsQuota: long, dsQuota: long, 
+ *     {
+ *       isINodeSnapshottable: byte,
+ *       isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
+ *     } (when {@link Feature#SNAPSHOT} is supported), 
+ *     fsPermission: short, PermissionStatus
  *   } for INodeDirectory
  *   or 
  *   {
- *     SymlinkString, FsPermission: short, PermissionStatus
+ *     symlinkString, fsPermission: short, PermissionStatus
  *   } for INodeSymlink
  *   or
  *   {
- *     [list of BlockInfo], FsPermission: short, PermissionStatus
+ *     containsBlock: byte (when {@link Feature#SNAPSHOT} is supported),
+ *     [list of BlockInfo] (when {@link Feature#SNAPSHOT} is not supported or 
+ *     containsBlock is true),
+ *     {
+ *       snapshotFileSize: long,
+ *       isINodeFileWithLink: byte (if snapshotFileSize is negative),
+ *     } (when {@link Feature#SNAPSHOT} is supported), 
+ *     fsPermission: short, PermissionStatus
  *   } for INodeFile
  * }
  * 
  * INodeDirectoryInfo {
- *   FullPath of the directory: short + byte[],
- *   NumberOfChildren: int, [list of INodeInfo of children INode]
- *   [list of INodeDirectoryInfo of the directory children]
+ *   fullPath of the directory: short + byte[],
+ *   numberOfChildren: int, [list of INodeInfo of children INode],
+ *   {
+ *     numberOfSnapshots: int,
+ *     [list of Snapshot] (when numberOfSnapshots is positive),
+ *     numberOfSnapshotDiffs: int,
+ *     [list of SnapshotDiff] (when numberOfSnapshotDiffs is positive),
+ *     number of children that are directories,
+ *     [list of INodeDirectoryInfo of the directory children] (includes
+ *     snapshot copies of deleted sub-directories)
+ *   } (when {@link Feature#SNAPSHOT} is supported), 
+ * }
+ * 
+ * Snapshot {
+ *   snapshotID: int, root of Snapshot: INodeDirectoryInfo (its local name is 
+ *   the name of the snapshot)
+ * }
+ * 
+ * SnapshotDiff {
+ *   childrenSize: int, 
+ *   full path of the root of the associated Snapshot: short + byte[], 
+ *   isSnapshotRoot: byte, 
+ *   snapshotINodeIsNotNull: byte (when isSnapshotRoot is false),
+ *   snapshotINode: INodeDirectory (when snapshotINodeIsNotNull is true), Diff
+ * }
+ * 
+ * Diff {
+ *   createdListSize: int, [Local name of INode in created list],
+ *   deletedListSize: int, [INode in deleted list: INodeInfo]
  * }
  * </pre>
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-class FSImageFormat {
+public class FSImageFormat {
   private static final Log LOG = FSImage.LOG;
   
   // Static-only class
@@ -118,7 +164,7 @@ class FSImageFormat {
    * should be called once, after which the getter methods may be used to retrieve
    * information about the image that was loaded, if loading was successful.
    */
-  static class Loader {
+  public static class Loader {
     private final Configuration conf;
     /** which namesystem this loader is working for */
     private final FSNamesystem namesystem;
@@ -168,9 +214,7 @@ class FSImageFormat {
       }
     }
 
-    void load(File curFile)
-      throws IOException
-    {
+    void load(File curFile) throws IOException {
       checkNotLoaded();
       assert curFile != null : "curFile is null";
 
@@ -209,6 +253,10 @@ class FSImageFormat {
         } else {
           imgTxId = 0;
         }
+        
+        if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+          namesystem.getSnapshotManager().read(in);
+        }
 
         // read compression related info
         FSImageCompression compression;
@@ -226,7 +274,11 @@ class FSImageFormat {
         LOG.info("Number of files = " + numFiles);
         if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
             imgVersion)) {
-          loadLocalNameINodes(numFiles, in);
+          if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+            loadLocalNameINodesWithSnapshot(in);
+          } else {
+            loadLocalNameINodes(numFiles, in);
+          }
         } else {
           loadFullNameINodes(numFiles, in);
         }
@@ -260,7 +312,25 @@ class FSImageFormat {
     fsDir.rootDir.cloneModificationTime(root);
     fsDir.rootDir.clonePermissionStatus(root);    
   }
-
+  
+    /**
+     * Load fsimage files when 1) only local names are stored, 
+     * and 2) snapshot is supported.
+     * 
+     * @param in Image input stream
+     */
+    private void loadLocalNameINodesWithSnapshot(DataInputStream in)
+        throws IOException {
+      assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
+          getLayoutVersion());
+      assert LayoutVersion.supports(Feature.SNAPSHOT, getLayoutVersion());
+      
+      // load root
+      loadRoot(in);
+      // load rest of the nodes recursively
+      loadDirectoryWithSnapshot(in);
+    }
+    
   /** 
    * load fsimage files assuming only local names are stored
    *   
@@ -275,13 +345,9 @@ class FSImageFormat {
      assert numFiles > 0;
 
      // load root
-     if( in.readShort() != 0) {
-       throw new IOException("First node is not root");
-     }   
-     INode root = loadINode(in);
-     // update the root's attributes
-     updateRootAttr(root);
-     numFiles--;
+     loadRoot(in);
+     // have loaded the first file (the root)
+     numFiles--; 
 
      // load rest of the nodes directory by directory
      while (numFiles > 0) {
@@ -292,6 +358,77 @@ class FSImageFormat {
      }
    }
    
+    /**
+     * Load information about root, and use the information to update the root
+     * directory of NameSystem.
+     * @param in The {@link DataInputStream} instance to read.
+     */
+    private void loadRoot(DataInputStream in) throws IOException {
+      // load root
+      if (in.readShort() != 0) {
+        throw new IOException("First node is not root");
+      }
+      INode root = loadINode(in);
+      // update the root's attributes
+      updateRootAttr(root);
+    }
+   
+    /** Load children nodes for the parent directory. */
+    private void loadChildren(INodeDirectory parent, DataInputStream in)
+        throws IOException {
+      int numChildren = in.readInt();
+      for (int i = 0; i < numChildren; i++) {
+        // load single inode
+        byte[] localName = new byte[in.readShort()];
+        in.readFully(localName); // read local name
+        INode newNode = loadINode(in); // read rest of inode
+        newNode.setLocalName(localName);
+        addToParent(parent, newNode);
+      }
+    }
+    
+    /**
+     * Load a directory when snapshot is supported.
+     * @param in The {@link DataInputStream} instance to read.
+     */
+    private void loadDirectoryWithSnapshot(DataInputStream in)
+        throws IOException {
+      // Step 1. Identify the parent INode
+      String parentPath = FSImageSerialization.readString(in);
+      final INodeDirectory parent = INodeDirectory.valueOf(
+          namesystem.dir.rootDir.getNode(parentPath, false), parentPath);
+      
+      // Step 2. Load children nodes under parent
+      loadChildren(parent, in);
+      
+      // Step 3. Load snapshots if parent is snapshottable
+      int numSnapshots = in.readInt();
+      INodeDirectorySnapshottable snapshottableParent = null;
+      if (numSnapshots >= 0) {
+        snapshottableParent = (INodeDirectorySnapshottable) parent;
+        // load snapshots and snapshotQuota
+        SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
+            numSnapshots, in, this);
+      }
+      
+      // Step 4. load SnapshotDiff list
+      int numSnapshotDiffs = in.readInt();
+      if (numSnapshotDiffs >= 0) {
+        INodeDirectoryWithSnapshot parentWithSnapshot = 
+            (INodeDirectoryWithSnapshot) parent;
+        // load SnapshotDiff list
+        SnapshotFSImageFormat.loadSnapshotDiffList(parentWithSnapshot,
+            numSnapshotDiffs, in, this);
+      }
+      
+      // Recursively load sub-directories, including snapshot copies of deleted
+      // directories
+      int numSubTree = in.readInt();
+      for (int i = 0; i < numSubTree; i++) {
+        loadDirectoryWithSnapshot(in);
+      }
+    }
+    
    /**
     * Load all children of a directory
     * 
@@ -388,17 +525,25 @@ class FSImageFormat {
     }
   }
 
+    /** @return The FSDirectory of the namesystem where the fsimage is loaded */
+    public FSDirectory getFSDirectoryInLoading() {
+      return namesystem.dir;
+    }
+  
   /**
    * load an inode from fsimage except for its name
    * 
    * @param in data input stream from which image is read
    * @return an inode
    */
-  private INode loadINode(DataInputStream in)
-      throws IOException {
+  public INode loadINode(DataInputStream in) throws IOException {
     long modificationTime = 0;
     long atime = 0;
     long blockSize = 0;
+    long computeFileSize = -1;
+    boolean snapshottable = false;
+    boolean withSnapshot = false;
+    boolean withLink = false;
     
     int imgVersion = getLayoutVersion();
     long inodeId = namesystem.allocateNewInodeId();
@@ -414,11 +559,22 @@ class FSImageFormat {
     BlockInfo blocks[] = null;
 
     if (numBlocks >= 0) {
-      blocks = new BlockInfo[numBlocks];
+      // to indicate INodeFileWithLink, blocks may be set to null while
+      // numBlocks is set to 0
+      blocks = LayoutVersion.supports(Feature.SNAPSHOT, imgVersion) ? (in
+            .readBoolean() ? new BlockInfo[numBlocks] : null)
+            : new BlockInfo[numBlocks];
+      
       for (int j = 0; j < numBlocks; j++) {
         blocks[j] = new BlockInfo(replication);
         blocks[j].readFields(in);
       }
+      if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)) {
+        computeFileSize = in.readLong();
+        if (computeFileSize < 0) {
+          withLink = in.readBoolean();
+        }
+      }
     }
     
     // get quota only when the node is a directory
@@ -431,7 +587,14 @@ class FSImageFormat {
         && blocks == null && numBlocks == -1) {
       dsQuota = in.readLong();
     }
-
+    if (LayoutVersion.supports(Feature.SNAPSHOT, imgVersion)
+        && blocks == null && numBlocks == -1) {
+      snapshottable = in.readBoolean();
+      if (!snapshottable) {
+        withSnapshot = in.readBoolean();
+      }
+    }
+    
     // Read the symlink only when the node is a symlink
     String symlink = "";
     if (numBlocks == -2) {
@@ -441,7 +604,8 @@ class FSImageFormat {
     PermissionStatus permissions = PermissionStatus.read(in);
 
     return INode.newINode(inodeId, permissions, blocks, symlink, replication,
-        modificationTime, atime, nsQuota, dsQuota, blockSize);
+        modificationTime, atime, nsQuota, dsQuota, blockSize, numBlocks,
+        withLink, computeFileSize, snapshottable, withSnapshot);
   }
 
     private void loadFilesUnderConstruction(DataInputStream in)
@@ -557,9 +721,7 @@ class FSImageFormat {
       return savedDigest;
     }
 
-    void save(File newFile,
-              FSImageCompression compression)
-      throws IOException {
+    void save(File newFile, FSImageCompression compression) throws IOException {
       checkNotSaved();
 
       final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
@@ -584,19 +746,19 @@ class FSImageFormat {
         out.writeLong(fsDir.rootDir.numItemsInTree());
         out.writeLong(sourceNamesystem.getGenerationStamp());
         out.writeLong(context.getTxId());
-
+        sourceNamesystem.getSnapshotManager().write(out);
+        
         // write compression info and set up compressed stream
         out = compression.writeHeaderAndWrapStream(fos);
         LOG.info("Saving image file " + newFile +
                  " using " + compression);
 
-
         byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
         ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
         // save the root
         FSImageSerialization.saveINode2Image(fsDir.rootDir, out);
         // save the rest of the nodes
-        saveImage(strbuf, fsDir.rootDir, out);
+        saveImage(strbuf, fsDir.rootDir, out, null);
         // save files under construction
         sourceNamesystem.saveFilesUnderConstruction(out);
         context.checkCancelled();
@@ -619,42 +781,143 @@ class FSImageFormat {
     }
 
     /**
-     * Save file tree image starting from the given root.
-     * This is a recursive procedure, which first saves all children of
-     * a current directory and then moves inside the sub-directories.
+     * Save children INodes.
+     * @param children The list of children INodes
+     * @param out The DataOutputStream to write
+     * @return Number of children that are directories
      */
-    private void saveImage(ByteBuffer currentDirName,
-                                  INodeDirectory current,
-                                  DataOutputStream out) throws IOException {
-      final ReadOnlyList<INode> children = current.getChildrenList(null);
-      if (children.isEmpty()) {
-        return;
-      }
-      // print prefix (parent directory name)
-      int prefixLen = currentDirName.position();
-      if (prefixLen == 0) {  // root
-        out.writeShort(PATH_SEPARATOR.length);
-        out.write(PATH_SEPARATOR);
-      } else {  // non-root directories
-        out.writeShort(prefixLen);
-        out.write(currentDirName.array(), 0, prefixLen);
-      }
+    private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out)
+        throws IOException {
+      // Write normal children INode. 
       out.writeInt(children.size());
+      int dirNum = 0;
       int i = 0;
       for(INode child : children) {
         // print all children first
         FSImageSerialization.saveINode2Image(child, out);
+        if (child.isDirectory()) {
+          dirNum++;
+        }
         if (i++ % 50 == 0) {
           context.checkCancelled();
         }
       }
+      return dirNum;
+    }
+    
+    /**
+     * The nonSnapshotPath is a path without snapshot information, which
+     * enables buffer reuse. If the snapshot is not null, we need to compute
+     * the corresponding snapshot path. E.g., when nonSnapshotPath is
+     * "/test/foo/bar/" and the snapshot is s1 of /test, we actually want to
+     * save the image for directory /test/foo/bar/ under snapshot s1 of /test,
+     * so the path to save should be "/test/.snapshot/s1/foo/bar/".
+     * 
+     * @param nonSnapshotPath The path without snapshot related information.
+     * @param snapshot The snapshot associated with the inode that the path 
+     *                 actually leads to.
+     * @return The snapshot path.                
+     */
+    private String computeSnapshotPath(String nonSnapshotPath, 
+        Snapshot snapshot) {
+      String snapshotParentFullPath = snapshot.getRoot().getParent()
+          .getFullPathName();
+      String snapshotName = snapshot.getRoot().getLocalName();
+      String relativePath = nonSnapshotPath.equals(snapshotParentFullPath) ? 
+          Path.SEPARATOR : nonSnapshotPath.substring(
+               snapshotParentFullPath.length());
+      String snapshotFullPath = snapshotParentFullPath + Path.SEPARATOR
+          + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName
+          + relativePath;
+      return snapshotFullPath;
+    }
+    
+    /**
+     * Save file tree image starting from the given root.
+     * This is a recursive procedure, which first saves all children and
+     * snapshot diffs of the current directory and then moves into the
+     * sub-directories.
+     * 
+     * @param currentDirName A ByteBuffer storing the path leading to the 
+     *                       current node. For a snapshot node, the path is
+     *                       (the snapshot path - ".snapshot/snapshot_name")
+     * @param current The current node
+     * @param out The DataOutputStream to write the image
+     * @param snapshot The possible snapshot associated with the current node
+     */
+    private void saveImage(ByteBuffer currentDirName, INodeDirectory current,
+        DataOutputStream out, Snapshot snapshot)
+        throws IOException {
+      final ReadOnlyList<INode> children = current.getChildrenList(null);
+      int dirNum = 0;
+      Map<Snapshot, List<INodeDirectory>> snapshotDirMap = null;
+      if (current instanceof INodeDirectoryWithSnapshot) {
+        snapshotDirMap = new HashMap<Snapshot, List<INodeDirectory>>();
+        dirNum += ((INodeDirectoryWithSnapshot) current).
+            getSnapshotDirectory(snapshotDirMap);
+      }
+      
+      // 1. Print prefix (parent directory name)
+      int prefixLen = currentDirName.position();
+      if (snapshot == null) {
+        if (prefixLen == 0) {  // root
+          out.writeShort(PATH_SEPARATOR.length);
+          out.write(PATH_SEPARATOR);
+        } else {  // non-root directories
+          out.writeShort(prefixLen);
+          out.write(currentDirName.array(), 0, prefixLen);
+        }
+      } else {
+        String nonSnapshotPath = prefixLen == 0 ? Path.SEPARATOR : DFSUtil
+            .bytes2String(currentDirName.array(), 0, prefixLen);
+        String snapshotFullPath = computeSnapshotPath(nonSnapshotPath, 
+            snapshot);
+        byte[] snapshotFullPathBytes = DFSUtil.string2Bytes(snapshotFullPath);
+        out.writeShort(snapshotFullPathBytes.length);
+        out.write(snapshotFullPathBytes);
+      }
+      
+      // 2. Write children INode 
+      dirNum += saveChildren(children, out);
+      
+      // 3. Write INodeDirectorySnapshottable#snapshotsByNames to record all
+      // Snapshots
+      if (current instanceof INodeDirectorySnapshottable) {
+        INodeDirectorySnapshottable snapshottableNode = 
+            (INodeDirectorySnapshottable) current;
+        SnapshotFSImageFormat.saveSnapshots(snapshottableNode, out);
+      } else {
+        out.writeInt(-1); // # of snapshots
+      }
+      
+      // 4. Write SnapshotDiff lists.
+      if (current instanceof INodeDirectoryWithSnapshot) {
+        INodeDirectoryWithSnapshot sNode = (INodeDirectoryWithSnapshot) current;
+        SnapshotFSImageFormat.saveSnapshotDiffs(sNode, out);
+      } else {
+        out.writeInt(-1); // # of SnapshotDiffs
+      }
+      
+      // Write sub-tree of sub-directories, including possible snapshots of 
+      // deleted sub-directories
+      out.writeInt(dirNum); // the number of sub-directories
       for(INode child : children) {
         if(!child.isDirectory())
           continue;
         currentDirName.put(PATH_SEPARATOR).put(child.getLocalNameBytes());
-        saveImage(currentDirName, (INodeDirectory)child, out);
+        saveImage(currentDirName, (INodeDirectory)child, out, snapshot);
         currentDirName.position(prefixLen);
       }
+      if (snapshotDirMap != null) {
+        for (Snapshot ss : snapshotDirMap.keySet()) {
+          List<INodeDirectory> snapshotSubDirs = snapshotDirMap.get(ss);
+          for (INodeDirectory subDir : snapshotSubDirs) {
+            currentDirName.put(PATH_SEPARATOR).put(subDir.getLocalNameBytes());
+            saveImage(currentDirName, subDir, out, ss);
+            currentDirName.position(prefixLen);
+          }
+        }
+      }
     }
   }
 }

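The path rewriting performed by computeSnapshotPath can be traced with the
example from its javadoc. A standalone sketch, with plain strings standing in
for snapshot.getRoot().getParent().getFullPathName() and
snapshot.getRoot().getLocalName(), and with Path.SEPARATOR and
HdfsConstants.DOT_SNAPSHOT_DIR written out literally:

    String snapshotParentFullPath = "/test";      // the snapshottable directory
    String snapshotName = "s1";
    String nonSnapshotPath = "/test/foo/bar";
    String relativePath = nonSnapshotPath.equals(snapshotParentFullPath)
        ? "/" : nonSnapshotPath.substring(snapshotParentFullPath.length());
    String snapshotFullPath = snapshotParentFullPath + "/.snapshot/"
        + snapshotName + relativePath;            // "/test/.snapshot/s1/foo/bar"
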
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java Wed Jan 23 02:48:01 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.DataInputStream;
+import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
@@ -32,6 +33,10 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
@@ -143,52 +148,109 @@ public class FSImageSerialization {
     out.writeInt(0); //  do not store locations of last block
   }
 
-  /*
-   * Save one inode's attributes to the image.
+  /**
+   * Serialize an {@link INodeDirectory}
+   * @param node The node to write
+   * @param out The {@link DataOutput} where the fields are written 
    */
-  static void saveINode2Image(INode node,
-                              DataOutputStream out) throws IOException {
+  public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
+      throws IOException {
     byte[] name = node.getLocalNameBytes();
     out.writeShort(name.length);
     out.write(name);
-    FsPermission filePerm = TL_DATA.get().FILE_PERM;
-    if (node.isDirectory()) {
-      out.writeShort(0);  // replication
-      out.writeLong(node.getModificationTime());
-      out.writeLong(0);   // access time
-      out.writeLong(0);   // preferred block size
-      out.writeInt(-1);   // # of blocks
-      out.writeLong(node.getNsQuota());
-      out.writeLong(node.getDsQuota());
-      filePerm.fromShort(node.getFsPermissionShort());
-      PermissionStatus.write(out, node.getUserName(),
-                             node.getGroupName(),
-                             filePerm);
-    } else if (node.isSymlink()) {
-      out.writeShort(0);  // replication
-      out.writeLong(0);   // modification time
-      out.writeLong(0);   // access time
-      out.writeLong(0);   // preferred block size
-      out.writeInt(-2);   // # of blocks
-      Text.writeString(out, ((INodeSymlink)node).getSymlinkString());
-      filePerm.fromShort(node.getFsPermissionShort());
-      PermissionStatus.write(out, node.getUserName(),
-                             node.getGroupName(),
-                             filePerm);      
+    out.writeShort(0);  // replication
+    out.writeLong(node.getModificationTime());
+    out.writeLong(0);   // access time
+    out.writeLong(0);   // preferred block size
+    out.writeInt(-1);   // # of blocks
+    out.writeLong(node.getNsQuota());
+    out.writeLong(node.getDsQuota());
+    if (node instanceof INodeDirectorySnapshottable) {
+      out.writeBoolean(true);
     } else {
-      INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getFileReplication());
-      out.writeLong(fileINode.getModificationTime());
-      out.writeLong(fileINode.getAccessTime());
-      out.writeLong(fileINode.getPreferredBlockSize());
+      out.writeBoolean(false);
+      out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
+    }
+    FsPermission filePerm = TL_DATA.get().FILE_PERM;
+    filePerm.fromShort(node.getFsPermissionShort());
+    PermissionStatus.write(out, node.getUserName(),
+                           node.getGroupName(),
+                           filePerm);
+  }
+  
+  /**
+   * Serialize an {@link INodeSymlink} node
+   * @param node The node to write
+   * @param out The {@link DataOutput} where the fields are written
+   */
+  private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
+      throws IOException {
+    byte[] name = node.getLocalNameBytes();
+    out.writeShort(name.length);
+    out.write(name);
+    out.writeShort(0);  // replication
+    out.writeLong(0);   // modification time
+    out.writeLong(0);   // access time
+    out.writeLong(0);   // preferred block size
+    out.writeInt(-2);   // # of blocks
+    Text.writeString(out, node.getSymlinkString());
+    FsPermission filePerm = TL_DATA.get().FILE_PERM;
+    filePerm.fromShort(node.getFsPermissionShort());
+    PermissionStatus.write(out, node.getUserName(),
+                           node.getGroupName(),
+                           filePerm);
+  }
+  
+  /**
+   * Serialize an {@link INodeFile} node
+   * @param node The node to write
+   * @param out The {@link DataOutput} where the fields are written
+   * @param writeBlock Whether to write block information
+   */
+  public static void writeINodeFile(INodeFile node, DataOutput out,
+      boolean writeBlock) throws IOException {
+    byte[] name = node.getLocalNameBytes();
+    out.writeShort(name.length);
+    out.write(name);
+    INodeFile fileINode = node;
+    out.writeShort(fileINode.getFileReplication());
+    out.writeLong(fileINode.getModificationTime());
+    out.writeLong(fileINode.getAccessTime());
+    out.writeLong(fileINode.getPreferredBlockSize());
+    if (writeBlock) {
       Block[] blocks = fileINode.getBlocks();
       out.writeInt(blocks.length);
+      out.writeBoolean(true);
       for (Block blk : blocks)
         blk.write(out);
-      filePerm.fromShort(fileINode.getFsPermissionShort());
-      PermissionStatus.write(out, fileINode.getUserName(),
-                             fileINode.getGroupName(),
-                             filePerm);
+    } else {
+      out.writeInt(0); // # of blocks
+      out.writeBoolean(false);
+    }
+    if (node instanceof INodeFileSnapshot) {
+      out.writeLong(((INodeFileSnapshot) node).computeFileSize(true));
+    } else {
+      out.writeLong(-1);
+      out.writeBoolean(node instanceof INodeFileWithSnapshot);
+    }
+    FsPermission filePerm = TL_DATA.get().FILE_PERM;
+    filePerm.fromShort(fileINode.getFsPermissionShort());
+    PermissionStatus.write(out, fileINode.getUserName(),
+                           fileINode.getGroupName(),
+                           filePerm);
+  }
+  
+  /**
+   * Save one inode's attributes to the image.
+   */
+  static void saveINode2Image(INode node, DataOutput out)
+      throws IOException {
+    if (node.isDirectory()) {
+      writeINodeDirectory((INodeDirectory) node, out);
+    } else if (node.isSymlink()) {
+      writeINodeSymlink((INodeSymlink) node, out);      
+    } else {
+      writeINodeFile((INodeFile) node, out, true);
     }
   }
 

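Each writeINode* method fixes a field order that the loader must consume
byte-for-byte. As a sanity check, here is a hedged sketch of reading back the
directory record exactly as writeINodeDirectory lays it out ('in' is assumed
to be a DataInput; this mirrors, but is not, the loader's actual code):

    short nameLen = in.readShort();
    byte[] name = new byte[nameLen];
    in.readFully(name);             // local name
    in.readShort();                 // replication, always 0 for directories
    long mtime = in.readLong();     // modification time
    in.readLong();                  // access time, unused for directories
    in.readLong();                  // preferred block size, unused
    in.readInt();                   // # of blocks, -1 marks a directory
    long nsQuota = in.readLong();
    long dsQuota = in.readLong();
    boolean snapshottable = in.readBoolean();
    boolean withSnapshot = !snapshottable && in.readBoolean();
    PermissionStatus perm = PermissionStatus.read(in);
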
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jan 23 02:48:01 2013
@@ -2824,7 +2824,7 @@ public class FSNamesystem implements Nam
    *          An instance of {@link BlocksMapUpdateInfo} which contains a list
    *          of blocks that need to be removed from blocksMap
    */
-  private void removeBlocks(BlocksMapUpdateInfo blocks) {
+  void removeBlocks(BlocksMapUpdateInfo blocks) {
     Iterator<Map.Entry<Block, BlocksMapINodeUpdateEntry>> iter = blocks
         .iterator();
     while (iter.hasNext()) {
@@ -5645,6 +5645,10 @@ public class FSNamesystem implements Nam
         .isAvoidingStaleDataNodesForWrite();
   }
   
+  public SnapshotManager getSnapshotManager() {
+    return snapshotManager;
+  }
+  
   /** Allow snapshot on a directory. */
   public void allowSnapshot(String path) throws SafeModeException, IOException {
     writeLock();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Wed Jan 23 02:48:01 2013
@@ -35,6 +35,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.StringUtils;
@@ -619,31 +624,41 @@ public abstract class INode implements C
    * @param nsQuota namespace quota
    * @param dsQuota disk quota
    * @param preferredBlockSize block size
+   * @param numBlocks number of blocks
+   * @param withLink whether the node is INodeWithLink
+   * @param computeFileSize non-negative computeFileSize means the node is 
+   *                        INodeFileSnapshot
+   * @param snapshottable whether the node is {@link INodeDirectorySnapshottable}
+   * @param withSnapshot whether the node is {@link INodeDirectoryWithSnapshot}                       
    * @return an inode
    */
-  static INode newINode(long id,
-                        PermissionStatus permissions,
-                        BlockInfo[] blocks,
-                        String symlink,
-                        short replication,
-                        long modificationTime,
-                        long atime,
-                        long nsQuota,
-                        long dsQuota,
-                        long preferredBlockSize) {
+  static INode newINode(long id, PermissionStatus permissions,
+      BlockInfo[] blocks, String symlink, short replication,
+      long modificationTime, long atime, long nsQuota, long dsQuota,
+      long preferredBlockSize, int numBlocks, boolean withLink,
+      long computeFileSize, boolean snapshottable, boolean withSnapshot) {
     if (symlink.length() != 0) { // check if symbolic link
       return new INodeSymlink(id, symlink, modificationTime, atime, permissions);
-    }  else if (blocks == null) { //not sym link and blocks null? directory!
+    }  else if (blocks == null && numBlocks < 0) { 
+      //not sym link and numBlocks < 0? directory!
+      INodeDirectory dir = null;
       if (nsQuota >= 0 || dsQuota >= 0) {
-        return new INodeDirectoryWithQuota(
-             id, permissions, modificationTime, nsQuota, dsQuota);
-      } 
-      // regular directory
-      return new INodeDirectory(id, permissions, modificationTime);
+        dir = new INodeDirectoryWithQuota(id, permissions, modificationTime,
+            nsQuota, dsQuota);
+      } else {
+        // regular directory
+        dir = new INodeDirectory(id, permissions, modificationTime);
+      }
+      return snapshottable ? new INodeDirectorySnapshottable(dir)
+          : (withSnapshot ? INodeDirectoryWithSnapshot.newInstance(dir, null)
+              : dir);
     }
     // file
-    return new INodeFile(id, permissions, blocks, replication,
+    INodeFile fileNode = new INodeFile(id, permissions, blocks, replication,
         modificationTime, atime, preferredBlockSize);
+    return computeFileSize >= 0 ? new INodeFileSnapshot(fileNode,
+        computeFileSize) : (withLink ? new INodeFileWithSnapshot(fileNode)
+        : fileNode);
   }
 
   /**
@@ -662,7 +677,8 @@ public abstract class INode implements C
    * @param prefix The prefix string that each line should print.
    */
   @VisibleForTesting
-  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, Snapshot snapshot) {
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
+      Snapshot snapshot) {
     out.print(prefix);
     out.print(" ");
     out.print(getLocalName());
@@ -670,10 +686,27 @@ public abstract class INode implements C
     out.print(getObjectString());
     out.print("), parent=");
     out.print(parent == null? null: parent.getLocalName() + "/");
+    out.print(", permission=" + getFsPermission(snapshot) + ", group="
+        + getGroupName(snapshot) + ", user=" + getUserName(snapshot));
     if (!this.isDirectory()) {
+      if (this.isFile()) {
+        // print block information
+        String blocksInfo = ((INodeFile) this).printBlocksInfo();
+        out.print(", blocks=[" + blocksInfo + "]");
+      }
+      if (this instanceof INodeFileWithSnapshot) {
+        INodeFileWithSnapshot nodeWithLink = (INodeFileWithSnapshot) this;
+        FileWithSnapshot next = nodeWithLink.getNext();
+        out.print(", next="
+            + (next != null ? next.asINodeFile().getObjectString() : "null"));
+        if (this instanceof INodeFileSnapshot) {
+          out.print(", computedSize="
+              + ((INodeFileSnapshot) this).computeFileSize(true));
+        }
+      }
       out.println();
     } else {
-      final INodeDirectory dir = (INodeDirectory)this;
+      final INodeDirectory dir = (INodeDirectory) this;
       out.println(", size=" + dir.getChildrenList(snapshot).size());
     }
   }

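The nested ternaries in the updated newINode can be hard to scan. The same
selection, flattened into an illustrative helper that is not part of the patch
(the real code first builds the quota or plain directory and then wraps it):

    static String selectNodeClass(String symlink, BlockInfo[] blocks,
        int numBlocks, long nsQuota, long dsQuota, boolean snapshottable,
        boolean withSnapshot, long computeFileSize, boolean withLink) {
      if (symlink.length() != 0) {
        return "INodeSymlink";
      }
      if (blocks == null && numBlocks < 0) {        // a directory
        if (snapshottable) return "INodeDirectorySnapshottable";
        if (withSnapshot)  return "INodeDirectoryWithSnapshot";
        return (nsQuota >= 0 || dsQuota >= 0)
            ? "INodeDirectoryWithQuota" : "INodeDirectory";
      }
      // a file
      if (computeFileSize >= 0) return "INodeFileSnapshot";
      return withLink ? "INodeFileWithSnapshot" : "INodeFile";
    }
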
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Wed Jan 23 02:48:01 2013
@@ -319,4 +319,18 @@ public class INodeFile extends INode imp
   public int numBlocks() {
     return blocks == null ? 0 : blocks.length;
   }
+  
+  /**
+   * @return A String containing all the blockInfo
+   */
+  String printBlocksInfo() {
+    if (blocks == null) {
+      return "";
+    }
+    StringBuilder buffer = new StringBuilder();
+    for (BlockInfo blk : blocks) {
+      buffer.append(blk.toString() + " ");
+    }
+    return buffer.toString();
+  }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java Wed Jan 23 02:48:01 2013
@@ -41,12 +41,27 @@ public interface FileWithSnapshot {
   /** Set the next element. */
   public void setNext(FileWithSnapshot next);
   
-  /** Insert inode to the circular linked list. */
-  public void insert(FileWithSnapshot inode);
+  /** Insert inode into the circular linked list, after the current node. */
+  public void insertAfter(FileWithSnapshot inode);
+  
+  /** Insert inode into the circular linked list, before the current node. */
+  public void insertBefore(FileWithSnapshot inode);
+  
+  /** Remove self from the circular list. */
+  public void removeSelf();
   
   /** Utility methods for the classes which implement the interface. */
   static class Util {
 
+    /** @return The previous node in the circular linked list */
+    static FileWithSnapshot getPrevious(FileWithSnapshot file) {
+      FileWithSnapshot previous = file.getNext();
+      while (previous.getNext() != file) {
+        previous = previous.getNext();
+      }
+      return previous;
+    }
+    
     /** Replace the old file with the new file in the circular linked list. */
     static void replace(FileWithSnapshot oldFile, FileWithSnapshot newFile) {
       //set next element

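With Util.getPrevious available, the three list operations reduce to
next-pointer splices. A minimal sketch of how an implementing class might
realize them, assuming a well-formed circular list in which next is never
null (so a single node points to itself):

    public void insertAfter(FileWithSnapshot inode) {
      inode.setNext(this.getNext());    // splice the new node in after 'this'
      this.setNext(inode);
    }

    public void insertBefore(FileWithSnapshot inode) {
      // O(n): getPrevious walks all the way around the circle
      Util.getPrevious(this).insertAfter(inode);
    }

    public void removeSelf() {
      Util.getPrevious(this).setNext(this.getNext());  // unlink this node
      this.setNext(null);
    }
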
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Wed Jan 23 02:48:01 2013
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.Time;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -67,9 +66,8 @@ public class INodeDirectorySnapshottable
   /**
    * @return {@link #snapshotsByNames}
    */
-  @VisibleForTesting
-  List<Snapshot> getSnapshotsByNames() {
-    return snapshotsByNames;
+  ReadOnlyList<Snapshot> getSnapshotsByNames() {
+    return ReadOnlyList.Util.asReadOnlyList(this.snapshotsByNames);
   }
   
   /** Number of snapshots allowed. */
@@ -82,7 +80,7 @@ public class INodeDirectorySnapshottable
   
   /** @return the number of existing snapshots. */
   public int getNumSnapshots() {
-    return getSnapshotsByNames().size();
+    return snapshotsByNames.size();
   }
   
   private int searchSnapshot(byte[] snapshotName) {
@@ -153,6 +151,14 @@ public class INodeDirectorySnapshottable
   public boolean isSnapshottable() {
     return true;
   }
+  
+  /**
+   * Simply add a snapshot into {@link #snapshotsByNames}. Used when loading
+   * the FSImage.
+   */
+  void addSnapshot(Snapshot snapshot) {
+    this.snapshotsByNames.add(snapshot);
+  }
 
   /** Add a snapshot. */
   Snapshot addSnapshot(int id, String name) throws SnapshotException {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Wed Jan 23 02:48:01 2013
@@ -17,19 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.io.DataOutput;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -79,7 +83,7 @@ public class INodeDirectoryWithSnapshot 
    *   2.3.3. modify i in current and then modify: replace it in c-list (c', d)
    * </pre>
    */
-  static class Diff {
+  public static class Diff {
     /**
      * Search the inode from the list.
      * @return -1 if the list is null; otherwise, return the insertion point
@@ -105,6 +109,16 @@ public class INodeDirectoryWithSnapshot 
     /** d-list: inode(s) deleted from current. */
     private List<INode> deleted;
 
+    INode searchCreated(final byte[] name) {
+      int cIndex = search(created, name);
+      return cIndex < 0 ? null : created.get(cIndex);
+    }
+    
+    INode searchDeleted(final byte[] name) {
+      int dIndex = search(deleted, name);
+      return dIndex < 0 ? null : deleted.get(dIndex);
+    }
+    
     /**
      * Insert the inode to created.
      * @param i the insertion point defined
@@ -155,13 +169,18 @@ public class INodeDirectoryWithSnapshot 
      * Delete an inode from current state.
      * @return a triple for undo.
      */
-    Triple<Integer, INode, Integer> delete(final INode inode) {
+    Triple<Integer, INode, Integer> delete(final INode inode,
+        boolean updateCircularList) {
       final int c = search(created, inode);
       INode previous = null;
       Integer d = null;
       if (c >= 0) {
         // remove a newly created inode
         previous = created.remove(c);
+        if (updateCircularList && previous instanceof FileWithSnapshot) {
+          // also we should remove previous from the circular list
+          ((FileWithSnapshot) previous).removeSelf();
+        }
       } else {
         // not in c-list, it must be in previous
         d = search(deleted, inode);
@@ -184,7 +203,8 @@ public class INodeDirectoryWithSnapshot 
      * Modify an inode in current state.
      * @return a triple for undo.
      */
-    Triple<Integer, INode, Integer> modify(final INode oldinode, final INode newinode) {
+    Triple<Integer, INode, Integer> modify(final INode oldinode,
+        final INode newinode, boolean updateCircularList) {
       if (!oldinode.equals(newinode)) {
         throw new AssertionError("The names do not match: oldinode="
             + oldinode + ", newinode=" + newinode);
@@ -196,6 +216,14 @@ public class INodeDirectoryWithSnapshot 
         // Case 1.1.3 and 2.3.3: inode is already in c-list,
         previous = created.set(c, newinode);
         
+        if (updateCircularList && newinode instanceof FileWithSnapshot) {
+          // also should remove oldinode from the circular list
+          FileWithSnapshot newNodeWithLink = (FileWithSnapshot) newinode;
+          FileWithSnapshot oldNodeWithLink = (FileWithSnapshot) oldinode;
+          newNodeWithLink.setNext(oldNodeWithLink.getNext());
+          oldNodeWithLink.setNext(null);
+        }
+        
         //TODO: fix a bug that previous != oldinode.  Set it to oldinode for now
         previous = oldinode;
       } else {
@@ -328,8 +356,11 @@ public class INodeDirectoryWithSnapshot 
      * @param the posterior diff to combine
      * @param deletedINodeProcesser Used in case 2.1, 2.3, 3.1, and 3.3
      *                              to process the deleted inodes.
+     * @param updateCircularList Whether to update the circular linked list 
+     *                           while combining the diffs.                             
      */
-    void combinePostDiff(Diff postDiff, Processor deletedINodeProcesser) {
+    void combinePostDiff(Diff postDiff, Processor deletedINodeProcesser,
+        boolean updateCircularList) {
       final List<INode> postCreated = postDiff.created != null?
           postDiff.created: Collections.<INode>emptyList();
       final List<INode> postDeleted = postDiff.deleted != null?
@@ -350,14 +381,16 @@ public class INodeDirectoryWithSnapshot 
           c = createdIterator.hasNext()? createdIterator.next(): null;
         } else if (cmp > 0) {
           // case 2: only in d-list
-          Triple<Integer, INode, Integer> triple = delete(d);
+          Triple<Integer, INode, Integer> triple = delete(d, 
+              updateCircularList);
           if (deletedINodeProcesser != null) {
             deletedINodeProcesser.process(triple.middle);
           }
           d = deletedIterator.hasNext()? deletedIterator.next(): null;
         } else {
           // case 3: in both c-list and d-list 
-          final Triple<Integer, INode, Integer> triple = modify(d, c);
+          final Triple<Integer, INode, Integer> triple = modify(d, c,
+              updateCircularList);
           if (deletedINodeProcesser != null) {
             deletedINodeProcesser.process(triple.middle);
           }
@@ -386,6 +419,74 @@ public class INodeDirectoryWithSnapshot 
           + "{created=" + toString(created)
           + ", deleted=" + toString(deleted) + "}";
     }
+    
+    /** Serialize {@link #created} */
+    private void writeCreated(DataOutput out) throws IOException {
+      if (created != null) {
+        out.writeInt(created.size());
+        for (INode node : created) {
+          // For an INode in the created list, we only need to record its local name
+          byte[] name = node.getLocalNameBytes();
+          out.writeShort(name.length);
+          out.write(name);
+        }
+      } else {
+        out.writeInt(0);
+      }     
+    }
+    
+    /** Serialize {@link #deleted} */
+    private void writeDeleted(DataOutput out) throws IOException {
+      if (deleted != null) {
+        out.writeInt(deleted.size());
+        for (INode node : deleted) {
+          if (node.isDirectory()) {
+            FSImageSerialization.writeINodeDirectory((INodeDirectory) node, out);
+          } else { // INodeFile
+            // we write the block information for an INodeFile node only when
+            // the node is stored solely in the deleted list, or when the node
+            // is not a snapshot copy
+            int createdIndex = search(created, node);
+            if (createdIndex < 0) {
+              FSImageSerialization.writeINodeFile((INodeFile) node, out, true);
+            } else {
+              INodeFile cNode = (INodeFile) created.get(createdIndex);
+              INodeFile dNode = (INodeFile) node;
+              // A corner case here: after deleting a Snapshot, when combining
+              // SnapshotDiff, we may put two inodes sharing the same name but
+              // with totally different blocks in the created and deleted list of
+              // the same SnapshotDiff.
+              if (cNode.getBlocks() == dNode.getBlocks()) {
+                FSImageSerialization.writeINodeFile(dNode, out, false);
+              } else {
+                FSImageSerialization.writeINodeFile(dNode, out, true);
+              }
+            }
+          }
+        }
+      } else {
+        out.writeInt(0);
+      }
+    }
+    
+    /** Serialize to out */
+    private void write(DataOutput out) throws IOException {
+      writeCreated(out);
+      writeDeleted(out);    
+    }
+    
+    /** @return The list of INodeDirectory contained in the deleted list */
+    private List<INodeDirectory> getDirsInDeleted() {
+      List<INodeDirectory> dirList = new ArrayList<INodeDirectory>();
+      if (deleted != null) {
+        for (INode node : deleted) {
+          if (node.isDirectory()) {
+            dirList.add((INodeDirectory) node);
+          }
+        }
+      }
+      return dirList;
+    }
   }
   
   /**
@@ -406,7 +507,7 @@ public class INodeDirectoryWithSnapshot 
    *   s_k     = s_{k+1} - d_k = (current state) - d_n - d_{n-1} - ... - d_k.
    * </pre>
    */
-  class SnapshotDiff implements Comparable<Snapshot> {
+  public class SnapshotDiff implements Comparable<Snapshot> {
     /** The snapshot will be obtained after this diff is applied. */
     final Snapshot snapshot;
     /** The size of the children list at snapshot creation time. */
@@ -419,7 +520,7 @@ public class INodeDirectoryWithSnapshot 
      */
     private SnapshotDiff posteriorDiff;
     /** The children list diff. */
-    private final Diff diff = new Diff();
+    private final Diff diff;
     /** The snapshot inode data.  It is null when there is no change. */
     private INodeDirectory snapshotINode = null;
 
@@ -428,6 +529,25 @@ public class INodeDirectoryWithSnapshot 
 
       this.snapshot = snapshot;
       this.childrenSize = dir.getChildrenList(null).size();
+      this.diff = new Diff();
+    }
+
+    /** Constructor used by FSImage loading */
+    SnapshotDiff(Snapshot snapshot,
+        int childrenSize, INodeDirectory snapshotINode,
+        SnapshotDiff posteriorDiff, List<INode> createdList,
+        List<INode> deletedList) {
+      this.snapshot = snapshot;
+      this.childrenSize = childrenSize;
+      this.snapshotINode = snapshotINode;
+      this.posteriorDiff = posteriorDiff;
+      this.diff = new Diff();
+      diff.created = createdList;
+      diff.deleted = deletedList;
+    }
+    
+    public Diff getDiff() {
+      return diff;
     }
 
     /** Compare diffs with snapshot ID. */
@@ -485,7 +605,7 @@ public class INodeDirectoryWithSnapshot 
           if (children == null) {
             final Diff combined = new Diff();
             for(SnapshotDiff d = SnapshotDiff.this; d != null; d = d.posteriorDiff) {
-              combined.combinePostDiff(d.diff, null);
+              combined.combinePostDiff(d.diff, null, false);
             }
             children = combined.apply2Current(ReadOnlyList.Util.asList(
                 INodeDirectoryWithSnapshot.this.getChildrenList(null)));
@@ -538,6 +658,36 @@ public class INodeDirectoryWithSnapshot 
           + (posteriorDiff == null? null: posteriorDiff.snapshot)
           + ") childrenSize=" + childrenSize + ", " + diff;
     }
+    
+    /** Serialize fields to out */
+    void write(DataOutput out) throws IOException {
+      out.writeInt(childrenSize);
+      // No need to write all fields of Snapshot here, since the snapshot must
+      // already have been recorded when writing the FSImage. We only need to
+      // record the full path of its root.
+      byte[] fullPath = DFSUtil.string2Bytes(snapshot.getRoot()
+          .getFullPathName());
+      out.writeShort(fullPath.length);
+      out.write(fullPath);
+      // write snapshotINode
+      if (isSnapshotRoot()) {
+        out.writeBoolean(true);
+      } else {
+        out.writeBoolean(false);
+        if (snapshotINode != null) {
+          out.writeBoolean(true);
+          FSImageSerialization.writeINodeDirectory(snapshotINode, out);
+        } else {
+          out.writeBoolean(false);
+        }
+      }
+      // Write diff. No need to write posteriorDiff, since diffs is a list.
+      diff.write(out);
+    }
+    
+    private List<INodeDirectory> getSnapshotDirectory() {
+      return diff.getDirsInDeleted();
+    }
   }
   
   /** An interface for passing a method to process inodes. */
@@ -598,7 +748,7 @@ public class INodeDirectoryWithSnapshot 
               ((INodeFile)inode).collectSubtreeBlocksAndClear(collectedBlocks);
             }
           }
-        });
+        }, true);
 
         previousDiff.posteriorDiff = diffToRemove.posteriorDiff;
         diffToRemove.posteriorDiff = null;
@@ -606,7 +756,12 @@ public class INodeDirectoryWithSnapshot 
       return diffToRemove;
     }
   }
-
+  
+  /** Insert a SnapshotDiff to the head of diffs */
+  public void insertDiff(SnapshotDiff diff) {
+    diffs.add(0, diff);
+  }
+  
   /** Add a {@link SnapshotDiff} for the given snapshot and directory. */
   SnapshotDiff addSnapshotDiff(Snapshot snapshot, INodeDirectory dir,
       boolean isSnapshotCreation) {
@@ -623,7 +778,7 @@ public class INodeDirectoryWithSnapshot 
     }
     return d;
   }
-
+  
   SnapshotDiff getLastSnapshotDiff() {
     final int n = diffs.size();
     return n == 0? null: diffs.get(n - 1);
@@ -656,7 +811,6 @@ public class INodeDirectoryWithSnapshot 
   /**
    * @return {@link #snapshots}
    */
-  @VisibleForTesting
   List<SnapshotDiff> getSnapshotDiffs() {
     return diffs;
   }
@@ -709,7 +863,7 @@ public class INodeDirectoryWithSnapshot 
     }
 
     final Pair<? extends INode, ? extends INode> p = child.createSnapshotCopy();
-    diff.diff.modify(p.right, p.left);
+    diff.diff.modify(p.right, p.left, true);
     return p;
   }
 
@@ -734,7 +888,7 @@ public class INodeDirectoryWithSnapshot 
     Triple<Integer, INode, Integer> undoInfo = null;
     if (latest != null) {
       diff = checkAndAddLatestDiff(latest);
-      undoInfo = diff.delete(child);
+      undoInfo = diff.delete(child, true);
     }
     final INode removed = super.removeChild(child, null);
     if (removed == null && undoInfo != null) {
@@ -794,4 +948,25 @@ public class INodeDirectoryWithSnapshot 
   public String toString() {
     return super.toString() + ", diffs=" + getSnapshotDiffs();
   }
+  
+  /**
+   * Get all the INodeDirectory stored in the deleted lists.
+   * 
+   * @param snapshotDirMap
+   *          A HashMap storing all the INodeDirectory stored in the deleted
+   *          lists, keyed by the associated Snapshot.
+   * @return The number of INodeDirectory collected into the map.
+   */
+  public int getSnapshotDirectory(
+      Map<Snapshot, List<INodeDirectory>> snapshotDirMap) {
+    int dirNum = 0;
+    for (SnapshotDiff sdiff : diffs) {
+      List<INodeDirectory> list = sdiff.getSnapshotDirectory();
+      if (list.size() > 0) {
+        snapshotDirMap.put(sdiff.snapshot, list);
+        dirNum += list.size();
+      }
+    }
+    return dirNum;
+  }
 }
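
The writeCreated and writeDeleted methods above define the on-disk layout for
a directory diff: an int count, then one record per inode, where a created
inode is stored as a short length followed by its local-name bytes. Below is a
minimal, self-contained sketch of that length-prefixed layout and its inverse
using plain java.io streams (the class name, method names, and the UTF-8 round
trip in main are illustrative assumptions; the real code writes the inodes'
stored name bytes):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.Charset;
    import java.util.ArrayList;
    import java.util.List;

    public class CreatedListFormat {
      // write: an int count, then per name a short length and the raw bytes
      static void writeNames(List<byte[]> names, DataOutputStream out)
          throws IOException {
        out.writeInt(names.size());
        for (byte[] name : names) {
          out.writeShort(name.length);
          out.write(name);
        }
      }

      // read the same layout back
      static List<byte[]> readNames(DataInputStream in) throws IOException {
        int n = in.readInt();
        List<byte[]> names = new ArrayList<byte[]>(n);
        for (int i = 0; i < n; i++) {
          byte[] name = new byte[in.readShort()];
          in.readFully(name);
          names.add(name);
        }
        return names;
      }

      public static void main(String[] args) throws IOException {
        Charset utf8 = Charset.forName("UTF-8");
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        List<byte[]> names = new ArrayList<byte[]>();
        names.add("foo".getBytes(utf8));
        writeNames(names, new DataOutputStream(buf));
        List<byte[]> back = readNames(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(new String(back.get(0), utf8)); // prints foo
      }
    }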

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java Wed Jan 23 02:48:01 2013
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 
 /**
  *  INode representing a snapshot of a file.
@@ -25,18 +27,27 @@ import org.apache.hadoop.classification.
 @InterfaceAudience.Private
 public class INodeFileSnapshot extends INodeFileWithSnapshot {
   /** The file size at snapshot creation time. */
-  final long size;
+  final long snapshotFileSize;
 
   INodeFileSnapshot(INodeFileWithSnapshot f) {
     super(f);
-    this.size = f.computeFileSize(true);
-    f.insert(this);
+    this.snapshotFileSize = f.computeFileSize(true);
+    f.insertAfter(this);
+  }
+  
+  /**
+   * A constructor that only sets the basic attributes and the size. Used while
+   * loading {@link FSImage}
+   */
+  public INodeFileSnapshot(INodeFile f, long size) {
+    super(f);
+    this.snapshotFileSize = size;
   }
 
   @Override
   public long computeFileSize(boolean includesBlockInfoUnderConstruction) {
     //ignore includesBlockInfoUnderConstruction 
     //since files in a snapshot are considered as closed.
-    return size;
+    return snapshotFileSize;
   }
 }
\ No newline at end of file
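
The rename from size to snapshotFileSize, together with the computeFileSize
override, pins a snapshot file's reported length to its length at snapshot
creation time, no matter how the current file grows afterwards. A toy
illustration of that freeze (both classes are illustrative stand-ins, not the
HDFS inode types):

    public class FrozenSize {
      static class CurrentFile {
        long length;
        CurrentFile(long length) { this.length = length; }
        long computeFileSize() { return length; }
      }

      // the snapshot copy captures the size once and always reports it,
      // like INodeFileSnapshot#computeFileSize in the patch
      static class FileSnapshot extends CurrentFile {
        final long snapshotFileSize;
        FileSnapshot(CurrentFile f) {
          super(f.length);
          this.snapshotFileSize = f.computeFileSize();
        }
        @Override
        long computeFileSize() { return snapshotFileSize; }
      }

      public static void main(String[] args) {
        CurrentFile f = new CurrentFile(1024);
        FileSnapshot s = new FileSnapshot(f);
        f.length = 4096;                         // the file grows later
        System.out.println(f.computeFileSize()); // 4096
        System.out.println(s.computeFileSize()); // still 1024
      }
    }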

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionSnapshot.java Wed Jan 23 02:48:01 2013
@@ -32,7 +32,7 @@ public class INodeFileUnderConstructionS
   INodeFileUnderConstructionSnapshot(INodeFileUnderConstructionWithSnapshot f) {
     super(f, f.getClientName(), f.getClientMachine(), f.getClientNode());
     this.size = f.computeFileSize(true);
-    f.insert(this);
+    f.insertAfter(this);
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java Wed Jan 23 02:48:01 2013
@@ -74,10 +74,30 @@ public class INodeFileUnderConstructionW
   }
 
   @Override
-  public void insert(FileWithSnapshot inode) {
+  public void insertAfter(FileWithSnapshot inode) {
     inode.setNext(this.getNext());
     this.setNext(inode);
   }
+  
+  @Override
+  public void insertBefore(FileWithSnapshot inode) {
+    inode.setNext(this);
+    if (this.next == null || this.next == this) {
+      this.next = inode;
+      return;
+    }
+    FileWithSnapshot previous = Util.getPrevious(this);
+    previous.setNext(inode);
+  }
+
+  @Override
+  public void removeSelf() {
+    if (this.next != null && this.next != this) {
+      FileWithSnapshot previous = Util.getPrevious(this);
+      previous.setNext(next);
+    }
+    this.next = null;
+  }
 
   @Override
   public short getBlockReplication() {
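
insertAfter, insertBefore, and removeSelf above maintain a circular
singly-linked list that chains a file to its snapshot copies; since the list
is singly linked, a node's predecessor has to be found by walking the circle
(Util.getPrevious in the patch). A self-contained sketch of the same list
discipline (Node is an illustrative stand-in for FileWithSnapshot):

    public class CircularList {
      static class Node {
        final String label;
        Node next; // null means "not linked into any circle"
        Node(String label) { this.label = label; }
      }

      // walk the circle until we reach the node that points at target
      static Node getPrevious(Node target) {
        Node p = target;
        while (p.next != target) {
          p = p.next;
        }
        return p;
      }

      // insert n right after cur; cur must already be in a circle
      static void insertAfter(Node cur, Node n) {
        n.next = cur.next;
        cur.next = n;
      }

      // insert n right before cur, handling the unlinked and
      // single-node cases the same way the patch does
      static void insertBefore(Node cur, Node n) {
        n.next = cur;
        if (cur.next == null || cur.next == cur) {
          cur.next = n;
          return;
        }
        getPrevious(cur).next = n;
      }

      // unlink cur: repair the circle unless cur is alone, then clear next
      static void removeSelf(Node cur) {
        if (cur.next != null && cur.next != cur) {
          getPrevious(cur).next = cur.next;
        }
        cur.next = null;
      }

      public static void main(String[] args) {
        Node a = new Node("a"), b = new Node("b"), c = new Node("c");
        a.next = a;           // a alone in its circle
        insertAfter(a, b);    // a -> b -> a
        insertBefore(a, c);   // a -> b -> c -> a
        removeSelf(a);        // b -> c -> b
        System.out.println(getPrevious(b).label); // prints c
      }
    }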

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Wed Jan 23 02:48:01 2013
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
 
 /**
  * Represent an {@link INodeFile} that is snapshotted.
@@ -70,10 +69,30 @@ public class INodeFileWithSnapshot exten
   }
 
   @Override
-  public void insert(FileWithSnapshot inode) {
+  public void insertAfter(FileWithSnapshot inode) {
     inode.setNext(this.getNext());
     this.setNext(inode);
   }
+  
+  @Override
+  public void insertBefore(FileWithSnapshot inode) {
+    inode.setNext(this);
+    if (this.next == null || this.next == this) {
+      this.next = inode;
+      return;
+    }
+    FileWithSnapshot previous = Util.getPrevious(this);
+    previous.setNext(inode);
+  }
+
+  @Override
+  public void removeSelf() {
+    if (this.next != null && this.next != this) {
+      FileWithSnapshot previous = Util.getPrevious(this);
+      previous.setNext(next);
+    }
+    this.next = null;
+  }
 
   @Override
   public short getBlockReplication() {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1437256&r1=1437255&r2=1437256&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Wed Jan 23 02:48:01 2013
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.io.DataOutput;
+import java.io.IOException;
 import java.util.Comparator;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -69,6 +74,13 @@ public class Snapshot implements Compara
     public INode getChild(byte[] name, Snapshot snapshot) {
       return getParent().getChild(name, snapshot);
     }
+    
+    @Override
+    public String getFullPathName() {
+      return getParent().getFullPathName() + Path.SEPARATOR
+          + HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR
+          + this.getLocalName();
+    }
   }
 
   /** Snapshot ID. */
@@ -83,7 +95,13 @@ public class Snapshot implements Compara
     this.root.setLocalName(name);
     this.root.setParent(dir);
   }
-
+  
+  /** Constructor used when loading fsimage */
+  Snapshot(int id, INodeDirectory root) {
+    this.id = id;
+    this.root = new Root(root);
+  }
+  
   /** @return the root directory of the snapshot. */
   public Root getRoot() {
     return root;
@@ -113,4 +131,11 @@ public class Snapshot implements Compara
   public String toString() {
     return getClass().getSimpleName() + "." + root.getLocalName();
   }
+  
+  /** Serialize the fields to out */
+  void write(DataOutput out) throws IOException {
+    out.writeInt(id);
+    // write root
+    FSImageSerialization.writeINodeDirectory(root, out);
+  }
 }
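
The getFullPathName override above makes a snapshot root report its path as
the snapshottable directory's path, then the ".snapshot" component, then the
snapshot name. A quick illustrative check of that construction (the separator
and ".snapshot" constant are hard-coded here so the sketch stands alone; HDFS
takes them from Path.SEPARATOR and HdfsConstants.DOT_SNAPSHOT_DIR):

    public class SnapshotPath {
      static final String SEPARATOR = "/";                 // Path.SEPARATOR
      static final String DOT_SNAPSHOT_DIR = ".snapshot";  // HdfsConstants

      // mirrors the getFullPathName override added in the patch
      static String fullPathName(String parentPath, String snapshotName) {
        return parentPath + SEPARATOR + DOT_SNAPSHOT_DIR + SEPARATOR
            + snapshotName;
      }

      public static void main(String[] args) {
        // a snapshot named "s1" of directory /user/alice
        System.out.println(fullPathName("/user/alice", "s1"));
        // prints /user/alice/.snapshot/s1
      }
    }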


