hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1446000 [2/2] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ ...
Date: Thu, 14 Feb 2013 00:43:29 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Thu Feb 14 00:43:28 2013
@@ -27,14 +27,12 @@ import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.UndoInfo;
@@ -59,6 +57,10 @@ public class INodeDirectoryWithSnapshot 
       super(created, deleted);
     }
 
+    private final INode setCreatedChild(final int c, final INode newChild) {
+      return getCreatedList().set(c, newChild);
+    }
+
     /** Serialize {@link #created} */
     private void writeCreated(DataOutputStream out) throws IOException {
         final List<INode> created = getCreatedList();
@@ -76,30 +78,7 @@ public class INodeDirectoryWithSnapshot 
         final List<INode> deleted = getDeletedList();
         out.writeInt(deleted.size());
         for (INode node : deleted) {
-          if (node.isDirectory()) {
-            FSImageSerialization.writeINodeDirectory((INodeDirectory) node, out);
-          } else { // INodeFile
-            final List<INode> created = getCreatedList();
-            // we write the block information only for INodeFile node when the
-            // node is only stored in the deleted list or the node is not a
-            // snapshot copy
-            int createdIndex = search(created, node.getKey());
-            if (createdIndex < 0) {
-              FSImageSerialization.writeINodeFile((INodeFile) node, out, true);
-            } else {
-              INodeFile cNode = (INodeFile) created.get(createdIndex);
-              INodeFile dNode = (INodeFile) node;
-              // A corner case here: after deleting a Snapshot, when combining
-              // SnapshotDiff, we may put two inodes sharing the same name but
-              // with totally different blocks in the created and deleted list of
-              // the same SnapshotDiff.
-              if (INodeFile.isOfSameFile(cNode, dNode)) {
-                FSImageSerialization.writeINodeFile(dNode, out, false);
-              } else {
-                FSImageSerialization.writeINodeFile(dNode, out, true);
-              }
-            }
-          }
+          FSImageSerialization.saveINode2Image(node, out);
         }
     }
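
The hunk above replaces per-type serialization (directory vs. file, created-list lookups, the shared-blocks corner case) with a single polymorphic call. A minimal sketch of the resulting pattern, with illustrative stand-ins for INode and FSImageSerialization.saveINode2Image rather than the actual HDFS API:

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.List;

    interface Node {
      // Stand-in for FSImageSerialization.saveINode2Image: every node type
      // serializes itself, so the caller needs no instanceof checks.
      void saveToImage(DataOutputStream out) throws IOException;
    }

    class DeletedListWriter {
      static void writeDeleted(List<Node> deleted, DataOutputStream out)
          throws IOException {
        out.writeInt(deleted.size());  // list size first
        for (Node node : deleted) {
          node.saveToImage(out);       // uniform serialization per node
        }
      }
    }
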
     
@@ -108,7 +87,7 @@ public class INodeDirectoryWithSnapshot 
       writeCreated(out);
       writeDeleted(out);    
     }
-    
+
     /** @return The list of INodeDirectory contained in the deleted list */
     private List<INodeDirectory> getDirsInDeleted() {
       List<INodeDirectory> dirList = new ArrayList<INodeDirectory>();
@@ -206,24 +185,14 @@ public class INodeDirectoryWithSnapshot 
     }
 
     @Override
-    INodeDirectory createSnapshotCopyOfCurrentINode(INodeDirectory currentDir) {
-      final INodeDirectory copy = currentDir instanceof INodeDirectoryWithQuota?
-          new INodeDirectoryWithQuota(currentDir, false,
-              currentDir.getNsQuota(), currentDir.getDsQuota())
-        : new INodeDirectory(currentDir, false);
-      copy.setChildren(null);
-      return copy;
-    }
-
-    @Override
     void combinePosteriorAndCollectBlocks(final INodeDirectory currentDir,
         final DirectoryDiff posterior, final BlocksMapUpdateInfo collectedBlocks) {
       diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
         /** Collect blocks for deleted files. */
         @Override
         public void process(INode inode) {
-          if (inode != null && inode instanceof INodeFile) {
-            ((INodeFile)inode).destroySubtreeAndCollectBlocks(null,
+          if (inode != null) {
+            inode.destroySubtreeAndCollectBlocks(posterior.snapshot,
                 collectedBlocks);
           }
         }
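
combinePosterior now hands every inode dropped while merging diffs to a Diff.Processor callback, and the callback collects blocks for any inode type rather than only INodeFile. A rough sketch of the callback shape, with simplified illustrative types:

    import java.util.ArrayList;
    import java.util.List;

    interface Processor<E> {
      void process(E element);  // invoked for each element dropped during the merge
    }

    class DiffCombiner {
      // Stand-in for Diff.combinePosterior: feed every dropped element
      // to the supplied processor.
      static <E> void combine(List<E> dropped, Processor<E> processor) {
        for (E e : dropped) {
          processor.process(e);
        }
      }

      public static void main(String[] args) {
        List<String> collected = new ArrayList<>();
        combine(List.of("blockA", "blockB"), collected::add);  // method ref as callback
        System.out.println(collected);  // [blockA, blockB]
      }
    }
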
@@ -295,16 +264,11 @@ public class INodeDirectoryWithSnapshot 
       return super.toString() + " childrenSize=" + childrenSize + ", " + diff;
     }
     
-    /** Serialize fields to out */
+    @Override
     void write(DataOutputStream out) throws IOException {
+      writeSnapshotPath(out);
       out.writeInt(childrenSize);
-      // No need to write all fields of Snapshot here, since the snapshot must
-      // have been recorded before when writing the FSImage. We only need to
-      // record the full path of its root.
-      byte[] fullPath = DFSUtil.string2Bytes(snapshot.getRoot()
-          .getFullPathName());
-      out.writeShort(fullPath.length);
-      out.write(fullPath);
+
       // write snapshotINode
       if (isSnapshotRoot()) {
         out.writeBoolean(true);
@@ -322,21 +286,31 @@ public class INodeDirectoryWithSnapshot 
     }
   }
 
-  /** A list of directory diffs. */
-  class DirectoryDiffList extends
-      AbstractINodeDiffList<INodeDirectory, DirectoryDiff> {
-    DirectoryDiffList(List<DirectoryDiff> diffs) {
-      super(diffs);
-    }
+  static class DirectoryDiffFactory
+      extends AbstractINodeDiff.Factory<INodeDirectory, DirectoryDiff> {
+    static final DirectoryDiffFactory INSTANCE = new DirectoryDiffFactory();
 
     @Override
-    INodeDirectoryWithSnapshot getCurrentINode() {
-      return INodeDirectoryWithSnapshot.this;
+    DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) {
+      return new DirectoryDiff(snapshot, currentDir);
     }
 
     @Override
-    DirectoryDiff addSnapshotDiff(Snapshot snapshot) {
-      return addLast(new DirectoryDiff(snapshot, getCurrentINode()));
+    INodeDirectory createSnapshotCopy(INodeDirectory currentDir) {
+      final INodeDirectory copy = currentDir instanceof INodeDirectoryWithQuota?
+          new INodeDirectoryWithQuota(currentDir, false,
+              currentDir.getNsQuota(), currentDir.getDsQuota())
+        : new INodeDirectory(currentDir, false);
+      copy.setChildren(null);
+      return copy;
+    }
+  }
+
+  /** A list of directory diffs. */
+  static class DirectoryDiffList
+      extends AbstractINodeDiffList<INodeDirectory, DirectoryDiff> {
+    DirectoryDiffList() {
+      setFactory(DirectoryDiffFactory.INSTANCE);
     }
   }
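
With this refactor the diff list no longer needs a back-reference to its owning inode: a stateless factory singleton supplies both new diffs and snapshot copies, and the owner is passed in explicitly where needed. A condensed sketch of the factory shape, with simplified types standing in for INodeDirectory and DirectoryDiff:

    class Dir { }

    class DirDiff {
      final String snapshot;
      final Dir dir;
      DirDiff(String snapshot, Dir dir) { this.snapshot = snapshot; this.dir = dir; }
    }

    abstract class DiffFactory<N, D> {
      abstract D createDiff(String snapshot, N current);
      abstract N createSnapshotCopy(N current);
    }

    class DirDiffFactory extends DiffFactory<Dir, DirDiff> {
      static final DirDiffFactory INSTANCE = new DirDiffFactory();  // stateless, shared

      @Override
      DirDiff createDiff(String snapshot, Dir current) {
        return new DirDiff(snapshot, current);
      }

      @Override
      Dir createSnapshotCopy(Dir current) {
        return new Dir();  // the real copy preserves quotas and clears children
      }
    }
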
 
@@ -425,7 +399,7 @@ public class INodeDirectoryWithSnapshot 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
     super(that, adopt, that.getNsQuota(), that.getDsQuota());
-    this.diffs = new DirectoryDiffList(diffs == null? null: diffs.asList());
+    this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 
   /** @return the last snapshot. */
@@ -439,14 +413,15 @@ public class INodeDirectoryWithSnapshot 
   }
 
   @Override
-  public INodeDirectoryWithSnapshot recordModification(Snapshot latest) {
-    return saveSelf2Snapshot(latest, null);
+  public INodeDirectoryWithSnapshot recordModification(final Snapshot latest) {
+    return isInLatestSnapshot(latest)?
+        saveSelf2Snapshot(latest, null): this;
   }
 
   /** Save the snapshot copy to the latest snapshot. */
   public INodeDirectoryWithSnapshot saveSelf2Snapshot(
       final Snapshot latest, final INodeDirectory snapshotCopy) {
-    diffs.saveSelf2Snapshot(latest, snapshotCopy);
+    diffs.saveSelf2Snapshot(latest, this, snapshotCopy);
     return this;
   }
 
@@ -459,7 +434,7 @@ public class INodeDirectoryWithSnapshot 
       return child;
     }
 
-    final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest);
+    final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, this);
     if (diff.getChild(child.getLocalNameBytes(), false, this) != null) {
       // it was already saved in the latest snapshot earlier.  
       return child;
@@ -474,7 +449,7 @@ public class INodeDirectoryWithSnapshot 
     ChildrenDiff diff = null;
     Integer undoInfo = null;
     if (latest != null) {
-      diff = diffs.checkAndAddLatestSnapshotDiff(latest).diff;
+      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.create(inode);
     }
     final boolean added = super.addChild(inode, setModTime, null);
@@ -489,7 +464,7 @@ public class INodeDirectoryWithSnapshot 
     ChildrenDiff diff = null;
     UndoInfo<INode> undoInfo = null;
     if (latest != null) {
-      diff = diffs.checkAndAddLatestSnapshotDiff(latest).diff;
+      diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.delete(child);
     }
     final INode removed = super.removeChild(child, null);
@@ -503,6 +478,24 @@ public class INodeDirectoryWithSnapshot 
   }
   
   @Override
+  public void replaceChild(final INode oldChild, final INode newChild) {
+    super.replaceChild(oldChild, newChild);
+
+    // replace the created child, if there is any.
+    final byte[] name = oldChild.getLocalNameBytes();
+    final List<DirectoryDiff> diffList = diffs.asList();
+    for(int i = diffList.size() - 1; i >= 0; i--) {
+      final ChildrenDiff diff = diffList.get(i).diff;
+      final int c = diff.searchCreatedIndex(name);
+      if (c >= 0) {
+        final INode removed = diff.setCreatedChild(c, newChild);
+        Preconditions.checkState(removed == oldChild);
+        return;
+      }
+    }
+  }
+
+  @Override
   public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
     final DirectoryDiff diff = diffs.getDiff(snapshot);
     return diff != null? diff.getChildrenList(this): super.getChildrenList(null);
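
replaceChild above walks the diff list newest-first and rewires only the first created-list entry matching the old child's name, since the newest diff holds the live reference. A compact sketch of that search, assuming a simple diff abstraction (illustrative names, not the real ChildrenDiff):

    import java.util.List;

    class ReplaceSketch {
      interface ChildDiff<T> {
        int searchCreatedIndex(String name);       // < 0 when absent
        T setCreatedChild(int index, T newChild);  // returns the replaced entry
      }

      // Scan from the newest diff backwards; the first hit is the live entry.
      static <T> boolean replaceCreated(List<ChildDiff<T>> diffs, String name,
          T oldChild, T newChild) {
        for (int i = diffs.size() - 1; i >= 0; i--) {
          final int c = diffs.get(i).searchCreatedIndex(name);
          if (c >= 0) {
            final T removed = diffs.get(i).setCreatedChild(c, newChild);
            assert removed == oldChild;  // mirrors the Preconditions.checkState
            return true;
          }
        }
        return false;  // the name never appeared in any created list
      }
    }
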
@@ -551,12 +544,12 @@ public class INodeDirectoryWithSnapshot 
   }
   
   /**
-   * Get all the INodeDirectory stored in the deletes lists.
+   * Get all the directories that are stored in some snapshot but not in the
+   * current children list. These directories are equivalent to the directories
+   * stored in the deleted lists.
    * 
-   * @param snapshotDirMap
-   *          A HashMap storing all the INodeDirectory stored in the deleted
-   *          lists, with their associated full Snapshot.
-   * @return The number of INodeDirectory returned.
+   * @param snapshotDirMap A snapshot-to-directory-list map used to return the results.
+   * @return The number of directories returned.
    */
   public int getSnapshotDirectory(
       Map<Snapshot, List<INodeDirectory>> snapshotDirMap) {
@@ -574,11 +567,10 @@ public class INodeDirectoryWithSnapshot 
   @Override
   public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
       final BlocksMapUpdateInfo collectedBlocks) {
-    int n = destroySubtreeAndCollectBlocksRecursively(
-        snapshot, collectedBlocks);
+    int n = destroySubtreeAndCollectBlocksRecursively(snapshot, collectedBlocks);
     if (snapshot != null) {
       final DirectoryDiff removed = getDiffs().deleteSnapshotDiff(snapshot,
-          collectedBlocks);
+          this, collectedBlocks);
       if (removed != null) {
         n++; //count this dir only if a snapshot diff is removed.
       }
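
The return value of destroySubtreeAndCollectBlocks counts destroyed inodes; with this change the directory counts itself only when a diff for the given snapshot was actually removed. A small sketch of that counting rule (simplified types):

    import java.util.HashMap;
    import java.util.Map;

    class DestroyCountSketch {
      private final Map<String, Object> diffBySnapshot = new HashMap<>();

      // Returns the number of destroyed inodes: the subtree total, plus one
      // for this directory only if it actually carried a diff for the snapshot.
      int destroy(String snapshot, int destroyedInSubtree) {
        int n = destroyedInSubtree;
        if (snapshot != null && diffBySnapshot.remove(snapshot) != null) {
          n++;  // count this dir only when a snapshot diff is removed
        }
        return n;
      }
    }
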

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java Thu Feb 14 00:43:28 2013
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.List;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -34,15 +32,13 @@ import org.apache.hadoop.hdfs.server.nam
 public class INodeFileUnderConstructionWithSnapshot
     extends INodeFileUnderConstruction implements FileWithSnapshot {
   /**
-   * The difference of an {@link INodeFileUnderConstruction} between two snapshots.
+   * Factory for {@link INodeFileUnderConstruction} diff.
    */
-  static class FileUcDiff extends FileDiff {
-    private FileUcDiff(Snapshot snapshot, INodeFile file) {
-      super(snapshot, file);
-    }
+  static class FileUcDiffFactory extends FileDiffFactory {
+    static final FileUcDiffFactory INSTANCE = new FileUcDiffFactory();
 
     @Override
-    INodeFileUnderConstruction createSnapshotCopyOfCurrentINode(INodeFile file) {
+    INodeFileUnderConstruction createSnapshotCopy(INodeFile file) {
       final INodeFileUnderConstruction uc = (INodeFileUnderConstruction)file;
       final INodeFileUnderConstruction copy = new INodeFileUnderConstruction(
           uc, uc.getClientName(), uc.getClientMachine(), uc.getClientNode());
@@ -51,29 +47,17 @@ public class INodeFileUnderConstructionW
     }
   }
 
-  /**
-   * A list of file diffs.
-   */
-  static class FileUcDiffList extends FileDiffList {
-    private FileUcDiffList(INodeFile currentINode, final List<FileDiff> diffs) {
-      super(currentINode, diffs);
-    }
-
-    @Override
-    FileDiff addSnapshotDiff(Snapshot snapshot) {
-      return addLast(new FileUcDiff(snapshot, getCurrentINode()));
-    }
-  }
-
-  private final FileUcDiffList diffs;
+  private final FileDiffList diffs;
+  private boolean isCurrentFileDeleted = false;
 
   INodeFileUnderConstructionWithSnapshot(final INodeFile f,
       final String clientName,
       final String clientMachine,
-      final DatanodeDescriptor clientNode) {
+      final DatanodeDescriptor clientNode,
+      final FileDiffList diffs) {
     super(f, clientName, clientMachine, clientNode);
-    this.diffs = new FileUcDiffList(this, f instanceof FileWithSnapshot?
-        ((FileWithSnapshot)f).getFileDiffList().asList(): null);
+    this.diffs = diffs != null? diffs: new FileDiffList();
+    this.diffs.setFactory(FileUcDiffFactory.INSTANCE);
   }
 
   /**
@@ -82,15 +66,16 @@ public class INodeFileUnderConstructionW
    * 
    * @param f The given {@link INodeFileUnderConstruction} instance
    */
-  public INodeFileUnderConstructionWithSnapshot(INodeFileUnderConstruction f) {
-    this(f, f.getClientName(), f.getClientMachine(), f.getClientNode());
+  public INodeFileUnderConstructionWithSnapshot(INodeFileUnderConstruction f,
+      final FileDiffList diffs) {
+    this(f, f.getClientName(), f.getClientMachine(), f.getClientNode(), diffs);
   }
   
   @Override
   protected INodeFileWithSnapshot toINodeFile(final long mtime) {
     assertAllBlocksComplete();
     final long atime = getModificationTime();
-    final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this);
+    final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this, getDiffs());
     f.setModificationTime(mtime, null);
     f.setAccessTime(atime, null);
     return f;
@@ -98,16 +83,14 @@ public class INodeFileUnderConstructionW
 
   @Override
   public boolean isCurrentFileDeleted() {
-    return getParent() == null;
+    return isCurrentFileDeleted;
   }
 
   @Override
   public INodeFileUnderConstructionWithSnapshot recordModification(
       final Snapshot latest) {
-    // if this object is NOT the latest snapshot copy, this object is created
-    // after the latest snapshot, then do NOT record modification.
-    if (this == getParent().getChild(getLocalNameBytes(), latest)) {
-      diffs.saveSelf2Snapshot(latest, null);
+    if (isInLatestSnapshot(latest)) {
+      diffs.saveSelf2Snapshot(latest, this, null);
     }
     return this;
   }
@@ -118,7 +101,7 @@ public class INodeFileUnderConstructionW
   }
 
   @Override
-  public FileDiffList getFileDiffList() {
+  public FileDiffList getDiffs() {
     return diffs;
   }
 
@@ -146,9 +129,9 @@ public class INodeFileUnderConstructionW
   public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
       final BlocksMapUpdateInfo collectedBlocks) {
     if (snapshot == null) {
-      clearReferences();
+      isCurrentFileDeleted = true;
     } else {
-      if (diffs.deleteSnapshotDiff(snapshot, collectedBlocks) == null) {
+      if (diffs.deleteSnapshotDiff(snapshot, this, collectedBlocks) == null) {
         //snapshot diff not found and nothing is deleted.
         return 0;
       }
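
Deletion of the current file is now recorded with an explicit flag instead of being inferred from getParent() == null, so snapshot copies can keep referencing the inode after it disappears from the current view. A minimal sketch of the idea (not the real class hierarchy):

    class SnapshotAwareFile {
      private boolean isCurrentFileDeleted = false;

      // Removing the file from the current namespace only flips a flag;
      // snapshot diffs may still hold this object, so no references are cleared.
      void deleteCurrentFile() {
        isCurrentFileDeleted = true;
      }

      boolean isCurrentFileDeleted() {
        return isCurrentFileDeleted;
      }
    }
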

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Thu Feb 14 00:43:28 2013
@@ -30,11 +30,17 @@ import org.apache.hadoop.hdfs.server.nam
 public class INodeFileWithSnapshot extends INodeFile
     implements FileWithSnapshot {
   private final FileDiffList diffs;
+  private boolean isCurrentFileDeleted = false;
 
   public INodeFileWithSnapshot(INodeFile f) {
+    this(f, f instanceof FileWithSnapshot?
+        ((FileWithSnapshot)f).getDiffs(): null);
+  }
+
+  public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) {
     super(f);
-    this.diffs = new FileDiffList(this, f instanceof FileWithSnapshot?
-        ((FileWithSnapshot)f).getFileDiffList().asList(): null);
+    this.diffs = diffs != null? diffs: new FileDiffList();
+    this.diffs.setFactory(FileDiffFactory.INSTANCE);
   }
 
   @Override
@@ -43,20 +49,18 @@ public class INodeFileWithSnapshot exten
       final String clientMachine,
       final DatanodeDescriptor clientNode) {
     return new INodeFileUnderConstructionWithSnapshot(this,
-        clientName, clientMachine, clientNode);
+        clientName, clientMachine, clientNode, getDiffs());
   }
 
   @Override
   public boolean isCurrentFileDeleted() {
-    return getParent() == null;
+    return isCurrentFileDeleted;
   }
 
   @Override
   public INodeFileWithSnapshot recordModification(final Snapshot latest) {
-    // if this object is NOT the latest snapshot copy, this object is created
-    // after the latest snapshot, then do NOT record modification.
-    if (this == getParent().getChild(getLocalNameBytes(), latest)) {
-      diffs.saveSelf2Snapshot(latest, null);
+    if (isInLatestSnapshot(latest)) {
+      diffs.saveSelf2Snapshot(latest, this, null);
     }
     return this;
   }
@@ -67,7 +71,7 @@ public class INodeFileWithSnapshot exten
   }
 
   @Override
-  public FileDiffList getFileDiffList() {
+  public FileDiffList getDiffs() {
     return diffs;
   }
 
@@ -95,9 +99,9 @@ public class INodeFileWithSnapshot exten
   public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
       final BlocksMapUpdateInfo collectedBlocks) {
     if (snapshot == null) {
-      clearReferences();
+      isCurrentFileDeleted = true;
     } else {
-      if (diffs.deleteSnapshotDiff(snapshot, collectedBlocks) == null) {
+      if (diffs.deleteSnapshotDiff(snapshot, this, collectedBlocks) == null) {
         //snapshot diff not found and nothing is deleted.
         return 0;
       }
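
A pattern repeated across the file classes in this commit: the diff list is threaded through the constructors so conversions between file flavors share one list instead of copying it, and the appropriate factory is installed afterwards. A hedged sketch, with simplified stand-ins for FileDiffList and its factory:

    import java.util.ArrayList;
    import java.util.List;

    interface DiffFactory { }  // stand-in for FileDiffFactory / FileUcDiffFactory

    class DiffList {
      private DiffFactory factory;
      private final List<String> diffs = new ArrayList<>();
      void setFactory(DiffFactory f) { this.factory = f; }
    }

    class FileSketch {
      private final DiffList diffs;

      // Reuse the caller's diff list when one exists (conversions keep the
      // same history object); otherwise start with an empty list.
      FileSketch(DiffList existing, DiffFactory factory) {
        this.diffs = existing != null ? existing : new DiffList();
        this.diffs.setFactory(factory);
      }
    }
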

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Thu Feb 14 00:43:28 2013
@@ -90,15 +90,15 @@ public class Snapshot implements Compara
   private final Root root;
 
   Snapshot(int id, String name, INodeDirectorySnapshottable dir) {
-    this(id, DFSUtil.string2Bytes(name), dir, dir);
+    this(id, dir, dir);
+    this.root.setLocalName(DFSUtil.string2Bytes(name));
   }
 
-  Snapshot(int id, byte[] name, INodeDirectory dir,
+  Snapshot(int id, INodeDirectory dir,
       INodeDirectorySnapshottable parent) {
     this.id = id;
     this.root = new Root(dir);
 
-    this.root.setLocalName(name);
     this.root.setParent(parent);
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Thu Feb 14 00:43:28 2013
@@ -32,8 +32,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
@@ -67,20 +69,68 @@ public class SnapshotFSImageFormat {
    * @param sNode The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutputStream} to write.
    */
-  public static void saveSnapshotDiffs(INodeDirectoryWithSnapshot sNode,
-      DataOutputStream out) throws IOException {
-    // # of SnapshotDiff
-    List<DirectoryDiff> diffs = sNode.getDiffs().asList();
-    // Record the SnapshotDiff in reversed order, so that we can find the
-    // correct reference for INodes in the created list when loading the
-    // FSImage
-    out.writeInt(diffs.size());
-    for (int i = diffs.size() - 1; i >= 0; i--) {
-      DirectoryDiff sdiff = diffs.get(i);
-      sdiff.write(out);
+  private static <N extends INode, D extends AbstractINodeDiff<N, D>>
+      void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs,
+      final DataOutputStream out) throws IOException {
+    // Record the diffs in reversed order, so that we can find the correct
+    // reference for INodes in the created list when loading the FSImage
+    if (diffs == null) {
+      out.writeInt(-1); // no diffs
+    } else {
+      final List<D> list = diffs.asList();
+      final int size = list.size();
+      out.writeInt(size);
+      for (int i = size - 1; i >= 0; i--) {
+        list.get(i).write(out);
+      }
     }
   }
   
+  public static void saveDirectoryDiffList(final INodeDirectory dir,
+      final DataOutputStream out) throws IOException {
+    saveINodeDiffs(dir instanceof INodeDirectoryWithSnapshot?
+        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out);
+  }
+  
+  public static void saveFileDiffList(final INodeFile file,
+      final DataOutputStream out) throws IOException {
+    saveINodeDiffs(file instanceof FileWithSnapshot?
+        ((FileWithSnapshot)file).getDiffs(): null, out);
+  }
+
+  public static FileDiffList loadFileDiffList(DataInputStream in,
+      FSImageFormat.Loader loader) throws IOException {
+    final int size = in.readInt();
+    if (size == -1) {
+      return null;
+    } else {
+      final FileDiffList diffs = new FileDiffList();
+      FileDiff posterior = null;
+      for(int i = 0; i < size; i++) {
+        final FileDiff d = loadFileDiff(posterior, in, loader);
+        diffs.addFirst(d);
+        posterior = d;
+      }
+      return diffs;
+    }
+  }
+
+  private static FileDiff loadFileDiff(FileDiff posterior, DataInputStream in,
+      FSImageFormat.Loader loader) throws IOException {
+    // 1. Read the full path of the Snapshot root to identify the Snapshot
+    Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in),
+        loader.getFSDirectoryInLoading());
+
+    // 2. Load file size
+    final long fileSize = in.readLong();
+    
+    // 3. Load snapshotINode 
+    final INodeFile snapshotINode = in.readBoolean()?
+        (INodeFile) loader.loadINodeWithLocalName(true, in): null;
+    
+    return new FileDiff(snapshot, snapshotINode, posterior, fileSize);
+  }
+
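
The new helpers fix a tiny on-disk format for diff lists: -1 means the inode has no diffs; otherwise the count is followed by the diffs written newest-first, and the loader rebuilds the original order with addFirst. A self-contained round-trip sketch using plain strings in place of diff objects:

    import java.io.*;
    import java.util.*;

    class DiffListIO {
      // Write: -1 sentinel for "no diff list", else size then the entries
      // in reverse, so the newest diff appears first in the stream.
      static void save(List<String> diffs, DataOutputStream out) throws IOException {
        if (diffs == null) {
          out.writeInt(-1);
        } else {
          out.writeInt(diffs.size());
          for (int i = diffs.size() - 1; i >= 0; i--) {
            out.writeUTF(diffs.get(i));
          }
        }
      }

      // Load: addFirst restores the original oldest-first ordering.
      static List<String> load(DataInputStream in) throws IOException {
        final int size = in.readInt();
        if (size == -1) {
          return null;
        }
        final LinkedList<String> diffs = new LinkedList<>();
        for (int i = 0; i < size; i++) {
          diffs.addFirst(in.readUTF());
        }
        return diffs;
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        save(Arrays.asList("s1", "s2", "s3"), new DataOutputStream(buf));
        List<String> back = load(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(back);  // [s1, s2, s3]
      }
    }
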
   /**
    * Load a node stored in the created list from fsimage.
    * @param createdNodeName The name of the created node.
@@ -92,9 +142,9 @@ public class SnapshotFSImageFormat {
     // the INode in the created list should be a reference to another INode
     // in posterior SnapshotDiffs or one of the current children
     for (DirectoryDiff postDiff : parent.getDiffs()) {
-      INode created = findCreated(createdNodeName, postDiff.getChildrenDiff());
-      if (created != null) {
-        return created;
+      INode d = postDiff.getChildrenDiff().searchDeleted(createdNodeName);
+      if (d != null) {
+        return d;
       } // else go to the next SnapshotDiff
     } 
     // use the current child
@@ -108,41 +158,6 @@ public class SnapshotFSImageFormat {
   }
   
   /**
-   * Search the given {@link ChildrenDiff} to find an inode matching the specific name.
-   * @param createdNodeName The name of the node for searching.
-   * @param diff The given {@link ChildrenDiff} where to search the node.
-   * @return The matched inode. Return null if no matched inode can be found.
-   */
-  private static INode findCreated(byte[] createdNodeName, ChildrenDiff diff) {
-    INode c = diff.searchCreated(createdNodeName);
-    INode d = diff.searchDeleted(createdNodeName);
-    if (c == null && d != null) {
-      // if an INode with the same name is only contained in the deleted
-      // list, then the node should be the snapshot copy of a deleted
-      // node, and the node in the created list should be its reference 
-      return d;
-    } else if (c != null && d != null) {
-      // in a posterior SnapshotDiff, if the created/deleted lists both
-      // contains nodes with the same name (c & d), there are two
-      // possibilities:
-      // 
-      // 1) c and d are used to represent a modification, and 
-      // 2) d indicates the deletion of the node, while c was originally
-      // contained in the created list of a later snapshot, but c was
-      // moved here because of the snapshot deletion.
-      // 
-      // For case 1), c and d should be both INodeFile and should share
-      // the same blockInfo list.
-      if (c.isFile() && INodeFile.isOfSameFile((INodeFile) c, (INodeFile) d)) {
-        return c;
-      } else {
-        return d;
-      }
-    }
-    return null;
-  }
-  
-  /**
    * Load the created list from fsimage.
    * @param parent The directory that the created list belongs to.
    * @param in The {@link DataInputStream} to read.
@@ -169,8 +184,7 @@ public class SnapshotFSImageFormat {
    * @param createdList The created list associated with the deleted list in 
    *                    the same Diff.
    * @param in The {@link DataInputStream} to read.
-   * @param loader The {@link Loader} instance. Used to call the
-   *               {@link Loader#loadINode(DataInputStream)} method.
+   * @param loader The {@link Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
@@ -179,10 +193,7 @@ public class SnapshotFSImageFormat {
     int deletedSize = in.readInt();
     List<INode> deletedList = new ArrayList<INode>(deletedSize);
     for (int i = 0; i < deletedSize; i++) {
-      byte[] deletedNodeName = new byte[in.readShort()];
-      in.readFully(deletedNodeName);
-      INode deleted = loader.loadINode(in);
-      deleted.setLocalName(deletedNodeName);
+      final INode deleted = loader.loadINodeWithLocalName(false, in);
       deletedList.add(deleted);
       // set parent: the parent field of an INode in the deleted list is not 
       // useful, but set the parent here to be consistent with the original 
@@ -192,11 +203,11 @@ public class SnapshotFSImageFormat {
           && ((INodeFile) deleted).getBlocks() == null) {
         // if deleted is an INodeFile, and its blocks is null, then deleted
         // must be an INodeFileWithLink, and we need to rebuild its next link
-        int c = Collections.binarySearch(createdList, deletedNodeName);
+        int c = Collections.binarySearch(createdList, deleted.getLocalNameBytes());
         if (c < 0) {
           throw new IOException(
               "Cannot find the INode linked with the INode "
-                  + DFSUtil.bytes2String(deletedNodeName)
+                  + deleted.getLocalName()
                   + " in deleted list while loading FSImage.");
         }
         // deleted must be an FileWithSnapshot (INodeFileSnapshot or 
@@ -239,29 +250,30 @@ public class SnapshotFSImageFormat {
   private static Snapshot loadSnapshot(INodeDirectorySnapshottable parent,
       DataInputStream in, FSImageFormat.Loader loader) throws IOException {
     int snapshotId = in.readInt();
-    byte[] snapshotName = new byte[in.readShort()];
-    in.readFully(snapshotName);
-    final INodeDirectory rootNode = (INodeDirectory)loader.loadINode(in);
-    return new Snapshot(snapshotId, snapshotName, rootNode, parent);
+    INodeDirectory rootNode = (INodeDirectory)loader.loadINodeWithLocalName(
+        false, in);
+    return new Snapshot(snapshotId, rootNode, parent);
   }
   
   /**
    * Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
    * directory.
-   * @param snapshottableParent The snapshottable directory for loading.
+   * @param dir The snapshottable directory for loading.
    * @param numSnapshotDiffs The number of {@link SnapshotDiff} that the 
    *                         directory has.
    * @param in The {@link DataInputStream} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is 
    *               using.
    */
-  public static void loadSnapshotDiffList(
-      INodeDirectoryWithSnapshot parentWithSnapshot, int numSnapshotDiffs,
-      DataInputStream in, FSImageFormat.Loader loader)
-      throws IOException {
-    for (int i = 0; i < numSnapshotDiffs; i++) {
-      DirectoryDiff diff = loadSnapshotDiff(parentWithSnapshot, in, loader);
-      parentWithSnapshot.getDiffs().addFirst(diff);
+  public static void loadDirectoryDiffList(INodeDirectory dir,
+      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
+    final int size = in.readInt();
+    if (size != -1) {
+      INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir;
+      DirectoryDiffList diffs = withSnapshot.getDiffs();
+      for (int i = 0; i < size; i++) {
+        diffs.addFirst(loadDirectoryDiff(withSnapshot, in, loader));
+      }
     }
   }
   
@@ -287,7 +299,7 @@ public class SnapshotFSImageFormat {
    *               using.
    * @return The snapshotINode.
    */
-  private static INodeDirectory loadSnapshotINodeInSnapshotDiff(
+  private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
       Snapshot snapshot, DataInputStream in, FSImageFormat.Loader loader)
       throws IOException {
     // read the boolean indicating whether snapshotINode == Snapshot.Root
@@ -296,37 +308,31 @@ public class SnapshotFSImageFormat {
       return snapshot.getRoot();
     } else {
       // another boolean is used to indicate whether snapshotINode is non-null
-      if (in.readBoolean()) {
-        byte[] localName = new byte[in.readShort()];
-        in.readFully(localName);
-        INodeDirectory snapshotINode = (INodeDirectory) loader.loadINode(in);
-        snapshotINode.setLocalName(localName);
-        return snapshotINode;
-      }
+      return in.readBoolean()?
+          (INodeDirectory) loader.loadINodeWithLocalName(true, in): null;
     }
-    return null;
   }
    
   /**
-   * Load {@link SnapshotDiff} from fsimage.
+   * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInputStream} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is 
    *               using.
-   * @return A {@link SnapshotDiff}.
+   * @return A {@link DirectoryDiff}.
    */
-  private static DirectoryDiff loadSnapshotDiff(
+  private static DirectoryDiff loadDirectoryDiff(
       INodeDirectoryWithSnapshot parent, DataInputStream in,
       FSImageFormat.Loader loader) throws IOException {
-    // 1. Load SnapshotDiff#childrenSize
-    int childrenSize = in.readInt();
-    // 2. Read the full path of the Snapshot's Root, identify 
-    //    SnapshotDiff#Snapshot
+    // 1. Read the full path of the Snapshot root to identify the Snapshot
     Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in),
         loader.getFSDirectoryInLoading());
+
+    // 2. Load DirectoryDiff#childrenSize
+    int childrenSize = in.readInt();
     
-    // 3. Load SnapshotDiff#snapshotINode 
-    INodeDirectory snapshotINode = loadSnapshotINodeInSnapshotDiff(snapshot,
+    // 3. Load DirectoryDiff#snapshotINode 
+    INodeDirectory snapshotINode = loadSnapshotINodeInDirectoryDiff(snapshot,
         in, loader);
     
     // 4. Load the created list in SnapshotDiff#Diff

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Thu Feb 14 00:43:28 2013
@@ -224,7 +224,7 @@ public class SnapshotManager implements 
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),
             dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-            dir.getLocalNameBytes(), dir.getNumSnapshots(),
+            dir.getLocalNameBytes(), dir.getId(), dir.getNumSnapshots(),
             dir.getSnapshotQuota(), dir.getParent() == null ? INode.EMPTY_BYTES
                 : DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
         statusList.add(status);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/Diff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/Diff.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/Diff.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/Diff.java Thu Feb 14 00:43:28 2013
@@ -162,12 +162,16 @@ public class Diff<K, E extends Diff.Elem
     return deleted == null? Collections.<E>emptyList(): deleted;
   }
 
+  public int searchCreatedIndex(final K name) {
+    return search(created, name);
+  }
+
   /**
    * @return null if the element is not found;
    *         otherwise, return the element in the c-list.
    */
   public E searchCreated(final K name) {
-    final int c = search(created, name);
+    final int c = searchCreatedIndex(name);
     return c < 0 ? null : created.get(c);
   }
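
searchCreatedIndex exposes the raw binary-search index so callers such as replaceChild can both test membership and mutate the created list in place; searchCreated remains a thin wrapper over it. A sketch of that relationship, assuming a sorted created list:

    import java.util.Collections;
    import java.util.List;

    class CreatedList {
      private final List<String> created;
      CreatedList(List<String> sortedNames) { this.created = sortedNames; }

      // Raw index: >= 0 when found, < 0 otherwise (binarySearch convention).
      int searchCreatedIndex(String name) {
        return Collections.binarySearch(created, name);
      }

      // Element view, kept as a thin wrapper over the index search.
      String searchCreated(String name) {
        final int c = searchCreatedIndex(name);
        return c < 0 ? null : created.get(c);
      }
    }
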
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Thu Feb 14 00:43:28 2013
@@ -63,7 +63,7 @@ public class CreateEditsLog {
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                       new FsPermission((short)0777));
     INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
-        p, 0L);
+        null, p, 0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -92,7 +92,7 @@ public class CreateEditsLog {
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
+        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
       editLog.logOpenFile(filePath, new INodeFileUnderConstruction(

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu Feb 14 00:43:28 2013
@@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -217,7 +218,8 @@ public abstract class FSImageTestUtil {
         FsPermission.createImmutable((short)0755));
     for (int i = 1; i <= numDirs; i++) {
       String dirName = "dir" + i;
-      INodeDirectory dir = new INodeDirectory(newInodeId + i -1, dirName, perms);
+      INodeDirectory dir = new INodeDirectory(newInodeId + i -1,
+          DFSUtil.string2Bytes(dirName), perms, 0L);
       editLog.logMkDir("/" + dirName, dir);
     }
     editLog.logSync();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Thu Feb 14 00:43:28 2013
@@ -24,8 +24,10 @@ import java.io.PrintWriter;
 import java.util.EnumSet;
 import java.util.Random;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -34,6 +36,7 @@ import org.apache.hadoop.hdfs.client.Hdf
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -42,6 +45,11 @@ import org.junit.Test;
  * Test FSImage save/load when Snapshot is supported
  */
 public class TestFSImageWithSnapshot {
+  {
+    SnapshotTestHelper.disableLogs();
+    ((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
+  }
+
   static final long seed = 0;
   static final short REPLICATION = 3;
   static final int BLOCKSIZE = 1024;
@@ -160,36 +168,49 @@ public class TestFSImageWithSnapshot {
    * 6. Dump the FSDirectory again and compare the two dumped string.
    * </pre>
    */
-//  TODO: fix snapshot fsimage
-//  @Test
+  @Test
   public void testSaveLoadImage() throws Exception {
+    int s = 0;
     // make changes to the namesystem
     hdfs.mkdirs(dir);
     hdfs.allowSnapshot(dir.toString());
-    hdfs.createSnapshot(dir, "s0");
-    
+
+    hdfs.createSnapshot(dir, "s" + ++s);
     Path sub1 = new Path(dir, "sub1");
+    hdfs.mkdirs(sub1);
+    hdfs.setPermission(sub1, new FsPermission((short)0777));
+    Path sub11 = new Path(sub1, "sub11");
+    hdfs.mkdirs(sub11);
+    checkImage(s);
+
+    hdfs.createSnapshot(dir, "s" + ++s);
     Path sub1file1 = new Path(sub1, "sub1file1");
     Path sub1file2 = new Path(sub1, "sub1file2");
     DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
+    checkImage(s);
     
-    hdfs.createSnapshot(dir, "s1");
-    
+    hdfs.createSnapshot(dir, "s" + ++s);
     Path sub2 = new Path(dir, "sub2");
     Path sub2file1 = new Path(sub2, "sub2file1");
     Path sub2file2 = new Path(sub2, "sub2file2");
     DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, REPLICATION, seed);
+    checkImage(s);
+
+    hdfs.createSnapshot(dir, "s" + ++s);
     hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
     hdfs.delete(sub1file2, true);
-    
-    hdfs.createSnapshot(dir, "s2");
     hdfs.setOwner(sub2, "dr.who", "unknown");
     hdfs.delete(sub2file2, true);
-    
+    checkImage(s);
+  }
+
+  void checkImage(int s) throws IOException {
+    final String name = "s" + s;
+
     // dump the fsdir tree
-    File fsnBefore = dumpTree2File("before");
+    File fsnBefore = dumpTree2File(name + "_before");
     
     // save the namesystem to a temp file
     File imageFile = saveFSImageToTempFile();
@@ -206,7 +227,7 @@ public class TestFSImageWithSnapshot {
     loadFSImageFromTempFile(imageFile);
     
     // dump the fsdir tree again
-    File fsnAfter = dumpTree2File("after");
+    File fsnAfter = dumpTree2File(name + "_after");
     
     // compare two dumped tree
     SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter);
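
Each checkImage call dumps the namespace, saves and reloads the fsimage, dumps again, and compares the two dumps. The same round-trip check, reduced to a framework-free skeleton (illustrative interface, not the test's actual API):

    class RoundTripCheck {
      interface Persistable<S> {
        String dump();       // textual view of the current state
        S save();            // serialize the state to an image
        void load(S image);  // restart from a saved image
      }

      // Dump, round-trip through the image, dump again, and compare.
      static <S> void check(Persistable<S> sys) {
        final String before = sys.dump();
        final S image = sys.save();
        sys.load(image);
        final String after = sys.dump();
        if (!before.equals(after)) {
          throw new AssertionError("image round trip changed the state");
        }
      }
    }
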
@@ -215,8 +236,7 @@ public class TestFSImageWithSnapshot {
   /**
    * Test the fsimage saving/loading while file appending.
    */
-//  TODO: fix snapshot fsimage
-//  @Test
+  @Test
   public void testSaveLoadImageWithAppending() throws Exception {
     Path sub1 = new Path(dir, "sub1");
     Path sub1file1 = new Path(sub1, "sub1file1");

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Thu Feb 14 00:43:28 2013
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -154,8 +155,7 @@ public class TestFsLimits {
     if (fs == null) fs = new MockFSDirectory();
 
     INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
-        name, perms);
-    child.setLocalName(name);
+        DFSUtil.string2Bytes(name), perms, 0L);
     
     Class<?> generated = null;
     try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Thu Feb 14 00:43:28 2013
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -50,10 +51,15 @@ public class TestINodeFile {
   static final short BLOCKBITS = 48;
   static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
 
-  private String userName = "Test";
+  private final PermissionStatus perm = new PermissionStatus(
+      "userName", null, FsPermission.getDefault());
   private short replication;
   private long preferredBlockSize;
 
+  INodeFile createINodeFile(short replication, long preferredBlockSize) {
+    return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+        null, replication, preferredBlockSize);
+  }
   /**
    * Test for the Replication value. Sets a value and checks if it was set
    * correct.
@@ -62,9 +68,7 @@ public class TestINodeFile {
   public void testReplication () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
                  inf.getFileReplication());
   }
@@ -79,9 +83,7 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
   }
 
   /**
@@ -92,9 +94,7 @@ public class TestINodeFile {
   public void testPreferredBlockSize () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    assertEquals("True has to be returned in this case", preferredBlockSize,
         inf.getPreferredBlockSize());
  }
@@ -103,9 +103,7 @@ public class TestINodeFile {
   public void testPreferredBlockSizeUpperBound () {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
     assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
                  inf.getPreferredBlockSize());
   }
@@ -120,9 +118,7 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
   } 
 
   /**
@@ -135,26 +131,20 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
  }
 
   @Test
   public void testGetFullPathName() {
-    PermissionStatus perms = new PermissionStatus(
-      userName, null, FsPermission.getDefault());
-
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
     inf.setLocalName("f");
 
     INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
-        INodeDirectory.ROOT_NAME, perms);
-    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
-        perms);
+        INodeDirectory.ROOT_NAME, perm, 0L);
+    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        DFSUtil.string2Bytes("d"), perm, 0L);
 
     assertEquals("f", inf.getFullPathName());
     assertEquals("", inf.getLocalParentDir());
@@ -250,9 +240,7 @@ public class TestINodeFile {
     preferredBlockSize = 128 * 1024 * 1024;
     INodeFile[] iNodes = new INodeFile[nCount];
     for (int i = 0; i < nCount; i++) {
-      PermissionStatus perms = new PermissionStatus(userName, null,
-          FsPermission.getDefault());
-      iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
+      iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
           preferredBlockSize);
       iNodes[i].setLocalName(fileNamePrefix +  Integer.toString(i));
       BlockInfo newblock = new BlockInfo(replication);
@@ -270,8 +258,6 @@ public class TestINodeFile {
   @Test
   public void testValueOf () throws IOException {
     final String path = "/testValueOf";
-    final PermissionStatus perm = new PermissionStatus(
-        userName, null, FsPermission.getDefault());
     final short replication = 3;
 
     {//cast from null
@@ -303,8 +289,7 @@ public class TestINodeFile {
     }
 
     {//cast from INodeFile
-      final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
-          null, replication, 0L, 0L, preferredBlockSize);
+      final INode from = createINodeFile(replication, preferredBlockSize);
 
      //cast to INodeFile, should success
       final INodeFile f = INodeFile.valueOf(from, path);
@@ -349,8 +334,8 @@ public class TestINodeFile {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
-          0L);
+      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null,
+          perm, 0L);
 
       //cast to INodeFile, should fail
       try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Thu Feb 14 00:43:28 2013
@@ -275,7 +275,9 @@ public class TestSnapshotPathINodes {
       assertSnapshot(nodesInPath, true, snapshot, 3);
   
       // Check the INode for file1 (snapshot file)
-      assertINodeFile(inodes[inodes.length - 1], file1);
+      final INode inode = inodes[inodes.length - 1];
+      assertEquals(file1.getName(), inode.getLocalName());
+      assertEquals(INodeFileWithSnapshot.class, inode.getClass());
     }
 
     // Check the INodes for path /TestSnapshot/sub1/file1

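The assertINodeFile helper being inlined here presumably asserted the concrete INodeFile class, which no longer holds once the snapshot copy is typed as INodeFileWithSnapshot; hence the explicit class check in the new code. A hypothetical shape of the replaced helper:

    // Hypothetical: the exact helper body is not shown in this diff.
    static void assertINodeFile(INode inode, Path file) {
      assertEquals(file.getName(), inode.getLocalName());
      assertEquals(INodeFile.class, inode.getClass());
    }
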
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java Thu Feb 14 00:43:28 2013
@@ -45,14 +45,18 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
+import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
+import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -64,7 +68,7 @@ public class SnapshotTestHelper {
   public static final Log LOG = LogFactory.getLog(SnapshotTestHelper.class);
 
   /** Disable the logs that are not very useful for snapshot related tests. */
-  static void disableLogs() {
+  public static void disableLogs() {
     final String[] lognames = {
         "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
         "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
@@ -73,11 +77,15 @@ public class SnapshotTestHelper {
     for(String n : lognames) {
       setLevel2OFF(LogFactory.getLog(n));
     }
-
+    
     setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
     setLevel2OFF(LogFactory.getLog(BlockManager.class));
     setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
-
+    setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
+    setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
+    
+    setLevel2OFF(DataBlockScanner.LOG);
+    setLevel2OFF(HttpServer.LOG);
     setLevel2OFF(DataNode.LOG);
     setLevel2OFF(BlockPoolSliceStorage.LOG);
     setLevel2OFF(LeaseManager.LOG);
@@ -175,6 +183,15 @@ public class SnapshotTestHelper {
    */
   public static void compareDumpedTreeInFile(File file1, File file2)
       throws IOException {
+    try {
+      compareDumpedTreeInFile(file1, file2, false);
+    } catch(Throwable t) {
+      LOG.info("FAILED compareDumpedTreeInFile(" + file1 + ", " + file2 + ")", t);
+      compareDumpedTreeInFile(file1, file2, true);
+    }
+  }
+  private static void compareDumpedTreeInFile(File file1, File file2,
+      boolean print) throws IOException {
     BufferedReader reader1 = new BufferedReader(new FileReader(file1));
     BufferedReader reader2 = new BufferedReader(new FileReader(file2));
     try {
@@ -182,6 +199,11 @@ public class SnapshotTestHelper {
       String line2 = "";
       while ((line1 = reader1.readLine()) != null
           && (line2 = reader2.readLine()) != null) {
+        if (print) {
+          System.out.println();
+          System.out.println("1) " + line1);
+          System.out.println("2) " + line2);
+        }
         // skip the hashCode part of the object string during the comparison,
         // also ignore the difference between INodeFile/INodeFileWithSnapshot
         line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile");

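The setLevel2OFF helper used throughout disableLogs() is not part of this hunk. Given the org.apache.log4j.Level import, it presumably unwraps the commons-logging Log to its log4j logger, roughly:

    // A minimal sketch, assuming a log4j backend; the real helper may differ
    // in signature and error handling.
    static void setLevel2OFF(Object log) {
      ((org.apache.commons.logging.impl.Log4JLogger) log)
          .getLogger().setLevel(org.apache.log4j.Level.OFF);
    }

The compareDumpedTreeInFile change is a retry-for-diagnosis idiom: the silent comparison runs first, and only on failure is it re-run with print=true so the mismatching lines are written to stdout.
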
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Thu Feb 14 00:43:28 2013
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.UnresolvedLi
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -171,7 +172,8 @@ public class TestNestedSnapshots {
   public void testIdCmp() {
     final PermissionStatus perm = PermissionStatus.createImmutable(
         "user", "group", FsPermission.createImmutable((short)0));
-    final INodeDirectory dir = new INodeDirectory(0, "foo", perm);
+    final INodeDirectory dir = new INodeDirectory(0,
+        DFSUtil.string2Bytes("foo"), perm, 0L);
     final INodeDirectorySnapshottable snapshottable
         = new INodeDirectorySnapshottable(dir);
     final Snapshot[] snapshots = {

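This hunk and the TestDiff one below adapt to an INodeDirectory constructor that now takes the name as a byte array plus a modification time. DFSUtil.string2Bytes is presumably just the UTF-8 encoding of the name, along the lines of:

    // Sketch under the assumption that INode names are stored as UTF-8 bytes;
    // the real DFSUtil.string2Bytes may handle encoding errors differently.
    public static byte[] string2Bytes(String str) {
      return str.getBytes(com.google.common.base.Charsets.UTF_8);
    }
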
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Thu Feb 14 00:43:28 2013
@@ -268,8 +268,7 @@ public class TestSnapshot {
       modifyCurrentDirAndCheckSnapshots(new Modification[]{chmod, chown});
       
       // check fsimage saving/loading
-//      TODO: fix fsimage
-//      checkFSImage();
+      checkFSImage();
     }
   }
 

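Re-enabling checkFSImage means every modification round now also round-trips the namespace through fsimage save/load. A hypothetical outline of what it exercises (the real method lives elsewhere in TestSnapshot and may differ):

    // Hypothetical outline only: dump the tree, reload the namespace, dump
    // again, then compare with the helper shown in SnapshotTestHelper above.
    void checkFSImage() throws Exception {
      File before = File.createTempFile("fsimage", "before");
      File after = File.createTempFile("fsimage", "after");
      SnapshotTestHelper.dumpTree2File(fsdir, before);  // assumed helper name
      restartClusterAndReloadFSImage();                 // hypothetical step
      SnapshotTestHelper.dumpTree2File(fsdir, after);
      SnapshotTestHelper.compareDumpedTreeInFile(before, after);
    }
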
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/TestDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/TestDiff.java?rev=1446000&r1=1445999&r2=1446000&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/TestDiff.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/diff/TestDiff.java Thu Feb 14 00:43:28 2013
@@ -23,6 +23,7 @@ import java.util.Random;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.diff.Diff.Container;
@@ -240,7 +241,8 @@ public class TestDiff {
   }
 
   static INode newINode(int n, int width) {
-    return new INodeDirectory(n, String.format("n%0" + width + "d", n), PERM);
+    byte[] name = DFSUtil.string2Bytes(String.format("n%0" + width + "d", n));
+    return new INodeDirectory(n, name, PERM, 0L);
   }
 
   static void create(INode inode, final List<INode> current, Diff<byte[], INode> diff) {

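For reference, the zero-padded format in newINode produces fixed-width names, presumably so that byte-wise ordering of the names matches their numeric order in the diff lists:

    // String.format("n%0" + width + "d", n) yields e.g. "n007" for n = 7,
    // width = 3, so "n007" < "n042" both numerically and lexicographically.
    String name = String.format("n%0" + 3 + "d", 7);  // "n007"
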

