hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1480838 [7/7] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/...
Date: Thu, 09 May 2013 23:55:52 GMT
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Thu May  9 23:55:49 2013
@@ -22,6 +22,8 @@ import java.io.IOException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -123,8 +126,11 @@ class ImageLoaderCurrent implements Imag
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42};
+      -40, -41, -42, -43};
   private int imageVersion = 0;
+  
+  private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
+  private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
 
   /* (non-Javadoc)
    * @see ImageLoader#canProcessVersion(int)
@@ -162,11 +168,18 @@ class ImageLoaderCurrent implements Imag
       if (LayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
         v.visit(ImageElement.TRANSACTION_ID, in.readLong());
       }
-
+      
       if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
         v.visit(ImageElement.LAST_INODE_ID, in.readLong());
       }
       
+      boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
+          imageVersion);
+      if (supportSnapshot) {
+        v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
+        v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, in.readInt());
+      }
+      
       if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
         boolean isCompressed = in.readBoolean();
         v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
@@ -183,7 +196,9 @@ class ImageLoaderCurrent implements Imag
           in = new DataInputStream(codec.createInputStream(in));
         }
       }
-      processINodes(in, v, numInodes, skipBlocks);
+      processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
+      subtreeMap.clear();
+      dirNodeMap.clear();
 
       processINodesUC(in, v, skipBlocks);
 
@@ -271,6 +286,12 @@ class ImageLoaderCurrent implements Imag
       byte [] name = FSImageSerialization.readBytes(in);
       String n = new String(name, "UTF8");
       v.visit(ImageElement.INODE_PATH, n);
+      
+      if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
+        long inodeId = in.readLong();
+        v.visit(ImageElement.INODE_ID, inodeId);
+      }
+      
       v.visit(ImageElement.REPLICATION, in.readShort());
       v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
 
@@ -360,16 +381,22 @@ class ImageLoaderCurrent implements Imag
    * @param v Visitor to walk over INodes
    * @param numInodes Number of INodes stored in file
    * @param skipBlocks Process all the blocks within the INode?
+   * @param supportSnapshot Whether or not the imageVersion supports snapshot
    * @throws VisitException
    * @throws IOException
    */
   private void processINodes(DataInputStream in, ImageVisitor v,
-      long numInodes, boolean skipBlocks) throws IOException {
+      long numInodes, boolean skipBlocks, boolean supportSnapshot)
+      throws IOException {
     v.visitEnclosingElement(ImageElement.INODES,
         ImageElement.NUM_INODES, numInodes);
     
     if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
-      processLocalNameINodes(in, v, numInodes, skipBlocks);
+      if (!supportSnapshot) {
+        processLocalNameINodes(in, v, numInodes, skipBlocks);
+      } else {
+        processLocalNameINodesWithSnapshot(in, v, skipBlocks);
+      }
     } else { // full path name
       processFullNameINodes(in, v, numInodes, skipBlocks);
     }
@@ -390,7 +417,7 @@ class ImageLoaderCurrent implements Imag
   private void processLocalNameINodes(DataInputStream in, ImageVisitor v,
       long numInodes, boolean skipBlocks) throws IOException {
     // process root
-    processINode(in, v, skipBlocks, "");
+    processINode(in, v, skipBlocks, "", false);
     numInodes--;
     while (numInodes > 0) {
       numInodes -= processDirectory(in, v, skipBlocks);
@@ -400,40 +427,172 @@ class ImageLoaderCurrent implements Imag
   private int processDirectory(DataInputStream in, ImageVisitor v,
      boolean skipBlocks) throws IOException {
     String parentName = FSImageSerialization.readString(in);
+    return processChildren(in, v, skipBlocks, parentName);
+  }
+  
+  /**
+   * Process image with local path name and snapshot support
+   * 
+   * @param in image stream
+   * @param v visitor
+   * @param skipBlocks skip blocks or not
+   */
+  private void processLocalNameINodesWithSnapshot(DataInputStream in,
+      ImageVisitor v, boolean skipBlocks) throws IOException {
+    // process root
+    processINode(in, v, skipBlocks, "", false);
+    processDirectoryWithSnapshot(in, v, skipBlocks);
+  }
+  
+  /**
+   * Process directories when snapshot is supported.
+   */
+  private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
+      boolean skipBlocks) throws IOException {
+    // 1. load dir node id
+    long inodeId = in.readLong();
+    
+    String dirName = dirNodeMap.get(inodeId);
+    String oldValue = subtreeMap.put(inodeId, dirName);
+    if (oldValue != null) { // the subtree has been visited
+      return;
+    }
+    
+    // 2. load possible snapshots
+    processSnapshots(in, v, dirName);
+    // 3. load children nodes
+    processChildren(in, v, skipBlocks, dirName);
+    // 4. load possible directory diff list
+    processDirectoryDiffList(in, v, dirName);
+    // recursively process sub-directories
+    final int numSubTree = in.readInt();
+    for (int i = 0; i < numSubTree; i++) {
+      processDirectoryWithSnapshot(in, v, skipBlocks);
+    }
+  }
+  
+  /**
+   * Process snapshots of a snapshottable directory
+   */
+  private void processSnapshots(DataInputStream in, ImageVisitor v,
+      String rootName) throws IOException {
+    final int numSnapshots = in.readInt();
+    if (numSnapshots >= 0) {
+      v.visitEnclosingElement(ImageElement.SNAPSHOTS,
+          ImageElement.NUM_SNAPSHOTS, numSnapshots);
+      for (int i = 0; i < numSnapshots; i++) {
+        // process snapshot
+        v.visitEnclosingElement(ImageElement.SNAPSHOT);
+        v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
+        // process root of snapshot
+        v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT);
+        processINode(in, v, true, rootName, false);
+        v.leaveEnclosingElement();
+        v.leaveEnclosingElement();
+      }
+      v.visit(ImageElement.SNAPSHOT_QUOTA, in.readInt());
+      v.leaveEnclosingElement();
+    }
+  }
+  
+  private void processDirectoryDiffList(DataInputStream in, ImageVisitor v,
+      String currentINodeName) throws IOException {
+    final int numDirDiff = in.readInt();
+    if (numDirDiff >= 0) {
+      v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFFS,
+          ImageElement.NUM_SNAPSHOT_DIR_DIFF, numDirDiff);
+      for (int i = 0; i < numDirDiff; i++) {
+        // process directory diffs in reverse chronological order
+        processDirectoryDiff(in, v, currentINodeName); 
+      }
+      v.leaveEnclosingElement();
+    }
+  }
+  
+  private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
+      String currentINodeName) throws IOException {
+    v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF);
+    String snapshot = FSImageSerialization.readString(in);
+    v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTROOT, snapshot);
+    v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, in.readInt());
+    
+    // process snapshotINode
+    boolean useRoot = in.readBoolean();
+    if (!useRoot) {
+      if (in.readBoolean()) {
+        v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE);
+        processINode(in, v, true, currentINodeName, true);
+        v.leaveEnclosingElement();
+      }
+    }
+    
+    // process createdList
+    int createdSize = in.readInt();
+    v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST,
+        ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, createdSize);
+    for (int i = 0; i < createdSize; i++) {
+      String createdNode = FSImageSerialization.readString(in);
+      v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CREATED_INODE, createdNode);
+    }
+    v.leaveEnclosingElement();
+    
+    // process deletedList
+    int deletedSize = in.readInt();
+    v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST,
+        ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE, deletedSize);
+    for (int i = 0; i < deletedSize; i++) {
+      v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETED_INODE);
+      processINode(in, v, false, currentINodeName, true);
+      v.leaveEnclosingElement();
+    }
+    v.leaveEnclosingElement();
+    v.leaveEnclosingElement();
+  }
+  
+  /** Process children under a directory */
+  private int processChildren(DataInputStream in, ImageVisitor v,
+      boolean skipBlocks, String parentName) throws IOException {
     int numChildren = in.readInt();
-    for (int i=0; i<numChildren; i++) {
-      processINode(in, v, skipBlocks, parentName);
+    for (int i = 0; i < numChildren; i++) {
+      processINode(in, v, skipBlocks, parentName, false);
     }
     return numChildren;
   }
   
-   /**
-    * Process image with full path name
-    * 
-    * @param in image stream
-    * @param v visitor
-    * @param numInodes number of indoes to read
-    * @param skipBlocks skip blocks or not
-    * @throws IOException if there is any error occurs
-    */
-   private void processFullNameINodes(DataInputStream in, ImageVisitor v,
-       long numInodes, boolean skipBlocks) throws IOException {
-     for(long i = 0; i < numInodes; i++) {
-       processINode(in, v, skipBlocks, null);
-     }
-   }
-   
-   /**
-    * Process an INode
-    * 
-    * @param in image stream
-    * @param v visitor
-    * @param skipBlocks skip blocks or not
-    * @param parentName the name of its parent node
-    * @throws IOException
-    */
+  /**
+   * Process image with full path name
+   * 
+   * @param in image stream
+   * @param v visitor
+   * @param numInodes number of inodes to read
+   * @param skipBlocks skip blocks or not
+   * @throws IOException if an error occurs
+   */
+  private void processFullNameINodes(DataInputStream in, ImageVisitor v,
+      long numInodes, boolean skipBlocks) throws IOException {
+    for(long i = 0; i < numInodes; i++) {
+      processINode(in, v, skipBlocks, null, false);
+    }
+  }
+ 
+  /**
+   * Process an INode
+   * 
+   * @param in image stream
+   * @param v visitor
+   * @param skipBlocks skip blocks or not
+   * @param parentName the name of its parent node
+   * @param isSnapshotCopy whether or not the inode is a snapshot copy
+   * @throws IOException
+   */
   private void processINode(DataInputStream in, ImageVisitor v,
-      boolean skipBlocks, String parentName) throws IOException {
+      boolean skipBlocks, String parentName, boolean isSnapshotCopy)
+      throws IOException {
+    boolean supportSnapshot = 
+        LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
+    boolean supportInodeId = 
+        LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
+    
     v.visitEnclosingElement(ImageElement.INODE);
     String pathName = FSImageSerialization.readString(in);
     if (parentName != null) {  // local name
@@ -443,9 +602,11 @@ class ImageLoaderCurrent implements Imag
       }
     }
 
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     v.visit(ImageElement.INODE_PATH, pathName);
-    if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
-      v.visit(ImageElement.INODE_ID, in.readLong());
+    if (supportInodeId) {
+      inodeId = in.readLong();
+      v.visit(ImageElement.INODE_ID, inodeId);
     }
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
@@ -455,21 +616,80 @@ class ImageLoaderCurrent implements Imag
     int numBlocks = in.readInt();
 
     processBlocks(in, v, numBlocks, skipBlocks);
-
-    // File or directory
-    if (numBlocks > 0 || numBlocks == -1) {
+    
+    if (numBlocks > 0) { // File
+      if (supportSnapshot) {
+        // process file diffs
+        processFileDiffList(in, v, parentName);
+        if (isSnapshotCopy) {
+          boolean underConstruction = in.readBoolean();
+          if (underConstruction) {
+            v.visit(ImageElement.CLIENT_NAME,
+                FSImageSerialization.readString(in));
+            v.visit(ImageElement.CLIENT_MACHINE,
+                FSImageSerialization.readString(in));
+          }
+        }
+      }
+    } else if (numBlocks == -1) { // Directory
+      if (supportSnapshot && supportInodeId) {
+        dirNodeMap.put(inodeId, pathName);
+      }
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
-    }
-    if (numBlocks == -2) {
+      if (supportSnapshot) {
+        boolean snapshottable = in.readBoolean();
+        if (!snapshottable) {
+          boolean withSnapshot = in.readBoolean();
+          v.visit(ImageElement.IS_WITHSNAPSHOT_DIR, Boolean.toString(withSnapshot));
+        } else {
+          v.visit(ImageElement.IS_SNAPSHOTTABLE_DIR, Boolean.toString(snapshottable));
+        }
+      }
+    } else if (numBlocks == -2) {
       v.visit(ImageElement.SYMLINK, Text.readString(in));
+    } else if (numBlocks == -3) { // reference node
+      final boolean isWithName = in.readBoolean();
+      int snapshotId = in.readInt();
+      if (isWithName) {
+        v.visit(ImageElement.SNAPSHOT_LAST_SNAPSHOT_ID, snapshotId);
+      } else {
+        v.visit(ImageElement.SNAPSHOT_DST_SNAPSHOT_ID, snapshotId);
+      }
+      
+      final boolean firstReferred = in.readBoolean();
+      if (firstReferred) {
+        v.visitEnclosingElement(ImageElement.SNAPSHOT_REF_INODE);
+        processINode(in, v, skipBlocks, parentName, isSnapshotCopy);
+        v.leaveEnclosingElement();  // referred inode    
+      } else {
+        v.visit(ImageElement.SNAPSHOT_REF_INODE_ID, in.readLong());
+      }
     }
 
     processPermission(in, v);
     v.leaveEnclosingElement(); // INode
   }
-
+  
+  private void processFileDiffList(DataInputStream in, ImageVisitor v,
+      String currentINodeName) throws IOException {
+    final int size = in.readInt();
+    if (size >= 0) {
+      v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFFS,
+          ImageElement.NUM_SNAPSHOT_FILE_DIFF, size);
+      String snapshot = FSImageSerialization.readString(in);
+      v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTROOT, snapshot);
+      v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
+      if (in.readBoolean()) {
+        v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE);
+        processINode(in, v, true, currentINodeName, true);
+        v.leaveEnclosingElement();
+      }
+      v.leaveEnclosingElement();
+    }
+  }
+  
   /**
    * Helper method to format dates during processing.
    * @param date Date as read from image file

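For context, ImageLoaderCurrent is driven by the offline image viewer front
end. A minimal sketch of wiring a visitor to it, following the constructor
usage in TestOfflineImageViewer later in this commit (the XmlImageVisitor
class name and its single-argument constructor are assumptions here):

    // Walk an fsimage file and emit it as XML; throws IOException.
    ImageVisitor v = new XmlImageVisitor("fsimage.xml");
    OfflineImageViewer oiv = new OfflineImageViewer("fsimage", v, false);
    oiv.go();  // drives ImageLoaderCurrent over the image
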
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Thu May  9 23:55:49 2013
@@ -82,7 +82,38 @@ abstract class ImageVisitor {
     DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
     TRANSACTION_ID,
     LAST_INODE_ID,
-    INODE_ID
+    INODE_ID,
+
+    SNAPSHOT_COUNTER,
+    NUM_SNAPSHOTS_TOTAL,
+    NUM_SNAPSHOTS,
+    SNAPSHOTS,
+    SNAPSHOT,
+    SNAPSHOT_ID,
+    SNAPSHOT_ROOT,
+    SNAPSHOT_QUOTA,
+    NUM_SNAPSHOT_DIR_DIFF,
+    SNAPSHOT_DIR_DIFFS,
+    SNAPSHOT_DIR_DIFF,
+    SNAPSHOT_DIFF_SNAPSHOTROOT,
+    SNAPSHOT_DIR_DIFF_CHILDREN_SIZE,
+    SNAPSHOT_DIFF_SNAPSHOTINODE,
+    SNAPSHOT_DIR_DIFF_CREATEDLIST,
+    SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE,
+    SNAPSHOT_DIR_DIFF_CREATED_INODE,
+    SNAPSHOT_DIR_DIFF_DELETEDLIST,
+    SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE,
+    SNAPSHOT_DIR_DIFF_DELETED_INODE,
+    IS_SNAPSHOTTABLE_DIR,
+    IS_WITHSNAPSHOT_DIR,
+    SNAPSHOT_FILE_DIFFS,
+    SNAPSHOT_FILE_DIFF,
+    NUM_SNAPSHOT_FILE_DIFF,
+    SNAPSHOT_FILE_SIZE,
+    SNAPSHOT_DST_SNAPSHOT_ID,
+    SNAPSHOT_LAST_SNAPSHOT_ID,
+    SNAPSHOT_REF_INODE_ID,
+    SNAPSHOT_REF_INODE
   }
   
   /**

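The new enum constants above are what a custom visitor keys on. A hedged
sketch of a visitor that tallies snapshots, assuming the abstract callback
set is the one ImageLoaderCurrent invokes (visit, visitEnclosingElement,
leaveEnclosingElement) plus start/finish/finishAbnormally lifecycle methods,
and that the class sits in the same package since ImageVisitor is
package-private:

    import java.io.IOException;

    class SnapshotCountingVisitor extends ImageVisitor {
      private long snapshotCount = 0;

      @Override
      void start() throws IOException {}

      @Override
      void finish() throws IOException {
        System.out.println("snapshots: " + snapshotCount);
      }

      @Override
      void finishAbnormally() throws IOException {
        finish();
      }

      @Override
      void visit(ImageElement element, String value) throws IOException {}

      @Override
      void visitEnclosingElement(ImageElement element) throws IOException {
        if (element == ImageElement.SNAPSHOT) {
          snapshotCount++;  // one SNAPSHOT element per snapshot in the image
        }
      }

      @Override
      void visitEnclosingElement(ImageElement element, ImageElement key,
          String value) throws IOException {
        visitEnclosingElement(element);
      }

      @Override
      void leaveEnclosingElement() throws IOException {}
    }
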
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Thu May  9 23:55:49 2013
@@ -45,21 +45,16 @@ import org.apache.hadoop.fs.ContentSumma
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -95,13 +90,10 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
@@ -337,18 +329,7 @@ public class WebHdfsFileSystem extends F
       return ioe;
     }
 
-    final RemoteException re = (RemoteException)ioe;
-    return re.unwrapRemoteException(AccessControlException.class,
-        InvalidToken.class,
-        AuthenticationException.class,
-        AuthorizationException.class,
-        FileAlreadyExistsException.class,
-        FileNotFoundException.class,
-        ParentNotDirectoryException.class,
-        UnresolvedPathException.class,
-        SafeModeException.class,
-        DSQuotaExceededException.class,
-        NSQuotaExceededException.class);
+    return ((RemoteException)ioe).unwrapRemoteException();
   }
 
   /**

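The WebHdfsFileSystem change above collapses a hard-coded list of unwrap
candidates into the no-argument RemoteException.unwrapRemoteException(),
which re-instantiates whatever exception class the server named (falling
back to the RemoteException itself when it cannot). A caller-side sketch of
the same idiom; the wrapped operation is illustrative:

    try {
      fs.getFileStatus(path);
    } catch (RemoteException re) {
      // Reconstructs e.g. a FileNotFoundException from the class name the
      // server reported; no per-class whitelist is needed.
      throw re.unwrapRemoteException();
    }
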
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1360400-1480829

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu May  9 23:55:49 2013
@@ -212,6 +212,21 @@ message GetListingResponseProto {
   optional DirectoryListingProto dirList = 1;
 }
 
+message GetSnapshottableDirListingRequestProto { // no input parameters
+}
+message GetSnapshottableDirListingResponseProto {
+  optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
+}
+
+message GetSnapshotDiffReportRequestProto {
+  required string snapshotRoot = 1;
+  required string fromSnapshot = 2;
+  required string toSnapshot = 3;
+}
+message GetSnapshotDiffReportResponseProto {
+  required SnapshotDiffReportProto diffReport = 1;
+}
+
 message RenewLeaseRequestProto {
   required string clientName = 1;
 }
@@ -434,6 +449,46 @@ message GetDataEncryptionKeyResponseProt
   optional DataEncryptionKeyProto dataEncryptionKey = 1;
 }
 
+message CreateSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  optional string snapshotName = 2;
+}
+
+message CreateSnapshotResponseProto {
+  required string snapshotPath = 1;
+}
+
+message RenameSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  required string snapshotOldName = 2;
+  required string snapshotNewName = 3;
+}
+
+message RenameSnapshotResponseProto { // void response
+}
+
+message AllowSnapshotRequestProto {
+  required string snapshotRoot = 1;
+}
+
+message AllowSnapshotResponseProto {
+}
+
+message DisallowSnapshotRequestProto {
+  required string snapshotRoot = 1;
+}
+
+message DisallowSnapshotResponseProto {
+}
+
+message DeleteSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  required string snapshotName = 2;
+}
+
+message DeleteSnapshotResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -507,6 +562,20 @@ service ClientNamenodeProtocol {
       returns(SetBalancerBandwidthResponseProto);
   rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
       returns(GetDataEncryptionKeyResponseProto);
+  rpc createSnapshot(CreateSnapshotRequestProto)
+      returns(CreateSnapshotResponseProto);
+  rpc renameSnapshot(RenameSnapshotRequestProto)
+      returns(RenameSnapshotResponseProto);
+  rpc allowSnapshot(AllowSnapshotRequestProto)
+      returns(AllowSnapshotResponseProto);
+  rpc disallowSnapshot(DisallowSnapshotRequestProto)
+      returns(DisallowSnapshotResponseProto);   
+  rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
+      returns(GetSnapshottableDirListingResponseProto);
+  rpc deleteSnapshot(DeleteSnapshotRequestProto)
+      returns(DeleteSnapshotResponseProto);
+  rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
+      returns(GetSnapshotDiffReportResponseProto);
   rpc isFileClosed(IsFileClosedRequestProto)
       returns(IsFileClosedResponseProto);
 }

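Client-side, the snapshot RPCs above surface through DistributedFileSystem,
as exercised by the OfflineEditsViewerHelper change later in this commit. A
sketch, with the path and snapshot names illustrative:

    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/directory_mkdir");
    dfs.allowSnapshot(dir);                             // allowSnapshot
    dfs.createSnapshot(dir, "snapshot1");               // createSnapshot
    dfs.renameSnapshot(dir, "snapshot1", "snapshot2");  // renameSnapshot
    dfs.deleteSnapshot(dir, "snapshot2");               // deleteSnapshot
    dfs.disallowSnapshot(dir);                          // disallowSnapshot
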
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu May  9 23:55:49 2013
@@ -145,7 +145,7 @@ message LocatedBlocksProto {
 
 
 /**
- * Status of a file, directory  or symlink
+ * Status of a file, directory or symlink
  * Optionally includes a file's block locations if requested by client on the rpc call.
  */
 message HdfsFileStatusProto {
@@ -210,6 +210,46 @@ message DirectoryListingProto {
 }
 
 /**
+ * Status of a snapshottable directory: besides the normal directory status
+ * information, it also includes the snapshot quota, the number of snapshots,
+ * and the full path of the parent directory.
+ */
+message SnapshottableDirectoryStatusProto {
+  required HdfsFileStatusProto dirStatus = 1;
+
+  // Fields specific for snapshottable directory
+  required uint32 snapshot_quota = 2;
+  required uint32 snapshot_number = 3;
+  required bytes parent_fullpath = 4;
+}
+
+/**
+ * Snapshottable directory listing
+ */
+message SnapshottableDirectoryListingProto {
+  repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
+}
+
+/**
+ * Snapshot diff report entry
+ */
+message SnapshotDiffReportEntryProto {
+  required bytes fullpath = 1;
+  required string modificationLabel = 2;
+}
+
+/**
+ * Snapshot diff report
+ */
+message SnapshotDiffReportProto {
+  // full path of the directory where snapshots were taken
+  required string snapshotRoot = 1;
+  required string fromSnapshot = 2;
+  required string toSnapshot = 3;
+  repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
+}
+
+/**
  * Common node information shared by all the nodes in the cluster
  */
 message StorageInfoProto {
@@ -374,3 +414,17 @@ message VersionResponseProto {
   required NamespaceInfoProto info = 1;
 }
 
+/**
+ * Information related to a snapshot
+ * TODO: add more information
+ */
+message SnapshotInfoProto {
+  required string snapshotName = 1;
+  required string snapshotRoot = 2;
+  required FsPermissionProto permission = 3;
+  required string owner = 4;
+  required string group = 5;
+  required string createTime = 6;
+  // TODO: do we need access time?
+}
+

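A SnapshotDiffReportProto round-trips to the client as a SnapshotDiffReport.
Assuming the DistributedFileSystem wrapper mirrors the getSnapshotDiffReport
RPC's (snapshotRoot, fromSnapshot, toSnapshot) argument order, retrieval
looks roughly like:

    SnapshotDiffReport report =
        dfs.getSnapshotDiffReport(new Path("/dir"), "s1", "s2");
    System.out.println(report);  // one line per diffReportEntries entry
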
Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1360400-1480829

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1360400-1480829

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1360400-1480829

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1360400-1480829

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu May  9 23:55:49 2013
@@ -614,6 +614,25 @@ public class DFSTestUtil {
   }
   
   /**
+   * Append a specified number of random bytes to a given file
+   * @param fs The file system
+   * @param p Path of the file to append to
+   * @param length Number of bytes to append to the file
+   * @throws IOException
+   */
+  public static void appendFile(FileSystem fs, Path p, int length)
+      throws IOException {
+    assert fs.exists(p);
+    assert length >= 0;
+    byte[] toAppend = new byte[length];
+    Random random = new Random();
+    random.nextBytes(toAppend);
+    FSDataOutputStream out = fs.append(p);
+    out.write(toAppend);
+    out.close();
+  }
+  
+  /**
    * @return url content as string (UTF-8 encoding assumed)
    */
   public static String urlGet(URL url) throws IOException {

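The appendFile helper pairs with the existing DFSTestUtil.createFile. A
hypothetical test fragment that grows a 1024-byte file by 512 random bytes:

    Path p = new Path("/test/appendTarget");
    DFSTestUtil.createFile(fs, p, 1024L, (short) 3, 0L);
    DFSTestUtil.appendFile(fs, p, 512);
    assertEquals(1536, fs.getFileStatus(p).getLen());
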
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Thu May  9 23:55:49 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.tools.DFSA
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
 import org.junit.Test;
 
 /** A class for testing quota-related commands */
@@ -171,15 +172,13 @@ public class TestQuota {
       fout = dfs.create(childFile1, replication);
       
       // 10.s: but writing fileLen bytes should result in an quota exception
-      hasException = false;
       try {
         fout.write(new byte[fileLen]);
         fout.close();
+        Assert.fail();
       } catch (QuotaExceededException e) {
-        hasException = true;
         IOUtils.closeStream(fout);
       }
-      assertTrue(hasException);
       
       //delete the file
       dfs.delete(childFile1, false);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Thu May  9 23:55:49 2013
@@ -63,7 +63,7 @@ public class CreateEditsLog {
     PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                       new FsPermission((short)0777));
     INodeDirectory dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
-        p, 0L);
+        null, p, 0L);
     editLog.logMkDir(BASE_PATH, dirInode);
     long blockSize = 10;
     BlockInfo[] blocks = new BlockInfo[blocksPerFile];
@@ -92,7 +92,7 @@ public class CreateEditsLog {
       // Log the new sub directory in edits
       if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
         String currentDir = nameGenerator.getCurrentDir();
-        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, p, 0L);
+        dirInode = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
       editLog.logOpenFile(filePath, new INodeFileUnderConstruction(

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Thu May  9 23:55:49 2013
@@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -217,8 +218,8 @@ public abstract class FSImageTestUtil {
         FsPermission.createImmutable((short)0755));
     for (int i = 1; i <= numDirs; i++) {
       String dirName = "dir" + i;
-      INodeDirectory dir = new INodeDirectory(newInodeId + i - 1, dirName,
-          perms);
+      INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
+          DFSUtil.string2Bytes(dirName), perms, 0L);
       editLog.logMkDir("/" + dirName, dir);
     }
     editLog.logSync();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Thu May  9 23:55:49 2013
@@ -155,6 +155,19 @@ public class OfflineEditsViewerHelper {
     // OP_MKDIR 3
     Path pathDirectoryMkdir = new Path("/directory_mkdir");
     dfs.mkdirs(pathDirectoryMkdir);
+    // OP_ALLOW_SNAPSHOT 29
+    dfs.allowSnapshot(pathDirectoryMkdir);
+    // OP_DISALLOW_SNAPSHOT 30
+    dfs.disallowSnapshot(pathDirectoryMkdir);
+    // OP_CREATE_SNAPSHOT 26
+    String ssName = "snapshot1";
+    dfs.allowSnapshot(pathDirectoryMkdir);
+    dfs.createSnapshot(pathDirectoryMkdir, ssName);
+    // OP_RENAME_SNAPSHOT 28
+    String ssNewName = "snapshot2";
+    dfs.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
+    // OP_DELETE_SNAPSHOT 27
+    dfs.deleteSnapshot(pathDirectoryMkdir, ssNewName);
     // OP_SET_REPLICATION 4
     s = dfs.create(pathFileCreate);
     s.close();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Thu May  9 23:55:49 2013
@@ -51,7 +51,6 @@ public class TestFSDirectory {
 
   private final Path sub11 = new Path(sub1, "sub11");
   private final Path file3 = new Path(sub11, "file3");
-  private final Path file4 = new Path(sub1, "z_file4");
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
@@ -106,27 +105,13 @@ public class TestFSDirectory {
 
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
-      Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
-          || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
-      checkClassName(line);
+      if (!line.isEmpty() && !line.contains("snapshot")) {
+        Assert.assertTrue("line=" + line,
+            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+            || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
+        checkClassName(line);
+      }
     }
-
-    LOG.info("Create a new file " + file4);
-    DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
-
-    final StringBuffer b2 = root.dumpTreeRecursively();
-    System.out.println("b2=" + b2);
-
-    int i = 0;
-    int j = b1.length() - 1;
-    for(; b1.charAt(i) == b2.charAt(i); i++);
-    int k = b2.length() - 1;
-    for(; b1.charAt(j) == b2.charAt(k); j--, k--);
-    final String diff = b2.substring(i, k + 1);
-    System.out.println("i=" + i + ", j=" + j + ", k=" + k);
-    System.out.println("diff=" + diff);
-    Assert.assertTrue(i > j);
-    Assert.assertTrue(diff.contains(file4.getName()));
   }
   
   @Test
@@ -134,7 +119,7 @@ public class TestFSDirectory {
     fsdir.reset();
     Assert.assertFalse(fsdir.isReady());
     final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
-    Assert.assertTrue(root.getChildrenList().isEmpty());
+    Assert.assertTrue(root.getChildrenList(null).isEmpty());
     fsdir.imageLoadComplete();
     Assert.assertTrue(fsdir.isReady());
   }
@@ -143,8 +128,7 @@ public class TestFSDirectory {
     int i = line.lastIndexOf('(');
     int j = line.lastIndexOf('@');
     final String classname = line.substring(i+1, j);
-    Assert.assertTrue(classname.equals(INodeFile.class.getSimpleName())
-        || classname.equals(INodeDirectory.class.getSimpleName())
-        || classname.equals(INodeDirectoryWithQuota.class.getSimpleName()));
+    Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
+        || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Thu May  9 23:55:49 2013
@@ -31,9 +31,12 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSLimitException.IllegalNameException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.junit.Before;
 import org.junit.Test;
@@ -103,6 +106,7 @@ public class TestFsLimits {
     addChildWithName("333", null);
     addChildWithName("4444", null);
     addChildWithName("55555", null);
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
   }
 
   @Test
@@ -142,6 +146,7 @@ public class TestFsLimits {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
     fsIsReady = false;
     
+    addChildWithName(HdfsConstants.DOT_SNAPSHOT_DIR, IllegalNameException.class);
     addChildWithName("1", null);
     addChildWithName("22", null);
     addChildWithName("333", null);
@@ -154,13 +159,15 @@ public class TestFsLimits {
     if (fs == null) fs = new MockFSDirectory();
 
     INode child = new INodeDirectory(getMockNamesystem().allocateNewInodeId(),
-        name, perms);
-    child.setLocalName(name);
+        DFSUtil.string2Bytes(name), perms, 0L);
     
     Class<?> generated = null;
     try {
-      fs.verifyFsLimits(inodes, 1, child);
-      rootInode.addChild(child, false);
+      fs.verifyMaxComponentLength(child.getLocalNameBytes(), inodes, 1);
+      fs.verifyMaxDirItems(inodes, 1);
+      fs.verifyINodeName(child.getLocalNameBytes());
+
+      rootInode.addChild(child);
     } catch (QuotaExceededException e) {
       generated = e.getClass();
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Thu May  9 23:55:49 2013
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.junit.Test;
@@ -64,10 +65,15 @@ public class TestINodeFile {
   static final short BLOCKBITS = 48;
   static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
 
-  private String userName = "Test";
+  private final PermissionStatus perm = new PermissionStatus(
+      "userName", null, FsPermission.getDefault());
   private short replication;
   private long preferredBlockSize;
 
+  INodeFile createINodeFile(short replication, long preferredBlockSize) {
+    return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+        null, replication, preferredBlockSize);
+  }
   /**
    * Test for the Replication value. Sets a value and checks if it was set
    * correct.
@@ -76,11 +82,9 @@ public class TestINodeFile {
   public void testReplication () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getBlockReplication());
+                 inf.getFileReplication());
   }
 
   /**
@@ -93,9 +97,7 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = -1;
     preferredBlockSize = 128*1024*1024;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
   }
 
   /**
@@ -106,9 +108,7 @@ public class TestINodeFile {
   public void testPreferredBlockSize () {
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    assertEquals("True has to be returned in this case", preferredBlockSize,
         inf.getPreferredBlockSize());
  }
@@ -117,9 +117,7 @@ public class TestINodeFile {
   public void testPreferredBlockSizeUpperBound () {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
-        new PermissionStatus(userName, null, FsPermission.getDefault()), null,
-        replication, 0L, 0L, preferredBlockSize);
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
     assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
                  inf.getPreferredBlockSize());
   }
@@ -134,9 +132,7 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = -1;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
   } 
 
   /**
@@ -149,41 +145,31 @@ public class TestINodeFile {
               throws IllegalArgumentException {
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE+1;
-    new INodeFile(INodeId.GRANDFATHER_INODE_ID, new PermissionStatus(userName,
-        null, FsPermission.getDefault()), null, replication, 0L, 0L,
-        preferredBlockSize);
+    createINodeFile(replication, preferredBlockSize);
  }
 
   @Test
   public void testGetFullPathName() {
-    PermissionStatus perms = new PermissionStatus(
-      userName, null, FsPermission.getDefault());
-
     replication = 3;
     preferredBlockSize = 128*1024*1024;
-    INodeFile inf = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perms, null,
-        replication, 0L, 0L, preferredBlockSize);
-    inf.setLocalName("f");
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
+    inf.setLocalName(DFSUtil.string2Bytes("f"));
 
     INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
-        INodeDirectory.ROOT_NAME, perms);
-    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, "d",
-        perms);
+        INodeDirectory.ROOT_NAME, perm, 0L);
+    INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
+        DFSUtil.string2Bytes("d"), perm, 0L);
 
     assertEquals("f", inf.getFullPathName());
-    assertEquals("", inf.getLocalParentDir());
 
-    dir.addChild(inf, false);
+    dir.addChild(inf);
     assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
-    assertEquals("d", inf.getLocalParentDir());
     
-    root.addChild(dir, false);
+    root.addChild(dir);
     assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
     assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
 
     assertEquals(Path.SEPARATOR, root.getFullPathName());
-    assertEquals(Path.SEPARATOR, root.getLocalParentDir());
-    
   }
   
   /**
@@ -215,10 +201,14 @@ public class TestINodeFile {
       // Check the full path name of the INode associating with the file
       INode fnode = fsdir.getINode(file.toString());
       assertEquals(file.toString(), fnode.getFullPathName());
-
+      
       // Call FSDirectory#unprotectedSetQuota which calls
       // INodeDirectory#replaceChild
       dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
+      INode dirNode = fsdir.getINode(dir.toString());
+      assertEquals(dir.toString(), dirNode.getFullPathName());
+      assertTrue(dirNode instanceof INodeDirectoryWithQuota);
+      
       final Path newDir = new Path("/newdir");
       final Path newFile = new Path(newDir, "file");
       // Also rename dir
@@ -236,27 +226,14 @@ public class TestINodeFile {
   }
   
   @Test
-  public void testAppendBlocks() {
+  public void testConcatBlocks() {
     INodeFile origFile = createINodeFiles(1, "origfile")[0];
     assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
 
     INodeFile[] appendFiles =   createINodeFiles(4, "appendfile");
-    origFile.appendBlocks(appendFiles, getTotalBlocks(appendFiles));
+    origFile.concatBlocks(appendFiles);
     assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
   }
-
-  /** 
-   * Gives the count of blocks for a given number of files
-   * @param files Array of INode files
-   * @return total count of blocks
-   */
-  private int getTotalBlocks(INodeFile[] files) {
-    int nBlocks=0;
-    for(int i=0; i < files.length; i++) {
-       nBlocks += files[i].numBlocks();
-    }
-    return nBlocks;
-  }
   
   /** 
    * Creates the required number of files with one block each
@@ -271,11 +248,9 @@ public class TestINodeFile {
     preferredBlockSize = 128 * 1024 * 1024;
     INodeFile[] iNodes = new INodeFile[nCount];
     for (int i = 0; i < nCount; i++) {
-      PermissionStatus perms = new PermissionStatus(userName, null,
-          FsPermission.getDefault());
-      iNodes[i] = new INodeFile(i, perms, null, replication, 0L, 0L,
+      iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
           preferredBlockSize);
-      iNodes[i].setLocalName(fileNamePrefix +  Integer.toString(i));
+      iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
       BlockInfo newblock = new BlockInfo(replication);
       iNodes[i].addBlock(newblock);
     }
@@ -291,8 +266,6 @@ public class TestINodeFile {
   @Test
   public void testValueOf () throws IOException {
     final String path = "/testValueOf";
-    final PermissionStatus perm = new PermissionStatus(
-        userName, null, FsPermission.getDefault());
     final short replication = 3;
 
     {//cast from null
@@ -324,8 +297,7 @@ public class TestINodeFile {
     }
 
     {//cast from INodeFile
-      final INode from = new INodeFile(INodeId.GRANDFATHER_INODE_ID, perm,
-          null, replication, 0L, 0L, preferredBlockSize);
+      final INode from = createINodeFile(replication, preferredBlockSize);
 
     //cast to INodeFile, should succeed
       final INodeFile f = INodeFile.valueOf(from, path);
@@ -372,8 +344,8 @@ public class TestINodeFile {
     }
 
     {//cast from INodeDirectory
-      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, perm,
-          0L);
+      final INode from = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID, null,
+          perm, 0L);
 
       //cast to INodeFile, should fail
       try {
@@ -817,13 +789,13 @@ public class TestINodeFile {
   /**
    * For a given path, build a tree of INodes and return the leaf node.
    */
-  private INode createTreeOfInodes(String path) {
+  private INode createTreeOfInodes(String path) throws QuotaExceededException {
     byte[][] components = INode.getPathComponents(path);
     FsPermission perm = FsPermission.createImmutable((short)0755);
     PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
     
     long id = 0;
-    INodeDirectory prev = new INodeDirectory(++id, "", permstatus);
+    INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
     INodeDirectory dir = null;
     for (byte[] component : components) {
       if (component.length == 0) {
@@ -831,7 +803,7 @@ public class TestINodeFile {
       }
       System.out.println("Adding component " + DFSUtil.bytes2String(component));
       dir = new INodeDirectory(++id, component, permstatus, 0);
-      prev.addChild(dir, false);
+      prev.addChild(dir, false, null, null);
       prev = dir;
     }
     return dir; // Last Inode in the chain
@@ -849,7 +821,7 @@ public class TestINodeFile {
    * Test for {@link FSDirectory#getPathComponents(INode)}
    */
   @Test
-  public void testGetPathFromInode() {
+  public void testGetPathFromInode() throws QuotaExceededException {
     String path = "/a/b/c";
     INode inode = createTreeOfInodes(path);
     byte[][] expected = INode.getPathComponents(path);
@@ -861,7 +833,7 @@ public class TestINodeFile {
    * Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
    */
   @Test
-  public void testInodePath() throws FileNotFoundException {
+  public void testInodePath() throws IOException {
     // For a non .inodes path the regular components are returned
     String path = "/a/b/c";
     INode inode = createTreeOfInodes(path);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Thu May  9 23:55:49 2013
@@ -289,6 +289,7 @@ public class TestOfflineImageViewer {
     while((line = br.readLine()) != null) 
       readLsLine(line, fileContents);
     
+    br.close();
     return fileContents;
   }
   
@@ -391,6 +392,7 @@ public class TestOfflineImageViewer {
     File outputFile = new File(ROOT, "/fileDistributionCheckOutput");
 
     int totalFiles = 0;
+    BufferedReader reader = null;
     try {
       copyFile(originalFsimage, testFile);
       ImageVisitor v = new FileDistributionVisitor(outputFile.getPath(), 0, 0);
@@ -399,7 +401,7 @@ public class TestOfflineImageViewer {
 
       oiv.go();
 
-      BufferedReader reader = new BufferedReader(new FileReader(outputFile));
+      reader = new BufferedReader(new FileReader(outputFile));
       String line = reader.readLine();
       assertEquals(line, "Size\tNumFiles");
       while((line = reader.readLine()) != null) {
@@ -408,6 +410,9 @@ public class TestOfflineImageViewer {
         totalFiles += Integer.parseInt(row[1]);
       }
     } finally {
+      if (reader != null) {
+        reader.close();
+      }
       if(testFile.exists()) testFile.delete();
       if(outputFile.exists()) outputFile.delete();
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1480838&r1=1480837&r2=1480838&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Thu May  9 23:55:49 2013
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-42</EDITS_VERSION>
+  <EDITS_VERSION>-43</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1331096884634</EXPIRY_DATE>
-        <KEY>a34bf5</KEY>
+        <EXPIRY_DATE>1366591664956</EXPIRY_DATE>
+        <KEY>f80c8ce0a9ff77d5</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,33 +24,26 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1331096884637</EXPIRY_DATE>
-        <KEY>dd6305</KEY>
+        <EXPIRY_DATE>1366591664958</EXPIRY_DATE>
+        <KEY>75e0c5176b531b18</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
-    <DATA>
-      <TXID>4</TXID>
-      <GENSTAMP>1001</GENSTAMP>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>5</TXID>
+      <TXID>4</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1002</INODEID>
-      <PATH>/file_create</PATH>
+      <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685834</MTIME>
-      <ATIME>1330405685834</ATIME>
+      <MTIME>1365900465930</MTIME>
+      <ATIME>1365900465930</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -59,18 +52,18 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>6</TXID>
+      <TXID>5</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
-      <PATH>/file_create</PATH>
+      <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685848</MTIME>
-      <ATIME>1330405685834</ATIME>
+      <MTIME>1365900465946</MTIME>
+      <ATIME>1365900465930</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -79,59 +72,98 @@
   <RECORD>
     <OPCODE>OP_RENAME_OLD</OPCODE>
     <DATA>
-      <TXID>7</TXID>
+      <TXID>6</TXID>
       <LENGTH>0</LENGTH>
-      <SRC>/file_create</SRC>
+      <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1330405685852</TIMESTAMP>
+      <TIMESTAMP>1365900465949</TIMESTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DELETE</OPCODE>
     <DATA>
-      <TXID>8</TXID>
+      <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1330405685857</TIMESTAMP>
+      <TIMESTAMP>1365900465953</TIMESTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MKDIR</OPCODE>
     <DATA>
-      <TXID>9</TXID>
+      <TXID>8</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1003</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1330405685861</TIMESTAMP>
+      <TIMESTAMP>1365900465956</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
+    <OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
+    <DATA>
+      <TXID>9</TXID>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_DISALLOW_SNAPSHOT</OPCODE>
     <DATA>
       <TXID>10</TXID>
-      <GENSTAMP>1002</GENSTAMP>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_ADD</OPCODE>
+    <OPCODE>OP_ALLOW_SNAPSHOT</OPCODE>
     <DATA>
       <TXID>11</TXID>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CREATE_SNAPSHOT</OPCODE>
+    <DATA>
+      <TXID>12</TXID>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
+      <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_RENAME_SNAPSHOT</OPCODE>
+    <DATA>
+      <TXID>13</TXID>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
+      <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
+      <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_DELETE_SNAPSHOT</OPCODE>
+    <DATA>
+      <TXID>14</TXID>
+      <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
+      <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ADD</OPCODE>
+    <DATA>
+      <TXID>15</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1004</INODEID>
-      <PATH>/file_create</PATH>
+      <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685866</MTIME>
-      <ATIME>1330405685866</ATIME>
+      <MTIME>1365900465976</MTIME>
+      <ATIME>1365900465976</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -140,18 +172,18 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>12</TXID>
+      <TXID>16</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
-      <PATH>/file_create</PATH>
+      <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685868</MTIME>
-      <ATIME>1330405685866</ATIME>
+      <MTIME>1365900465978</MTIME>
+      <ATIME>1365900465976</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -160,33 +192,33 @@
   <RECORD>
     <OPCODE>OP_SET_REPLICATION</OPCODE>
     <DATA>
-      <TXID>13</TXID>
-      <PATH>/file_create</PATH>
+      <TXID>17</TXID>
+      <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_PERMISSIONS</OPCODE>
     <DATA>
-      <TXID>14</TXID>
-      <SRC>/file_create</SRC>
+      <TXID>18</TXID>
+      <SRC>/file_create_u\0001;F431</SRC>
       <MODE>511</MODE>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_OWNER</OPCODE>
     <DATA>
-      <TXID>15</TXID>
-      <SRC>/file_create</SRC>
+      <TXID>19</TXID>
+      <SRC>/file_create_u\0001;F431</SRC>
       <USERNAME>newOwner</USERNAME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_TIMES</OPCODE>
     <DATA>
-      <TXID>16</TXID>
+      <TXID>20</TXID>
       <LENGTH>0</LENGTH>
-      <PATH>/file_create</PATH>
+      <PATH>/file_create_u\0001;F431</PATH>
       <MTIME>1285195527000</MTIME>
       <ATIME>1285195527000</ATIME>
     </DATA>
@@ -194,7 +226,7 @@
   <RECORD>
     <OPCODE>OP_SET_QUOTA</OPCODE>
     <DATA>
-      <TXID>17</TXID>
+      <TXID>21</TXID>
       <SRC>/directory_mkdir</SRC>
       <NSQUOTA>1000</NSQUOTA>
       <DSQUOTA>-1</DSQUOTA>
@@ -203,36 +235,29 @@
   <RECORD>
     <OPCODE>OP_RENAME</OPCODE>
     <DATA>
-      <TXID>18</TXID>
+      <TXID>22</TXID>
       <LENGTH>0</LENGTH>
-      <SRC>/file_create</SRC>
+      <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1330405685882</TIMESTAMP>
+      <TIMESTAMP>1365900465991</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
-    <DATA>
-      <TXID>19</TXID>
-      <GENSTAMP>1003</GENSTAMP>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>20</TXID>
+      <TXID>23</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1005</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685889</MTIME>
-      <ATIME>1330405685889</ATIME>
+      <MTIME>1365900465996</MTIME>
+      <ATIME>1365900465996</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -241,132 +266,125 @@
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>21</TXID>
-      <GENSTAMP>1004</GENSTAMP>
+      <TXID>24</TXID>
+      <GENSTAMP>1001</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>22</TXID>
+      <TXID>25</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
-        <BLOCK_ID>-7144805496741076283</BLOCK_ID>
+        <BLOCK_ID>7730270391831370404</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1004</GENSTAMP>
+        <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>23</TXID>
-      <GENSTAMP>1005</GENSTAMP>
+      <TXID>26</TXID>
+      <GENSTAMP>1002</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>24</TXID>
+      <TXID>27</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
-        <BLOCK_ID>-7144805496741076283</BLOCK_ID>
+        <BLOCK_ID>7730270391831370404</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1004</GENSTAMP>
+        <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4125931756867080767</BLOCK_ID>
+        <BLOCK_ID>7070364572574548346</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1005</GENSTAMP>
+        <GENSTAMP>1002</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>25</TXID>
-      <GENSTAMP>1006</GENSTAMP>
+      <TXID>28</TXID>
+      <GENSTAMP>1003</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>26</TXID>
+      <TXID>29</TXID>
       <PATH>/file_concat_target</PATH>
       <BLOCK>
-        <BLOCK_ID>-7144805496741076283</BLOCK_ID>
+        <BLOCK_ID>7730270391831370404</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1004</GENSTAMP>
+        <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4125931756867080767</BLOCK_ID>
+        <BLOCK_ID>7070364572574548346</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1005</GENSTAMP>
+        <GENSTAMP>1002</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>1562413691487277050</BLOCK_ID>
+        <BLOCK_ID>-2436647467986907584</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1006</GENSTAMP>
+        <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>27</TXID>
+      <TXID>30</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685978</MTIME>
-      <ATIME>1330405685889</ATIME>
+      <MTIME>1365900466070</MTIME>
+      <ATIME>1365900465996</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>-7144805496741076283</BLOCK_ID>
+        <BLOCK_ID>7730270391831370404</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1004</GENSTAMP>
+        <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4125931756867080767</BLOCK_ID>
+        <BLOCK_ID>7070364572574548346</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1005</GENSTAMP>
+        <GENSTAMP>1002</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>1562413691487277050</BLOCK_ID>
+        <BLOCK_ID>-2436647467986907584</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1006</GENSTAMP>
+        <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
-    <DATA>
-      <TXID>28</TXID>
-      <GENSTAMP>1007</GENSTAMP>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>29</TXID>
+      <TXID>31</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1006</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405685983</MTIME>
-      <ATIME>1330405685983</ATIME>
+      <MTIME>1365900466074</MTIME>
+      <ATIME>1365900466074</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -375,132 +393,125 @@
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>30</TXID>
-      <GENSTAMP>1008</GENSTAMP>
+      <TXID>32</TXID>
+      <GENSTAMP>1004</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>31</TXID>
+      <TXID>33</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
-        <BLOCK_ID>6084289468290363112</BLOCK_ID>
+        <BLOCK_ID>-8902070029031700083</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1008</GENSTAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>32</TXID>
-      <GENSTAMP>1009</GENSTAMP>
+      <TXID>34</TXID>
+      <GENSTAMP>1005</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>33</TXID>
+      <TXID>35</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
-        <BLOCK_ID>6084289468290363112</BLOCK_ID>
+        <BLOCK_ID>-8902070029031700083</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1008</GENSTAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4219431127125026105</BLOCK_ID>
+        <BLOCK_ID>1791253399175285670</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1009</GENSTAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>34</TXID>
-      <GENSTAMP>1010</GENSTAMP>
+      <TXID>36</TXID>
+      <GENSTAMP>1006</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>35</TXID>
+      <TXID>37</TXID>
       <PATH>/file_concat_0</PATH>
       <BLOCK>
-        <BLOCK_ID>6084289468290363112</BLOCK_ID>
+        <BLOCK_ID>-8902070029031700083</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1008</GENSTAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4219431127125026105</BLOCK_ID>
+        <BLOCK_ID>1791253399175285670</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1009</GENSTAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-1765119074945211374</BLOCK_ID>
+        <BLOCK_ID>3333415502075331416</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1010</GENSTAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>36</TXID>
+      <TXID>38</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405686013</MTIME>
-      <ATIME>1330405685983</ATIME>
+      <MTIME>1365900466094</MTIME>
+      <ATIME>1365900466074</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>6084289468290363112</BLOCK_ID>
+        <BLOCK_ID>-8902070029031700083</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1008</GENSTAMP>
+        <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-4219431127125026105</BLOCK_ID>
+        <BLOCK_ID>1791253399175285670</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1009</GENSTAMP>
+        <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-1765119074945211374</BLOCK_ID>
+        <BLOCK_ID>3333415502075331416</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1010</GENSTAMP>
+        <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
-    <DATA>
-      <TXID>37</TXID>
-      <GENSTAMP>1011</GENSTAMP>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
-      <TXID>38</TXID>
+      <TXID>39</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1007</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405686017</MTIME>
-      <ATIME>1330405686017</ATIME>
+      <MTIME>1365900466097</MTIME>
+      <ATIME>1365900466097</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -509,105 +520,105 @@
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>39</TXID>
-      <GENSTAMP>1012</GENSTAMP>
+      <TXID>40</TXID>
+      <GENSTAMP>1007</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>40</TXID>
+      <TXID>41</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
-        <BLOCK_ID>-7448471719302683860</BLOCK_ID>
+        <BLOCK_ID>-406914295015578364</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
+        <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>41</TXID>
-      <GENSTAMP>1013</GENSTAMP>
+      <TXID>42</TXID>
+      <GENSTAMP>1008</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>42</TXID>
+      <TXID>43</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
-        <BLOCK_ID>-7448471719302683860</BLOCK_ID>
+        <BLOCK_ID>-406914295015578364</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
+        <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-8051065559769974521</BLOCK_ID>
+        <BLOCK_ID>208049244517243116</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1013</GENSTAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
-      <TXID>43</TXID>
-      <GENSTAMP>1014</GENSTAMP>
+      <TXID>44</TXID>
+      <GENSTAMP>1009</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
-      <TXID>44</TXID>
+      <TXID>45</TXID>
       <PATH>/file_concat_1</PATH>
       <BLOCK>
-        <BLOCK_ID>-7448471719302683860</BLOCK_ID>
+        <BLOCK_ID>-406914295015578364</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
+        <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-8051065559769974521</BLOCK_ID>
+        <BLOCK_ID>208049244517243116</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1013</GENSTAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>3808670437711973616</BLOCK_ID>
+        <BLOCK_ID>-1546331983133724845</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1014</GENSTAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>45</TXID>
+      <TXID>46</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405686042</MTIME>
-      <ATIME>1330405686017</ATIME>
+      <MTIME>1365900466121</MTIME>
+      <ATIME>1365900466097</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>-7448471719302683860</BLOCK_ID>
+        <BLOCK_ID>-406914295015578364</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
+        <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>-8051065559769974521</BLOCK_ID>
+        <BLOCK_ID>208049244517243116</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1013</GENSTAMP>
+        <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
       <BLOCK>
-        <BLOCK_ID>3808670437711973616</BLOCK_ID>
+        <BLOCK_ID>-1546331983133724845</BLOCK_ID>
         <NUM_BYTES>512</NUM_BYTES>
-        <GENSTAMP>1014</GENSTAMP>
+        <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -616,10 +627,10 @@
   <RECORD>
     <OPCODE>OP_CONCAT_DELETE</OPCODE>
     <DATA>
-      <TXID>46</TXID>
+      <TXID>47</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1330405686046</TIMESTAMP>
+      <TIMESTAMP>1365900466123</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
@@ -629,15 +640,15 @@
   <RECORD>
     <OPCODE>OP_SYMLINK</OPCODE>
     <DATA>
-      <TXID>47</TXID>
+      <TXID>48</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>1008</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1330405686051</MTIME>
-      <ATIME>1330405686051</ATIME>
+      <MTIME>1365900466141</MTIME>
+      <ATIME>1365900466141</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
@@ -646,61 +657,54 @@
   <RECORD>
     <OPCODE>OP_GET_DELEGATION_TOKEN</OPCODE>
     <DATA>
-      <TXID>48</TXID>
+      <TXID>49</TXID>
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>todd</OWNER>
+        <OWNER>szetszwo</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1330405686056</ISSUE_DATE>
-        <MAX_DATE>1331010486056</MAX_DATE>
+        <ISSUE_DATE>1365900466144</ISSUE_DATE>
+        <MAX_DATE>1366505266144</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1330492086056</EXPIRY_TIME>
+      <EXPIRY_TIME>1365986866144</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_RENEW_DELEGATION_TOKEN</OPCODE>
     <DATA>
-      <TXID>49</TXID>
+      <TXID>50</TXID>
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>todd</OWNER>
+        <OWNER>szetszwo</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1330405686056</ISSUE_DATE>
-        <MAX_DATE>1331010486056</MAX_DATE>
+        <ISSUE_DATE>1365900466144</ISSUE_DATE>
+        <MAX_DATE>1366505266144</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1330492086075</EXPIRY_TIME>
+      <EXPIRY_TIME>1365986866231</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_CANCEL_DELEGATION_TOKEN</OPCODE>
     <DATA>
-      <TXID>50</TXID>
+      <TXID>51</TXID>
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>todd</OWNER>
+        <OWNER>szetszwo</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1330405686056</ISSUE_DATE>
-        <MAX_DATE>1331010486056</MAX_DATE>
+        <ISSUE_DATE>1365900466144</ISSUE_DATE>
+        <MAX_DATE>1366505266144</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_GENSTAMP</OPCODE>
-    <DATA>
-      <TXID>51</TXID>
-      <GENSTAMP>1015</GENSTAMP>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_ADD</OPCODE>
     <DATA>
       <TXID>52</TXID>
@@ -708,13 +712,13 @@
       <INODEID>1009</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405686084</MTIME>
-      <ATIME>1330405686084</ATIME>
+      <MTIME>1365900466237</MTIME>
+      <ATIME>1365900466237</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-2143415023_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_724142360_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -724,7 +728,7 @@
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
       <TXID>53</TXID>
-      <GENSTAMP>1016</GENSTAMP>
+      <GENSTAMP>1010</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
@@ -733,9 +737,9 @@
       <TXID>54</TXID>
       <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
-        <BLOCK_ID>-357061736603024522</BLOCK_ID>
+        <BLOCK_ID>-8246064927003717498</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1016</GENSTAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -745,9 +749,9 @@
       <TXID>55</TXID>
       <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
-        <BLOCK_ID>-357061736603024522</BLOCK_ID>
+        <BLOCK_ID>-8246064927003717498</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1016</GENSTAMP>
+        <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
     </DATA>
   </RECORD>
@@ -755,14 +759,14 @@
     <OPCODE>OP_SET_GENSTAMP</OPCODE>
     <DATA>
       <TXID>56</TXID>
-      <GENSTAMP>1017</GENSTAMP>
+      <GENSTAMP>1011</GENSTAMP>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>57</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-2143415023_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_724142360_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -775,35 +779,27 @@
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1330405688726</MTIME>
-      <ATIME>1330405686084</ATIME>
+      <MTIME>1365900468855</MTIME>
+      <ATIME>1365900466237</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>-357061736603024522</BLOCK_ID>
+        <BLOCK_ID>-8246064927003717498</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENSTAMP>1017</GENSTAMP>
+        <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>todd</USERNAME>
+        <USERNAME>szetszwo</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_OWNER</OPCODE>
-    <DATA>
-      <TXID>59</TXID>
-      <SRC>/file_create</SRC>
-      <GROUPNAME>newGroup</GROUPNAME>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>60</TXID>
+      <TXID>59</TXID>
     </DATA>
   </RECORD>
 </EDITS>
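
The OP_ALLOW_SNAPSHOT, OP_DISALLOW_SNAPSHOT, OP_CREATE_SNAPSHOT, OP_RENAME_SNAPSHOT and OP_DELETE_SNAPSHOT records added above (TXIDs 9 through 14) map one-to-one to snapshot calls against the NameNode; editsStored.xml is the offline edits viewer's XML rendering of the binary editsStored file beside it. A minimal sketch of client code that would log that opcode sequence, assuming fs.defaultFS points at a running HDFS cluster (the class name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotEditsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    Path dir = new Path("/directory_mkdir");

    dfs.mkdirs(dir);                                    // OP_MKDIR
    dfs.allowSnapshot(dir);                             // OP_ALLOW_SNAPSHOT
    dfs.disallowSnapshot(dir);                          // OP_DISALLOW_SNAPSHOT
    dfs.allowSnapshot(dir);                             // OP_ALLOW_SNAPSHOT
    dfs.createSnapshot(dir, "snapshot1");               // OP_CREATE_SNAPSHOT
    dfs.renameSnapshot(dir, "snapshot1", "snapshot2");  // OP_RENAME_SNAPSHOT
    dfs.deleteSnapshot(dir, "snapshot2");               // OP_DELETE_SNAPSHOT
  }
}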


