hadoop-hdfs-commits mailing list archives

From vinayakum...@apache.org
Subject svn commit: r1604086 [2/2] - in /hadoop/common/branches/HDFS-5442/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apac...
Date Fri, 20 Jun 2014 05:04:00 GMT
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri Jun 20 05:03:56 2014
@@ -355,6 +355,7 @@ public class FSEditLogLoader {
             lastInodeId);
         newFile = fsDir.unprotectedAddFile(inodeId,
             path, addCloseOp.permissions, addCloseOp.aclEntries,
+            addCloseOp.xAttrs,
             replication, addCloseOp.mtime, addCloseOp.atime,
             addCloseOp.blockSize, true, addCloseOp.clientName,
             addCloseOp.clientMachine);
@@ -804,7 +805,7 @@ public class FSEditLogLoader {
     }
     case OP_SET_XATTR: {
       SetXAttrOp setXAttrOp = (SetXAttrOp) op;
-      fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr, 
+      fsDir.unprotectedSetXAttrs(setXAttrOp.src, setXAttrOp.xAttrs,
           EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
@@ -813,7 +814,8 @@ public class FSEditLogLoader {
     }
     case OP_REMOVE_XATTR: {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
-      fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr);
+      fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
+          removeXAttrOp.xAttrs);
       break;
     }
     default:

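The loader-side change above replays xattr edits in bulk rather than one attribute at a time. Below is a minimal sketch of the list-merge semantics that unprotectedSetXAttrs implies, using plain strings in place of the HDFS XAttr type; it illustrates the contract exercised by the TestFSDirectory changes further down, and is not the FSDirectory code itself.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.EnumSet;
    import java.util.List;

    // Hypothetical stand-in for org.apache.hadoop.fs.XAttrSetFlag.
    enum XAttrSetFlag { CREATE, REPLACE }

    class XAttrMergeSketch {
      // Each incoming name either replaces an existing entry (requires
      // REPLACE) or is appended as a new one (requires CREATE).
      static List<String> setXAttrs(List<String> existing, List<String> toSet,
          EnumSet<XAttrSetFlag> flags) throws IOException {
        List<String> result = new ArrayList<>(existing);
        for (String name : toSet) {
          boolean present = result.remove(name);
          if (present && !flags.contains(XAttrSetFlag.REPLACE)) {
            throw new IOException("XAttr already exists: " + name);
          }
          if (!present && !flags.contains(XAttrSetFlag.CREATE)) {
            throw new IOException("XAttr does not exist: " + name);
          }
          result.add(name);
        }
        return result;
      }
    }
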
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri Jun 20 05:03:56 2014
@@ -382,6 +382,16 @@ public abstract class FSEditLogOp {
     }
   }
 
+  private static List<XAttr> readXAttrsFromEditLog(DataInputStream in,
+      int logVersion) throws IOException {
+    if (!NameNodeLayoutVersion.supports(NameNodeLayoutVersion.Feature.XATTRS,
+        logVersion)) {
+      return null;
+    }
+    XAttrEditLogProto proto = XAttrEditLogProto.parseDelimitedFrom(in);
+    return PBHelper.convertXAttrs(proto.getXAttrsList());
+  }
+
   @SuppressWarnings("unchecked")
   static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
     int length;
@@ -394,6 +404,7 @@ public abstract class FSEditLogOp {
     Block[] blocks;
     PermissionStatus permissions;
     List<AclEntry> aclEntries;
+    List<XAttr> xAttrs;
     String clientName;
     String clientMachine;
     
@@ -461,6 +472,11 @@ public abstract class FSEditLogOp {
       return (T)this;
     }
 
+    <T extends AddCloseOp> T setXAttrs(List<XAttr> xAttrs) {
+      this.xAttrs = xAttrs;
+      return (T)this;
+    }
+
     <T extends AddCloseOp> T setClientName(String clientName) {
       this.clientName = clientName;
       return (T)this;
@@ -484,6 +500,9 @@ public abstract class FSEditLogOp {
 
       if (this.opCode == OP_ADD) {
         AclEditLogUtil.write(aclEntries, out);
+        XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
+        b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+        b.build().writeDelimitedTo(out);
         FSImageSerialization.writeString(clientName,out);
         FSImageSerialization.writeString(clientMachine,out);
         // write clientId and callId
@@ -546,9 +565,9 @@ public abstract class FSEditLogOp {
       this.blocks = readBlocks(in, logVersion);
       this.permissions = PermissionStatus.read(in);
 
-      // clientname, clientMachine and block locations of last block.
       if (this.opCode == OP_ADD) {
         aclEntries = AclEditLogUtil.read(in, logVersion);
+        this.xAttrs = readXAttrsFromEditLog(in, logVersion);
         this.clientName = FSImageSerialization.readString(in);
         this.clientMachine = FSImageSerialization.readString(in);
         // read clientId and callId
@@ -1343,6 +1362,7 @@ public abstract class FSEditLogOp {
     long timestamp;
     PermissionStatus permissions;
     List<AclEntry> aclEntries;
+    List<XAttr> xAttrs;
 
     private MkdirOp() {
       super(OP_MKDIR);
@@ -1377,6 +1397,11 @@ public abstract class FSEditLogOp {
       return this;
     }
 
+    MkdirOp setXAttrs(List<XAttr> xAttrs) {
+      this.xAttrs = xAttrs;
+      return this;
+    }
+
     @Override
     public 
     void writeFields(DataOutputStream out) throws IOException {
@@ -1386,6 +1411,9 @@ public abstract class FSEditLogOp {
       FSImageSerialization.writeLong(timestamp, out); // atime, unused at this
       permissions.write(out);
       AclEditLogUtil.write(aclEntries, out);
+      XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
+      b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+      b.build().writeDelimitedTo(out);
     }
     
     @Override
@@ -1430,6 +1458,8 @@ public abstract class FSEditLogOp {
 
       this.permissions = PermissionStatus.read(in);
       aclEntries = AclEditLogUtil.read(in, logVersion);
+
+      xAttrs = readXAttrsFromEditLog(in, logVersion);
     }
 
     @Override
@@ -1451,6 +1481,8 @@ public abstract class FSEditLogOp {
       builder.append(opCode);
       builder.append(", txid=");
       builder.append(txid);
+      builder.append(", xAttrs=");
+      builder.append(xAttrs);
       builder.append("]");
       return builder.toString();
     }
@@ -1468,6 +1500,9 @@ public abstract class FSEditLogOp {
       if (aclEntries != null) {
         appendAclEntriesToXml(contentHandler, aclEntries);
       }
+      if (xAttrs != null) {
+        appendXAttrsToXml(contentHandler, xAttrs);
+      }
     }
     
     @Override void fromXml(Stanza st) throws InvalidXmlException {
@@ -1477,6 +1512,7 @@ public abstract class FSEditLogOp {
       this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
       this.permissions = permissionStatusFromXml(st);
       aclEntries = readAclEntriesFromXml(st);
+      xAttrs = readXAttrsFromXml(st);
     }
   }
 
@@ -3499,7 +3535,7 @@ public abstract class FSEditLogOp {
   }
   
   static class RemoveXAttrOp extends FSEditLogOp {
-    XAttr xAttr;
+    List<XAttr> xAttrs;
     String src;
     
     private RemoveXAttrOp() {
@@ -3514,7 +3550,7 @@ public abstract class FSEditLogOp {
     void readFields(DataInputStream in, int logVersion) throws IOException {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
-      xAttr = PBHelper.convertXAttr(p.getXAttr());
+      xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
     }
 
     @Override
@@ -3523,25 +3559,25 @@ public abstract class FSEditLogOp {
       if (src != null) {
         b.setSrc(src);
       }
-      b.setXAttr(PBHelper.convertXAttrProto(xAttr));
+      b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
     }
 
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
-      appendXAttrToXml(contentHandler, xAttr);
+      appendXAttrsToXml(contentHandler, xAttrs);
     }
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
-      xAttr = readXAttrFromXml(st);
+      xAttrs = readXAttrsFromXml(st);
     }
   }
   
   static class SetXAttrOp extends FSEditLogOp {
-    XAttr xAttr;
+    List<XAttr> xAttrs;
     String src;
     
     private SetXAttrOp() {
@@ -3556,7 +3592,7 @@ public abstract class FSEditLogOp {
     void readFields(DataInputStream in, int logVersion) throws IOException {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
-      xAttr = PBHelper.convertXAttr(p.getXAttr());
+      xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
       readRpcIds(in, logVersion);
     }
 
@@ -3566,7 +3602,7 @@ public abstract class FSEditLogOp {
       if (src != null) {
         b.setSrc(src);
       }
-      b.setXAttr(PBHelper.convertXAttrProto(xAttr));
+      b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
       // clientId and callId
       writeRpcIds(rpcClientId, rpcCallId, out);
@@ -3575,14 +3611,14 @@ public abstract class FSEditLogOp {
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
-      appendXAttrToXml(contentHandler, xAttr);
+      appendXAttrsToXml(contentHandler, xAttrs);
       appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
-      xAttr = readXAttrFromXml(st);
+      xAttrs = readXAttrsFromXml(st);
       readRpcIdsFromXml(st);
     }
   }
@@ -4202,42 +4238,48 @@ public abstract class FSEditLogOp {
     }
     return aclEntries;
   }
-  
-  private static void appendXAttrToXml(ContentHandler contentHandler,
-      XAttr xAttr) throws SAXException {
-    contentHandler.startElement("", "", "XATTR", new AttributesImpl());
-    XMLUtils.addSaxString(contentHandler, "NAMESPACE", 
-        xAttr.getNameSpace().toString());
-    XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
-    if (xAttr.getValue() != null) {
-      try {
-        XMLUtils.addSaxString(contentHandler, "VALUE", 
-            XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
-      } catch (IOException e) {
-        throw new SAXException(e);
+
+  private static void appendXAttrsToXml(ContentHandler contentHandler,
+      List<XAttr> xAttrs) throws SAXException {
+    for (XAttr xAttr: xAttrs) {
+      contentHandler.startElement("", "", "XATTR", new AttributesImpl());
+      XMLUtils.addSaxString(contentHandler, "NAMESPACE",
+          xAttr.getNameSpace().toString());
+      XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
+      if (xAttr.getValue() != null) {
+        try {
+          XMLUtils.addSaxString(contentHandler, "VALUE",
+              XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
+        } catch (IOException e) {
+          throw new SAXException(e);
+        }
       }
+      contentHandler.endElement("", "", "XATTR");
     }
-    contentHandler.endElement("", "", "XATTR");
   }
-  
-  private static XAttr readXAttrFromXml(Stanza st) 
+
+  private static List<XAttr> readXAttrsFromXml(Stanza st)
       throws InvalidXmlException {
     if (!st.hasChildren("XATTR")) {
       return null;
     }
-    
-    Stanza a = st.getChildren("XATTR").get(0);
-    XAttr.Builder builder = new XAttr.Builder();
-    builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
-        setName(a.getValue("NAME"));
-    String v = a.getValueOrNull("VALUE");
-    if (v != null) {
-      try {
-        builder.setValue(XAttrCodec.decodeValue(v));
-      } catch (IOException e) {
-        throw new InvalidXmlException(e.toString());
+
+    List<Stanza> stanzas = st.getChildren("XATTR");
+    List<XAttr> xattrs = Lists.newArrayListWithCapacity(stanzas.size());
+    for (Stanza a: stanzas) {
+      XAttr.Builder builder = new XAttr.Builder();
+      builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
+          setName(a.getValue("NAME"));
+      String v = a.getValueOrNull("VALUE");
+      if (v != null) {
+        try {
+          builder.setValue(XAttrCodec.decodeValue(v));
+        } catch (IOException e) {
+          throw new InvalidXmlException(e.toString());
+        }
       }
+      xattrs.add(builder.build());
     }
-    return builder.build();
+    return xattrs;
   }
 }

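The new readXAttrsFromEditLog helper relies on protobuf's length-delimited framing, which lets a variable-length xattr list sit in the middle of an edit-log record and be read back without knowing its size up front; on layout versions that predate XATTRS the frame is absent, so the helper returns null instead of reading. A sketch of the round trip, assuming java.io.* imports plus the generated XAttrEditLogProto and the PBHelper conversions used in the hunks above:

    // Write side: one length-prefixed frame, as in AddCloseOp.writeFields.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    XAttrEditLogProto.newBuilder()
        .addAllXAttrs(PBHelper.convertXAttrProto(xAttrs))
        .build()
        .writeDelimitedTo(bos);

    // Read side: parseDelimitedFrom consumes exactly one frame, leaving the
    // stream positioned at clientName/clientMachine for the reads that follow.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    XAttrEditLogProto proto = XAttrEditLogProto.parseDelimitedFrom(in);
    List<XAttr> roundTripped = PBHelper.convertXAttrs(proto.getXAttrsList());
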
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java Fri Jun 20 05:03:56 2014
@@ -533,8 +533,10 @@ public final class FSImageFormatPBINode 
       INodeSection.INodeFile.Builder b = buildINodeFile(n,
           parent.getSaverContext());
 
-      for (Block block : n.getBlocks()) {
-        b.addBlocks(PBHelper.convert(block));
+      if (n.getBlocks() != null) {
+        for (Block block : n.getBlocks()) {
+          b.addBlocks(PBHelper.convert(block));
+        }
       }
 
       FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jun 20 05:03:56 2014
@@ -103,6 +103,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -514,6 +515,59 @@ public class FSNamesystem implements Nam
 
   private final NNConf nnConf;
 
+  private volatile boolean imageLoaded = false;
+  private final Condition cond;
+  /**
+   * Notify that loading of this FSDirectory is complete, and
+   * it is imageLoaded for use
+   */
+  void imageLoadComplete() {
+    Preconditions.checkState(!imageLoaded, "FSDirectory already loaded");
+    setImageLoaded();
+  }
+
+  void setImageLoaded() {
+    if(imageLoaded) return;
+    writeLock();
+    try {
+      setImageLoaded(true);
+      dir.markNameCacheInitialized();
+      cond.signalAll();
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  //This is for testing purposes only
+  @VisibleForTesting
+  boolean isImageLoaded() {
+    return imageLoaded;
+  }
+
+  // exposed for unit tests
+  protected void setImageLoaded(boolean flag) {
+    imageLoaded = flag;
+  }
+
+  /**
+   * Block until the object is imageLoaded to be used.
+   */
+  void waitForLoadingFSImage() {
+    if (!imageLoaded) {
+      writeLock();
+      try {
+        while (!imageLoaded) {
+          try {
+            cond.await(5000, TimeUnit.MILLISECONDS);
+          } catch (InterruptedException ignored) {
+          }
+        }
+      } finally {
+        writeUnlock();
+      }
+    }
+  }
+
   /**
    * Set the last allocated inode id when fsimage or editlog is loaded. 
    */
@@ -555,6 +609,7 @@ public class FSNamesystem implements Nam
     inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     snapshotManager.clearSnapshottableDirs();
     cacheManager.clear();
+    setImageLoaded(false);
   }
 
   @VisibleForTesting
@@ -682,6 +737,7 @@ public class FSNamesystem implements Nam
     boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
     LOG.info("fsLock is fair:" + fair);
     fsLock = new FSNamesystemLock(fair);
+    cond = fsLock.writeLock().newCondition();
     try {
       resourceRecheckInterval = conf.getLong(
           DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
@@ -921,7 +977,7 @@ public class FSNamesystem implements Nam
       }
       writeUnlock();
     }
-    dir.imageLoadComplete();
+    imageLoadComplete();
   }
 
   private void startSecretManager() {
@@ -1840,6 +1896,7 @@ public class FSNamesystem implements Nam
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2115,6 +2172,7 @@ public class FSNamesystem implements Nam
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2242,6 +2300,8 @@ public class FSNamesystem implements Nam
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean create = flag.contains(CreateFlag.CREATE);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
+
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2524,10 +2584,10 @@ public class FSNamesystem implements Nam
       // We found the lease for this file. And surprisingly the original
       // holder is trying to recreate this file. This should never occur.
       //
+
       if (!force && lease != null) {
         Lease leaseFile = leaseManager.getLeaseByPath(src);
-        if ((leaseFile != null && leaseFile.equals(lease)) ||
-            lease.getHolder().equals(holder)) { 
+        if (leaseFile != null && leaseFile.equals(lease)) {
           throw new AlreadyBeingCreatedException(
             "failed to create file " + src + " for " + holder +
             " for client " + clientMachine +
@@ -2730,6 +2790,7 @@ public class FSNamesystem implements Nam
     Block newBlock = null;
     long offset;
     checkOperation(OperationCategory.WRITE);
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2952,6 +3013,7 @@ public class FSNamesystem implements Nam
     }
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3050,6 +3112,7 @@ public class FSNamesystem implements Nam
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3249,6 +3312,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
+      waitForLoadingFSImage();
       src = FSDirectory.resolvePath(src, srcComponents, dir);
       dst = FSDirectory.resolvePath(dst, dstComponents, dir);
       checkOperation(OperationCategory.WRITE);
@@ -3356,6 +3420,7 @@ public class FSNamesystem implements Nam
           false);
     }
 
+    waitForLoadingFSImage();
     long mtime = now();
     dir.renameTo(src, dst, mtime, options);
     getEditLog().logRename(src, dst, mtime, logRetryCache, options);
@@ -3429,6 +3494,8 @@ public class FSNamesystem implements Nam
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean ret = false;
+
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3902,6 +3969,8 @@ public class FSNamesystem implements Nam
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -4103,6 +4172,7 @@ public class FSNamesystem implements Nam
       INodeFile pendingFile, int latestSnapshot) throws IOException,
       UnresolvedLinkException {
     assert hasWriteLock();
+
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
     Preconditions.checkArgument(uc != null);
     leaseManager.removeLease(uc.getClientName(), src);
@@ -4114,6 +4184,7 @@ public class FSNamesystem implements Nam
     // since we just remove the uc feature from pendingFile
     final INodeFile newFile = pendingFile.toCompleteFile(now());
 
+    waitForLoadingFSImage();
     // close file and persist block allocations for this file
     closeFile(src, newFile);
 
@@ -4172,6 +4243,7 @@ public class FSNamesystem implements Nam
              + ")");
     checkOperation(OperationCategory.WRITE);
     String src = "";
+    waitForLoadingFSImage();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -4517,7 +4589,7 @@ public class FSNamesystem implements Nam
    */
   private void closeFile(String path, INodeFile file) {
     assert hasWriteLock();
-    dir.waitForReady();
+    waitForLoadingFSImage();
     // file is closed
     getEditLog().logCloseFile(path, file);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -4541,7 +4613,7 @@ public class FSNamesystem implements Nam
                                   boolean createParent, boolean logRetryCache)
       throws UnresolvedLinkException, FileAlreadyExistsException,
       QuotaExceededException, SnapshotAccessControlException, AclException {
-    dir.waitForReady();
+    waitForLoadingFSImage();
 
     final long modTime = now();
     if (createParent) {
@@ -5804,7 +5876,7 @@ public class FSNamesystem implements Nam
       boolean ignoreEmptyDir, boolean resolveLink)
       throws AccessControlException, UnresolvedLinkException {
     if (!pc.isSuperUser()) {
-      dir.waitForReady();
+      waitForLoadingFSImage();
       readLock();
       try {
         pc.checkPermission(path, dir, doCheckOwner, ancestorAccess,
@@ -6271,6 +6343,7 @@ public class FSNamesystem implements Nam
              + ", newNodes=" + Arrays.asList(newNodes)
              + ", clientName=" + clientName
              + ")");
+    waitForLoadingFSImage();
     writeLock();
     boolean success = false;
     try {
@@ -8121,8 +8194,10 @@ public class FSNamesystem implements Nam
         checkOwner(pc, src);
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      dir.setXAttr(src, xAttr, flag);
-      getEditLog().logSetXAttr(src, xAttr, logRetryCache);
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(xAttr);
+      dir.setXAttrs(src, xAttrs, flag);
+      getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -8242,10 +8317,12 @@ public class FSNamesystem implements Nam
         checkOwner(pc, src);
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      
-      XAttr removedXAttr = dir.removeXAttr(src, xAttr);
-      if (removedXAttr != null) {
-        getEditLog().logRemoveXAttr(src, removedXAttr);
+
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(xAttr);
+      List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
+      if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
+        getEditLog().logRemoveXAttrs(src, removedXAttrs);
       }
       resultingStat = getAuditFileInfo(src, false);
     } catch (AccessControlException e) {

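The imageLoaded plumbing moves FSDirectory's old ready flag up into the namesystem and is the standard guarded-wait idiom: a volatile boolean serves the lock-free fast path, and a Condition bound to the write lock serves waiters. A self-contained sketch of the same pattern (field and method names are illustrative, not the FSNamesystem ones):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class LoadGate {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
      private final Condition loaded = lock.writeLock().newCondition();
      private volatile boolean imageLoaded = false;

      void setLoaded() {
        if (imageLoaded) return;            // cheap volatile fast path
        lock.writeLock().lock();
        try {
          imageLoaded = true;
          loaded.signalAll();               // wake every waiter
        } finally {
          lock.writeLock().unlock();
        }
      }

      void awaitLoaded() {
        if (imageLoaded) return;            // skip locking once loaded
        lock.writeLock().lock();
        try {
          while (!imageLoaded) {            // guard against spurious wakeups
            try {
              loaded.await(5, TimeUnit.SECONDS);
            } catch (InterruptedException ignored) {
              // keep waiting; the FSNamesystem code above does the same
            }
          }
        } finally {
          lock.writeLock().unlock();
        }
      }
    }

The loop around await() matters: Condition waits may wake spuriously, and the timeout only bounds each individual wait, so waitForLoadingFSImage above likewise re-checks the flag until it flips.
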
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Jun 20 05:03:56 2014
@@ -598,7 +598,8 @@ public class NameNode implements NameNod
     
     pauseMonitor = new JvmPauseMonitor(conf);
     pauseMonitor.start();
-
+    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
+    
     startCommonServices(conf);
   }
   

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Jun 20 05:03:56 2014
@@ -1064,7 +1064,7 @@ public class SecondaryNameNode implement
       } finally {
         dstNamesystem.writeUnlock();
       }
-      dstNamesystem.dir.imageLoadComplete();
+      dstNamesystem.imageLoadComplete();
     }
     // error simulation code for junit test
     CheckpointFaultInjector.getInstance().duringMerge();   

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Fri Jun 20 05:03:56 2014
@@ -98,7 +98,11 @@ public class NameNodeMetrics {
   @Metric("GetImageServlet putImage")
   MutableRate putImage;
 
-  NameNodeMetrics(String processName, String sessionId, int[] intervals) {
+  JvmMetrics jvmMetrics = null;
+  
+  NameNodeMetrics(String processName, String sessionId, int[] intervals,
+      final JvmMetrics jvmMetrics) {
+    this.jvmMetrics = jvmMetrics;
     registry.tag(ProcessName, processName).tag(SessionId, sessionId);
     
     final int len = intervals.length;
@@ -124,14 +128,19 @@ public class NameNodeMetrics {
     String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
     String processName = r.toString();
     MetricsSystem ms = DefaultMetricsSystem.instance();
-    JvmMetrics.create(processName, sessionId, ms);
+    JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
     
     // Percentile measurement is off by default, by watching no intervals
     int[] intervals = 
         conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
-    return ms.register(new NameNodeMetrics(processName, sessionId, intervals));
+    return ms.register(new NameNodeMetrics(processName, sessionId,
+        intervals, jm));
   }
 
+  public JvmMetrics getJvmMetrics() {
+    return jvmMetrics;
+  }
+  
   public void shutdown() {
     DefaultMetricsSystem.shutdown();
   }

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java Fri Jun 20 05:03:56 2014
@@ -159,7 +159,7 @@ public class FileWithSnapshotFeature imp
         // resize the array.  
         final BlockInfo[] newBlocks;
         if (n == 0) {
-          newBlocks = null;
+          newBlocks = BlockInfo.EMPTY_ARRAY;
         } else {
           newBlocks = new BlockInfo[n];
           System.arraycopy(oldBlocks, 0, newBlocks, 0, n);

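Returning BlockInfo.EMPTY_ARRAY instead of null pairs with the defensive null check added to FSImageFormatPBINode above: a single shared empty array lets most callers iterate without a guard and costs no per-call allocation. The general idiom, sketched with a placeholder type:

    class BlockHolder {
      // One shared, never-mutated instance; callers never see null.
      static final long[] EMPTY_BLOCKS = {};

      private long[] blocks = EMPTY_BLOCKS;

      long[] getBlocks() { return blocks; }

      // Iteration needs no null guard:
      long totalSize() {
        long sum = 0;
        for (long b : getBlocks()) sum += b;
        return sum;
      }
    }
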
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Jun 20 05:03:56 2014
@@ -344,8 +344,6 @@ public class WebHdfsFileSystem extends F
    */
   private synchronized void resetStateToFailOver() {
     currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
-    delegationToken = null;
-    tokenAspect.reset();
   }
 
   /**

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto Fri Jun 20 05:03:56 2014
@@ -35,8 +35,8 @@ message XAttrProto {
 }
   
 message XAttrEditLogProto {
-  required string src = 1;
-  optional XAttrProto xAttr = 2;
+  optional string src = 1;
+  repeated XAttrProto xAttrs = 2;
 }
 
 enum XAttrSetFlagProto {

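The field keeps tag number 2 and its wire type, and in proto2 an optional message field is wire-compatible with a repeated one: a record written with the old single xAttr parses as a one-element xAttrs list. Relaxing src from required to optional is likewise safe for new readers of old data, though an old reader would reject a record that omits it, so writers should keep setting it. On the Java side the builder usage becomes (mirroring the SetXAttrOp.writeFields hunk above):

    XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
    if (src != null) {
      b.setSrc(src);                                    // now optional
    }
    b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs)); // now repeated
    b.build().writeDelimitedTo(out);
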
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm Fri Jun 20 05:03:56 2014
@@ -287,13 +287,14 @@ HDFS Federation
 
   Policy could be:
 
-  * <<<node>>> - this is the <default> policy. This balances the storage at 
+  * <<<datanode>>> - this is the <default> policy. This balances the storage at 
     the datanode level. This is similar to balancing policy from prior releases.
 
   * <<<blockpool>>> - this balances the storage at the block pool level. 
     Balancing at block pool level balances storage at the datanode level also.
 
-  Note that Balander only balances the data and does not balance the namespace.
+  Note that Balancer only balances the data and does not balance the namespace.
+  For the complete command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
 
 ** Decommissioning
 

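For reference, the policy is selected on the balancer command line; a typical invocation (the threshold value here is only an example):

    hdfs balancer -policy blockpool -threshold 5
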
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Fri Jun 20 05:03:56 2014
@@ -88,6 +88,25 @@ HDFS NFS Gateway
   </property>
 ----
 
+   The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
+   that prevent it from working correctly by default with the HDFS NFS
+   Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you
+   should set the following configuration setting to enable work-arounds for these
+   issues:
+
+----
+<property>
+  <name>nfs.aix.compatibility.mode.enabled</name>
+  <value>true</value>
+</property>
+----
+
+   Note that regular, non-AIX clients should NOT enable AIX compatibility mode.
+   The work-arounds implemented by AIX compatibility mode effectively disable
+   safeguards to ensure that listing of directory contents via NFS returns
+   consistent results, and that all data sent to the NFS server can be assured to
+   have been committed.
+
    It's strongly recommended for the users to update a few configuration properties based on their use
    cases. All the related configuration properties can be added or updated in hdfs-site.xml.
   
@@ -322,6 +341,22 @@ HDFS NFS Gateway
   Then the users can access HDFS as part of the local file system except that, 
   hard link and random write are not supported yet.
 
+* {Allow mounts from unprivileged clients}
+
+  In environments where root access on client machines is not generally
+  available, some measure of security can be obtained by ensuring that only NFS
+  clients originating from privileged ports can connect to the NFS server. This
+  feature is referred to as "port monitoring." This feature is not enabled by default
+  in the HDFS NFS Gateway, but can be optionally enabled by setting the
+  following config in hdfs-site.xml on the NFS Gateway machine:
+
+-------------------------------------------------------------------
+<property>
+  <name>nfs.port.monitoring.disabled</name>
+  <value>false</value>
+</property>
+-------------------------------------------------------------------
+
 * {User authentication and mapping}
 
   NFS gateway in this release uses AUTH_UNIX style authentication. When the user on NFS client

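For a concrete client-side example, a gateway export is typically mounted with NFSv3 over TCP (host and mount point below are placeholders):

    mount -t nfs -o vers=3,proto=tcp,nolock nfsgateway:/ /mnt/hdfs
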
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm Fri Jun 20 05:03:56 2014
@@ -77,7 +77,7 @@ HDFS Users Guide
           * <<<fetchdt>>>: a utility to fetch DelegationToken and store it in a
             file on the local system.
 
-          * Rebalancer: tool to balance the cluster when the data is
+          * Balancer: tool to balance the cluster when the data is
             unevenly distributed among DataNodes.
 
           * Upgrade and rollback: after a software upgrade, it is possible
@@ -316,7 +316,7 @@ HDFS Users Guide
 
    For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
-* Rebalancer
+* Balancer
 
    HDFS data might not always be be placed uniformly across the DataNode.
    One common reason is addition of new DataNodes to an existing cluster.
@@ -338,7 +338,7 @@ HDFS Users Guide
    Due to multiple competing considerations, data might not be uniformly
    placed across the DataNodes. HDFS provides a tool for administrators
    that analyzes block placement and rebalanaces data across the DataNode.
-   A brief administrator's guide for rebalancer as a PDF is attached to
+   A brief administrator's guide for balancer is available at 
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
    For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml Fri Jun 20 05:03:56 2014
@@ -217,7 +217,7 @@
     
   <subsection name="DFSAdmin Commands" id="dfsadminCommands">
   <h4><code>dfsadmin -rollingUpgrade</code></h4>
-  <source>hdfs dfsadmin -rollingUpgrade &lt;query|start|finalize&gt;</source>
+  <source>hdfs dfsadmin -rollingUpgrade &lt;query|prepare|finalize&gt;</source>
   <p>
     Execute a rolling upgrade action.
     <ul><li>Options:<table>

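The corrected subcommand list matches the usual rolling-upgrade sequence; sketched below with one-line summaries (see the surrounding document for the full semantics):

    hdfs dfsadmin -rollingUpgrade prepare   # create a rollback image before upgrading
    hdfs dfsadmin -rollingUpgrade query     # check whether the rollback image is ready
    hdfs dfsadmin -rollingUpgrade finalize  # finalize once all nodes run the new version
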
Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jun 20 05:03:56 2014
@@ -153,6 +153,15 @@ public class TestLeaseRecovery2 {
     verifyFile(dfs, filepath1, actual, size);
   }
 
+  @Test
+  public void testLeaseRecoverByAnotherUser() throws Exception {
+    byte [] actual = new byte[FILE_SIZE];
+    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    Path filepath = createFile("/immediateRecoverLease-x", 0, true);
+    recoverLeaseUsingCreate2(filepath);
+    verifyFile(dfs, filepath, actual, 0);
+  }
+
   private Path createFile(final String filestr, final int size,
       final boolean triggerLeaseRenewerInterrupt)
   throws IOException, InterruptedException {
@@ -196,7 +205,7 @@ public class TestLeaseRecovery2 {
   }
 
   private void recoverLeaseUsingCreate(Path filepath)
-  throws IOException, InterruptedException {
+      throws IOException, InterruptedException {
     FileSystem dfs2 = getFSAsAnotherUser(conf);
     for(int i = 0; i < 10; i++) {
       AppendTestUtil.LOG.info("i=" + i);
@@ -216,6 +225,20 @@ public class TestLeaseRecovery2 {
     fail("recoverLeaseUsingCreate failed");
   }
 
+  private void recoverLeaseUsingCreate2(Path filepath)
+          throws Exception {
+    FileSystem dfs2 = getFSAsAnotherUser(conf);
+    int size = AppendTestUtil.nextInt(FILE_SIZE);
+    DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
+    //create file using dfsx
+    Path filepath2 = new Path("/immediateRecoverLease-x2");
+    FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE,
+        REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
+    try {Thread.sleep(10000);} catch (InterruptedException e) {}
+    dfsx.append(filepath);
+  }
+
   private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
       int size) throws IOException {
     AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Fri Jun 20 05:03:56 2014
@@ -29,6 +29,8 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -42,6 +44,34 @@ public class TestBlockInfo {
   private static final Log LOG = LogFactory
       .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
 
+
+  @Test
+  public void testAddStorage() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
+
+    boolean added = blockInfo.addStorage(storage);
+
+    Assert.assertTrue(added);
+    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+  }
+
+
+  @Test
+  public void testReplaceStorageIfDifferetnOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
+    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+  }
+
   @Test
   public void testBlockListMoveToHead() throws Exception {
     LOG.info("BlockInfo moveToHead tests...");

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Fri Jun 20 05:03:56 2014
@@ -32,7 +32,6 @@ import java.io.IOException;
 
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.*;
 
 /**
@@ -50,6 +49,7 @@ public class TestCommitBlockSynchronizat
     final DatanodeStorageInfo[] targets = {};
 
     FSNamesystem namesystem = new FSNamesystem(conf, image);
+    namesystem.setImageLoaded(true);
     FSNamesystem namesystemSpy = spy(namesystem);
     BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
         block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Fri Jun 20 05:03:56 2014
@@ -24,7 +24,9 @@ import java.io.IOException;
 import java.io.StringReader;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Random;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +38,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -45,6 +46,11 @@ import org.junit.Test;
 
 import com.google.common.collect.Lists;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -74,6 +80,10 @@ public class TestFSDirectory {
 
   private DistributedFileSystem hdfs;
 
+  private static final int numGeneratedXAttrs = 256;
+  private static final ImmutableList<XAttr> generatedXAttrs =
+      ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs));
+
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
@@ -119,25 +129,16 @@ public class TestFSDirectory {
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
       if (!line.isEmpty() && !line.contains("snapshot")) {
-        Assert.assertTrue("line=" + line,
+        assertTrue("line=" + line,
             line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
-            || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
+                || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM)
+        );
         checkClassName(line);
       }
     }
   }
   
   @Test
-  public void testReset() throws Exception {
-    fsdir.reset();
-    Assert.assertFalse(fsdir.isReady());
-    final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
-    Assert.assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
-    fsdir.imageLoadComplete();
-    Assert.assertTrue(fsdir.isReady());
-  }
-
-  @Test
   public void testSkipQuotaCheck() throws Exception {
     try {
       // set quota. nsQuota of 1 means no files can be created
@@ -176,7 +177,7 @@ public class TestFSDirectory {
     int i = line.lastIndexOf('(');
     int j = line.lastIndexOf('@');
     final String classname = line.substring(i+1, j);
-    Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
+    assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
   
@@ -193,22 +194,185 @@ public class TestFSDirectory {
     // Adding a system namespace xAttr, isn't affected by inode xAttrs limit.
     XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
         setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
-    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr, 
+    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(1);
+    newXAttrs.add(newXAttr);
+    List<XAttr> xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
         EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-    Assert.assertEquals(xAttrs.size(), 3);
+    assertEquals(xAttrs.size(), 3);
     
     // Adding a trusted namespace xAttr, is affected by inode xAttrs limit.
     XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
         XAttr.NameSpace.TRUSTED).setName("a4").
         setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    newXAttrs.set(0, newXAttr1);
     try {
-      fsdir.setINodeXAttr(existingXAttrs, newXAttr1, 
+      fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
           EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-      Assert.fail("Setting user visable xattr on inode should fail if " +
+      fail("Setting user visible xattr on inode should fail if " +
           "reaching limit.");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
           "to inode, would exceed limit", e);
     }
   }
+
+  /**
+   * Verify that the first <i>num</i> generatedXAttrs are present in
+   * newXAttrs.
+   */
+  private static void verifyXAttrsPresent(List<XAttr> newXAttrs,
+      final int num) {
+    assertEquals("Unexpected number of XAttrs after multiset", num,
+        newXAttrs.size());
+    for (int i=0; i<num; i++) {
+      XAttr search = generatedXAttrs.get(i);
+      assertTrue("Did not find set XAttr " + search + " + after multiset",
+          newXAttrs.contains(search));
+    }
+  }
+
+  private static List<XAttr> generateXAttrs(final int numXAttrs) {
+    List<XAttr> generatedXAttrs = Lists.newArrayListWithCapacity(numXAttrs);
+    for (int i=0; i<numXAttrs; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) i, (byte) (i + 1), (byte) (i + 2) })
+          .build();
+      generatedXAttrs.add(xAttr);
+    }
+    return generatedXAttrs;
+  }
+
+  /**
+   * Test setting and removing multiple xattrs via single operations
+   */
+  @Test(timeout=300000)
+  public void testXAttrMultiSetRemove() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(0);
+
+    // Keep adding a random number of xattrs and verifying until exhausted
+    final Random rand = new Random(0xFEEDA);
+    int numExpectedXAttrs = 0;
+    while (numExpectedXAttrs < numGeneratedXAttrs) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToAdd = rand.nextInt(5)+1;
+
+      List<XAttr> toAdd = Lists.newArrayListWithCapacity(numToAdd);
+      for (int i = 0; i < numToAdd; i++) {
+        if (numExpectedXAttrs >= numGeneratedXAttrs) {
+          break;
+        }
+        toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
+        numExpectedXAttrs++;
+      }
+      LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
+      for (int i = 0; i < toAdd.size(); i++) {
+        LOG.info("Will add XAttr " + toAdd.get(i));
+      }
+      List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+          EnumSet.of(XAttrSetFlag.CREATE));
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+
+    // Keep removing a random number of xattrs and verifying until all gone
+    while (numExpectedXAttrs > 0) {
+      LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
+      final int numToRemove = rand.nextInt(5)+1;
+      List<XAttr> toRemove = Lists.newArrayListWithCapacity(numToRemove);
+      for (int i = 0; i < numToRemove; i++) {
+        if (numExpectedXAttrs == 0) {
+          break;
+        }
+        toRemove.add(generatedXAttrs.get(numExpectedXAttrs-1));
+        numExpectedXAttrs--;
+      }
+      final int expectedNumToRemove = toRemove.size();
+      LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
+      List<XAttr> removedXAttrs = Lists.newArrayList();
+      List<XAttr> newXAttrs = fsdir.filterINodeXAttrs(existingXAttrs,
+          toRemove, removedXAttrs);
+      assertEquals("Unexpected number of removed XAttrs",
+          expectedNumToRemove, removedXAttrs.size());
+      verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
+      existingXAttrs = newXAttrs;
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testXAttrMultiAddRemoveErrors() throws Exception {
+
+    // Test that the same XAttr can not be multiset twice
+    List<XAttr> existingXAttrs = Lists.newArrayList();
+    List<XAttr> toAdd = Lists.newArrayList();
+    toAdd.add(generatedXAttrs.get(0));
+    toAdd.add(generatedXAttrs.get(1));
+    toAdd.add(generatedXAttrs.get(2));
+    toAdd.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag
+          .CREATE));
+      fail("Specified the same xattr to be set twice");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot specify the same " +
+          "XAttr to be set", e);
+    }
+
+    // Test that CREATE and REPLACE flags are obeyed
+    toAdd.remove(generatedXAttrs.get(0));
+    existingXAttrs.add(generatedXAttrs.get(0));
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag
+          .CREATE));
+      fail("Set XAttr that is already set without REPLACE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("already exists", e);
+    }
+    try {
+      fsdir.setINodeXAttrs(existingXAttrs, toAdd, EnumSet.of(XAttrSetFlag
+          .REPLACE));
+      fail("Set XAttr that does not exist without the CREATE flag");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("does not exist", e);
+    }
+
+    // Sanity test for CREATE
+    toAdd.remove(generatedXAttrs.get(0));
+    List<XAttr> newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE));
+    assertEquals("Unexpected toAdd size", 2, toAdd.size());
+    for (XAttr x : toAdd) {
+      assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x));
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<3; i++) {
+      XAttr xAttr = (new XAttr.Builder())
+          .setNameSpace(XAttr.NameSpace.SYSTEM)
+          .setName("a" + i)
+          .setValue(new byte[] { (byte) (i*2) })
+          .build();
+      toAdd.add(xAttr);
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.REPLACE));
+    assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size());
+    for (int i=0; i<3; i++) {
+      assertArrayEquals("Unexpected XAttr value",
+          new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue());
+    }
+    existingXAttrs = newXAttrs;
+
+    // Sanity test for CREATE+REPLACE
+    toAdd = Lists.newArrayList();
+    for (int i=0; i<4; i++) {
+      toAdd.add(generatedXAttrs.get(i));
+    }
+    newXAttrs = fsdir.setINodeXAttrs(existingXAttrs, toAdd,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    verifyXAttrsPresent(newXAttrs, 4);
+  }
 }
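
For reference, a minimal client-side sketch of the CREATE/REPLACE flag semantics exercised above, assuming a running HDFS with XAttrs enabled; the path and class name are illustrative and not part of this patch:

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrFlagSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/xattr-demo");  // illustrative path
    fs.create(p).close();

    // CREATE fails if the XAttr already exists on the inode.
    fs.setXAttr(p, "user.a1", new byte[] {0x31},
        EnumSet.of(XAttrSetFlag.CREATE));

    // REPLACE fails if the XAttr does not exist yet.
    fs.setXAttr(p, "user.a1", new byte[] {0x32},
        EnumSet.of(XAttrSetFlag.REPLACE));

    // CREATE|REPLACE sets unconditionally, matching the CREATE+REPLACE
    // case in the test above.
    fs.setXAttr(p, "user.a2", new byte[] {0x33},
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));

    fs.removeXAttr(p, "user.a1");
  }
}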

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Fri Jun 20 05:03:56 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.After;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -194,4 +195,22 @@ public class TestFSNamesystem {
     assertFalse(rwLock.isWriteLockedByCurrentThread());
     assertEquals(0, rwLock.getWriteHoldCount());
   }
+
+  @Test
+  public void testReset() throws Exception {
+    Configuration conf = new Configuration();
+    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+    FSImage fsImage = Mockito.mock(FSImage.class);
+    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsn.imageLoadComplete();
+    assertTrue(fsn.isImageLoaded());
+    fsn.clear();
+    assertFalse(fsn.isImageLoaded());
+    final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
+            .getINode("/");
+    assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
+    fsn.imageLoadComplete();
+    assertTrue(fsn.isImageLoaded());
+  }
 }

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Fri Jun 20 05:03:56 2014
@@ -19,12 +19,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.apache.hadoop.util.Time.now;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.IOException;
@@ -57,7 +54,7 @@ public class TestFsLimits {
     FSEditLog editLog = mock(FSEditLog.class);
     doReturn(editLog).when(fsImage).getEditLog();
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
-    fsn.getFSDirectory().setReady(fsIsReady);
+    fsn.setImageLoaded(fsIsReady);
     return fsn;
   }
 

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Fri Jun 20 05:03:56 2014
@@ -171,8 +171,6 @@ public class TestRenameWithSnapshots {
   private static boolean existsInDiffReport(List<DiffReportEntry> entries,
       DiffType type, String relativePath) {
     for (DiffReportEntry entry : entries) {
-      System.out.println("DiffEntry is:" + entry.getType() + "\""
-          + new String(entry.getRelativePath()) + "\"");
       if ((entry.getType() == type)
           && ((new String(entry.getRelativePath())).compareTo(relativePath) == 0)) {
         return true;
@@ -2374,4 +2372,46 @@ public class TestRenameWithSnapshots {
     // save namespace and restart
     restartClusterAndCheckImage(true);
   }
+
+  @Test
+  public void testRenameWithOverWrite() throws Exception {
+    final Path root = new Path("/");
+    final Path foo = new Path(root, "foo");
+    final Path file1InFoo = new Path(foo, "file1");
+    final Path file2InFoo = new Path(foo, "file2");
+    final Path file3InFoo = new Path(foo, "file3");
+    DFSTestUtil.createFile(hdfs, file1InFoo, 1L, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file2InFoo, 1L, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file3InFoo, 1L, REPL, SEED);
+    final Path bar = new Path(root, "bar");
+    hdfs.mkdirs(bar);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+    // move file1 from foo to bar
+    final Path fileInBar = new Path(bar, "file1");
+    hdfs.rename(file1InFoo, fileInBar);
+    // rename bar to newDir
+    final Path newDir = new Path(root, "newDir");
+    hdfs.rename(bar, newDir);
+    // move file2 from foo to newDir
+    final Path file2InNewDir = new Path(newDir, "file2");
+    hdfs.rename(file2InFoo, file2InNewDir);
+    // move file3 from foo to newDir and rename it to file1, which overwrites
+    // the original file1
+    final Path file1InNewDir = new Path(newDir, "file1");
+    hdfs.rename(file3InFoo, file1InNewDir, Rename.OVERWRITE);
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+
+    SnapshotDiffReport report = hdfs.getSnapshotDiffReport(root, "s0", "s1");
+    LOG.info("DiffList is \n\"" + report.toString() + "\"");
+    List<DiffReportEntry> entries = report.getDiffList();
+    assertEquals(7, entries.size());
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, bar.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, newDir.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1"));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file2"));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file3"));
+  }
 }
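
The OVERWRITE rename used above is exposed on DistributedFileSystem. A rough standalone sketch of producing the same kind of diff report, assuming fs.defaultFS points at a running HDFS and the paths (which are illustrative) already exist:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotDiffSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        new Path("/").getFileSystem(new Configuration());
    Path root = new Path("/");
    dfs.allowSnapshot(root);   // requires superuser
    dfs.createSnapshot(root, "s0");

    // An OVERWRITE rename deletes the destination file, so it surfaces in
    // the diff as a DELETE of the source plus a MODIFY of the parent, as
    // asserted in the test above.
    dfs.rename(new Path("/foo/file3"), new Path("/newDir/file1"),
        Rename.OVERWRITE);

    dfs.createSnapshot(root, "s1");
    SnapshotDiffReport report = dfs.getSnapshotDiffReport(root, "s0", "s1");
    System.out.println(report);
  }
}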

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Fri Jun 20 05:03:56 2014
@@ -28,12 +28,14 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -396,4 +398,39 @@ public class TestSnapshotBlocksMap {
     assertEquals(1, blks.length);
     assertEquals(BLOCKSIZE, blks[0].getNumBytes());
   }
+  
+  /**
+   * Make sure that deleting a file which was appended to after being
+   * snapshotted at zero length (leaving a zero-length, blockless copy in
+   * the snapshot) still lets the NameNode save a valid fsimage.
+   */
+  @Test
+  public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final byte[] testData = "foo bar baz".getBytes();
+    
+    // Create a zero-length file.
+    DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
+    assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
+
+    // Create a snapshot that includes that file.
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+    
+    // Extend that file.
+    FSDataOutputStream out = hdfs.append(bar);
+    out.write(testData);
+    out.close();
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(testData.length, blks[0].getNumBytes());
+    
+    // Delete the file.
+    hdfs.delete(bar, true);
+    
+    // Now make sure that the NN can still save an fsimage successfully.
+    cluster.getNameNode().getRpcServer().setSafeMode(
+        SafeModeAction.SAFEMODE_ENTER, false);
+    cluster.getNameNode().getRpcServer().saveNamespace();
+  }
 }
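
The test drives safe mode and saveNamespace through the NameNode RPC server directly; the client-side equivalent (superuser only) is roughly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SaveNamespaceSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem)
        new Path("/").getFileSystem(new Configuration());

    // saveNamespace requires the NameNode to be in safe mode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  }
}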

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java Fri Jun 20 05:03:56 2014
@@ -249,10 +249,10 @@ public class TestXAttrWithSnapshot {
   private static void doSnapshotRootRemovalAssertions(Path path,
       Path snapshotPath) throws Exception {
     Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 0);
+    Assert.assertEquals(0, xattrs.size());
 
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(xattrs.size(), 2);
+    Assert.assertEquals(2, xattrs.size());
     Assert.assertArrayEquals(value1, xattrs.get(name1));
     Assert.assertArrayEquals(value2, xattrs.get(name2));
   }
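
doSnapshotRootRemovalAssertions above relies on XAttrs being preserved in snapshots: after removal from the live directory they remain readable through the read-only .snapshot path. A minimal sketch, assuming a snapshottable directory with an existing snapshot named s0 (names illustrative):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotXAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/snapdir");  // illustrative snapshottable dir

    // XAttrs present at snapshot time stay visible under .snapshot even
    // after they are removed from the live directory.
    Map<String, byte[]> live = fs.getXAttrs(dir);
    Map<String, byte[]> snap = fs.getXAttrs(new Path(dir, ".snapshot/s0"));
    System.out.println("live=" + live.size() + " snapshot=" + snap.size());
  }
}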

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1604086&r1=1604085&r2=1604086&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/HDFS-5442/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Fri Jun 20 05:03:56 2014
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1394849922137</EXPIRY_DATE>
-        <KEY>37e1a64049bbef35</KEY>
+        <EXPIRY_DATE>1403590428625</EXPIRY_DATE>
+        <KEY>16f34bfba67b2552</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1394849922140</EXPIRY_DATE>
-        <KEY>7c0bf5039242fc54</KEY>
+        <EXPIRY_DATE>1403590428631</EXPIRY_DATE>
+        <KEY>dbe6282854469833</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,18 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722811</MTIME>
-      <ATIME>1394158722811</ATIME>
+      <MTIME>1402899229669</MTIME>
+      <ATIME>1402899229669</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>6</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>8</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -59,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722832</MTIME>
-      <ATIME>1394158722811</ATIME>
+      <MTIME>1402899229711</MTIME>
+      <ATIME>1402899229669</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -78,9 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1394158722836</TIMESTAMP>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>8</RPC_CALLID>
+      <TIMESTAMP>1402899229718</TIMESTAMP>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>10</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -89,9 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1394158722842</TIMESTAMP>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <TIMESTAMP>1402899229730</TIMESTAMP>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -101,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1394158722848</TIMESTAMP>
+      <TIMESTAMP>1402899229748</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -136,8 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>14</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>16</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,8 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>15</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,8 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>16</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -169,18 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722872</MTIME>
-      <ATIME>1394158722872</ATIME>
+      <MTIME>1402899229871</MTIME>
+      <ATIME>1402899229871</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>17</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -191,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722874</MTIME>
-      <ATIME>1394158722872</ATIME>
+      <MTIME>1402899229881</MTIME>
+      <ATIME>1402899229871</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -253,10 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1394158722890</TIMESTAMP>
+      <TIMESTAMP>1402899229963</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>24</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>26</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -267,18 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722895</MTIME>
-      <ATIME>1394158722895</ATIME>
+      <MTIME>1402899229981</MTIME>
+      <ATIME>1402899229981</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>26</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>28</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -383,8 +383,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722986</MTIME>
-      <ATIME>1394158722895</ATIME>
+      <MTIME>1402899230219</MTIME>
+      <ATIME>1402899229981</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -404,7 +404,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -418,18 +418,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158722989</MTIME>
-      <ATIME>1394158722989</ATIME>
+      <MTIME>1402899230235</MTIME>
+      <ATIME>1402899230235</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>39</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -534,8 +534,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158723010</MTIME>
-      <ATIME>1394158722989</ATIME>
+      <MTIME>1402899230307</MTIME>
+      <ATIME>1402899230235</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -555,7 +555,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -569,18 +569,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158723012</MTIME>
-      <ATIME>1394158723012</ATIME>
+      <MTIME>1402899230320</MTIME>
+      <ATIME>1402899230320</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>51</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -685,8 +685,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158723035</MTIME>
-      <ATIME>1394158723012</ATIME>
+      <MTIME>1402899230383</MTIME>
+      <ATIME>1402899230320</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -706,7 +706,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -718,13 +718,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1394158723039</TIMESTAMP>
+      <TIMESTAMP>1402899230394</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>62</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -735,15 +735,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1394158723044</MTIME>
-      <ATIME>1394158723044</ATIME>
+      <MTIME>1402899230406</MTIME>
+      <ATIME>1402899230406</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -754,18 +754,18 @@
       <INODEID>16393</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158723047</MTIME>
-      <ATIME>1394158723047</ATIME>
+      <MTIME>1402899230413</MTIME>
+      <ATIME>1402899230413</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_221786725_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_1233039831_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>64</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>66</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -821,7 +821,7 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_221786725_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_1233039831_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -834,8 +834,8 @@
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1394158725708</MTIME>
-      <ATIME>1394158723047</ATIME>
+      <MTIME>1402899232526</MTIME>
+      <ATIME>1402899230413</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -845,7 +845,7 @@
         <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>jing</USERNAME>
+        <USERNAME>andrew</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -856,13 +856,13 @@
     <DATA>
       <TXID>66</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <OWNERNAME>jing</OWNERNAME>
-      <GROUPNAME>staff</GROUPNAME>
+      <OWNERNAME>andrew</OWNERNAME>
+      <GROUPNAME>andrew</GROUPNAME>
       <MODE>493</MODE>
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>71</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>73</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -871,8 +871,8 @@
       <TXID>67</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>72</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>74</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -883,9 +883,9 @@
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844403372420029</EXPIRATION>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>73</RPC_CALLID>
+      <EXPIRATION>2305844412112927450</EXPIRATION>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>75</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -894,8 +894,8 @@
       <TXID>69</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>74</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -903,8 +903,8 @@
     <DATA>
       <TXID>70</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>75</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>77</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -912,8 +912,8 @@
     <DATA>
       <TXID>71</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>76</RPC_CALLID>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>78</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -921,51 +921,91 @@
     <DATA>
       <TXID>72</TXID>
       <SRC>/file_concat_target</SRC>
+      <ENTRY>
+        <SCOPE>ACCESS</SCOPE>
+        <TYPE>USER</TYPE>
+        <PERM>rw-</PERM>
+      </ENTRY>
+      <ENTRY>
+        <SCOPE>ACCESS</SCOPE>
+        <TYPE>USER</TYPE>
+        <NAME>user</NAME>
+        <PERM>rw-</PERM>
+      </ENTRY>
+      <ENTRY>
+        <SCOPE>ACCESS</SCOPE>
+        <TYPE>GROUP</TYPE>
+        <PERM>-w-</PERM>
+      </ENTRY>
+      <ENTRY>
+        <SCOPE>ACCESS</SCOPE>
+        <TYPE>MASK</TYPE>
+        <PERM>rw-</PERM>
+      </ENTRY>
+      <ENTRY>
+        <SCOPE>ACCESS</SCOPE>
+        <TYPE>OTHER</TYPE>
+        <PERM>---</PERM>
+      </ENTRY>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
+    <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
       <TXID>73</TXID>
-      <STARTTIME>1394158726098</STARTTIME>
+      <SRC>/file_concat_target</SRC>
+      <XATTR>
+        <NAMESPACE>USER</NAMESPACE>
+        <NAME>a1</NAME>
+        <VALUE>0x313233</VALUE>
+      </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>80</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
+    <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
       <TXID>74</TXID>
-      <FINALIZETIME>1394158726098</FINALIZETIME>
+      <SRC>/file_concat_target</SRC>
+      <XATTR>
+        <NAMESPACE>USER</NAMESPACE>
+        <NAME>a2</NAME>
+        <VALUE>0x373839</VALUE>
+      </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>81</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_SET_XATTR</OPCODE>
+    <OPCODE>OP_REMOVE_XATTR</OPCODE>
     <DATA>
       <TXID>75</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
-        <NAME>a1</NAME>
-        <VALUE>0x313233</VALUE>
+        <NAME>a2</NAME>
       </XATTR>
-      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
-      <RPC_CALLID>80</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_REMOVE_XATTR</OPCODE>
+    <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
     <DATA>
       <TXID>76</TXID>
-      <SRC>/file_concat_target</SRC>
-      <XATTR>
-        <NAMESPACE>USER</NAMESPACE>
-        <NAME>a1</NAME>
-      </XATTR>
+      <STARTTIME>1402899233646</STARTTIME>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
     <DATA>
       <TXID>77</TXID>
+      <FINALIZETIME>1402899233647</FINALIZETIME>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
+    <DATA>
+      <TXID>78</TXID>
     </DATA>
   </RECORD>
 </EDITS>
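
(The binary editsStored fixture and this XML rendering are kept in sync; the XML form can be regenerated from the binary one with the offline edits viewer, roughly: hdfs oev -p xml -i editsStored -o editsStored.xml.)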


