hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1556552 [3/4] - in /hadoop/common/branches/HDFS-5535/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ h...
Date: Wed, 08 Jan 2014 14:36:13 GMT
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java Wed Jan  8 14:36:09 2014
@@ -53,7 +53,7 @@ abstract class AbstractINodeDiffList<N e
   }
 
   /** @return an {@link AbstractINodeDiff}. */
-  abstract D createDiff(Snapshot snapshot, N currentINode);
+  abstract D createDiff(int snapshotId, N currentINode);
 
   /** @return a snapshot copy of the current inode. */  
   abstract A createSnapshotCopy(N currentINode);
@@ -63,25 +63,25 @@ abstract class AbstractINodeDiffList<N e
    * outside. If the diff to remove is not the first one in the diff list, we 
    * need to combine the diff with its previous one.
    * 
-   * @param snapshot The snapshot to be deleted
-   * @param prior The snapshot taken before the to-be-deleted snapshot
+   * @param snapshot The id of the snapshot to be deleted
+   * @param prior The id of the snapshot taken before the to-be-deleted snapshot
    * @param collectedBlocks Used to collect information for blocksMap update
    * @return delta in namespace. 
    */
-  public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
-      Snapshot prior, final N currentINode,
+  public final Quota.Counts deleteSnapshotDiff(final int snapshot,
+      final int prior, final N currentINode,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, boolean countDiffChange) 
       throws QuotaExceededException {
-    int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());
+    int snapshotIndex = Collections.binarySearch(diffs, snapshot);
     
     Quota.Counts counts = Quota.Counts.newInstance();
     D removed = null;
     if (snapshotIndex == 0) {
-      if (prior != null) {
+      if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before
         // set the snapshot to latestBefore
-        diffs.get(snapshotIndex).setSnapshot(prior);
-      } else {
+        diffs.get(snapshotIndex).setSnapshotId(prior);
+      } else { // there is no snapshot before
         removed = diffs.remove(0);
         if (countDiffChange) {
           counts.add(Quota.NAMESPACE, 1);
@@ -96,8 +96,8 @@ abstract class AbstractINodeDiffList<N e
       }
     } else if (snapshotIndex > 0) {
       final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
-      if (!previous.getSnapshot().equals(prior)) {
-        diffs.get(snapshotIndex).setSnapshot(prior);
+      if (previous.getSnapshotId() != prior) {
+        diffs.get(snapshotIndex).setSnapshotId(prior);
       } else {
         // combine the to-be-removed diff with its previous diff
         removed = diffs.remove(snapshotIndex);
@@ -120,10 +120,10 @@ abstract class AbstractINodeDiffList<N e
   }
 
   /** Add an {@link AbstractINodeDiff} for the given snapshot. */
-  final D addDiff(Snapshot latest, N currentINode)
+  final D addDiff(int latestSnapshotId, N currentINode)
       throws QuotaExceededException {
     currentINode.addSpaceConsumed(1, 0, true);
-    return addLast(createDiff(latest, currentINode));
+    return addLast(createDiff(latestSnapshotId, currentINode));
   }
 
   /** Append the diff at the end of the list. */
@@ -149,10 +149,10 @@ abstract class AbstractINodeDiffList<N e
     return n == 0? null: diffs.get(n - 1);
   }
 
-  /** @return the last snapshot. */
-  public final Snapshot getLastSnapshot() {
+  /** @return the id of the last snapshot. */
+  public final int getLastSnapshotId() {
     final AbstractINodeDiff<N, A, D> last = getLast();
-    return last == null? null: last.getSnapshot();
+    return last == null ? Snapshot.CURRENT_STATE_ID : last.getSnapshotId();
   }
   
   /**
@@ -161,60 +161,49 @@ abstract class AbstractINodeDiffList<N e
    *                 snapshot id.
    * @param exclusive True means the returned snapshot's id must be < the given
    *                  id, otherwise <=.
-   * @return The latest snapshot before the given snapshot.
+   * @return The id of the latest snapshot before the given snapshot.
    */
-  private final Snapshot getPrior(int anchorId, boolean exclusive) {
-    if (anchorId == Snapshot.INVALID_ID) {
-      return getLastSnapshot();
+  private final int getPrior(int anchorId, boolean exclusive) {
+    if (anchorId == Snapshot.CURRENT_STATE_ID) {
+      return getLastSnapshotId();
     }
     final int i = Collections.binarySearch(diffs, anchorId);
     if (exclusive) { // must be the one before
       if (i == -1 || i == 0) {
-        return null;
+        return Snapshot.NO_SNAPSHOT_ID;
       } else {
         int priorIndex = i > 0 ? i - 1 : -i - 2;
-        return diffs.get(priorIndex).getSnapshot();
+        return diffs.get(priorIndex).getSnapshotId();
       }
     } else { // the one, or the one before if not existing
       if (i >= 0) {
-        return diffs.get(i).getSnapshot();
+        return diffs.get(i).getSnapshotId();
       } else if (i < -1) {
-        return diffs.get(-i - 2).getSnapshot();
+        return diffs.get(-i - 2).getSnapshotId();
       } else { // i == -1
-        return null;
+        return Snapshot.NO_SNAPSHOT_ID;
       }
     }
   }
   
-  public final Snapshot getPrior(int snapshotId) {
+  public final int getPrior(int snapshotId) {
     return getPrior(snapshotId, false);
   }
   
   /**
    * Update the prior snapshot.
    */
-  final Snapshot updatePrior(Snapshot snapshot, Snapshot prior) {
-    int id = snapshot == null ? Snapshot.INVALID_ID : snapshot.getId();
-    Snapshot s = getPrior(id, true);
-    if (s != null && 
-        (prior == null || Snapshot.ID_COMPARATOR.compare(s, prior) > 0)) {
-      return s;
+  final int updatePrior(int snapshot, int prior) {
+    int p = getPrior(snapshot, true);
+    if (p != Snapshot.CURRENT_STATE_ID
+        && Snapshot.ID_INTEGER_COMPARATOR.compare(p, prior) > 0) {
+      return p;
     }
     return prior;
   }
-
-  /**
-   * @return the diff corresponding to the given snapshot.
-   *         When the diff is null, it means that the current state and
-   *         the corresponding snapshot state are the same. 
-   */
-  public final D getDiff(Snapshot snapshot) {
-    return getDiffById(snapshot == null ? 
-        Snapshot.INVALID_ID : snapshot.getId());
-  }
   
-  private final D getDiffById(final int snapshotId) {
-    if (snapshotId == Snapshot.INVALID_ID) {
+  public final D getDiffById(final int snapshotId) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
       return null;
     }
     final int i = Collections.binarySearch(diffs, snapshotId);
@@ -234,9 +223,9 @@ abstract class AbstractINodeDiffList<N e
   * Search for the snapshot whose id is 1) no less than the given id, 
   * and 2) closest to the given id.
    */
-  public final Snapshot getSnapshotById(final int snapshotId) {
+  public final int getSnapshotById(final int snapshotId) {
     D diff = getDiffById(snapshotId);
-    return diff == null ? null : diff.getSnapshot();
+    return diff == null ? Snapshot.CURRENT_STATE_ID : diff.getSnapshotId();
   }
   
   /**
@@ -271,8 +260,8 @@ abstract class AbstractINodeDiffList<N e
    *         Note that the current inode is returned if there is no change
    *         between the given snapshot and the current state. 
    */
-  public A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
-    final D diff = getDiff(snapshot);
+  public A getSnapshotINode(final int snapshotId, final A currentINode) {
+    final D diff = getDiffById(snapshotId);
     final A inode = diff == null? null: diff.getSnapshotINode();
     return inode == null? currentINode: inode;
   }
@@ -281,15 +270,16 @@ abstract class AbstractINodeDiffList<N e
    * Check if the latest snapshot diff exists.  If not, add it.
    * @return the latest snapshot diff, which is never null.
    */
-  final D checkAndAddLatestSnapshotDiff(Snapshot latest, N currentINode)
+  final D checkAndAddLatestSnapshotDiff(int latestSnapshotId, N currentINode)
       throws QuotaExceededException {
     final D last = getLast();
     if (last != null
-        && Snapshot.ID_COMPARATOR.compare(last.getSnapshot(), latest) >= 0) {
+        && Snapshot.ID_INTEGER_COMPARATOR.compare(last.getSnapshotId(),
+            latestSnapshotId) >= 0) {
       return last;
     } else {
       try {
-        return addDiff(latest, currentINode);
+        return addDiff(latestSnapshotId, currentINode);
       } catch(NSQuotaExceededException e) {
         e.setMessagePrefix("Failed to record modification for snapshot");
         throw e;
@@ -298,10 +288,10 @@ abstract class AbstractINodeDiffList<N e
   }
 
   /** Save the snapshot copy to the latest snapshot. */
-  public void saveSelf2Snapshot(Snapshot latest, N currentINode, A snapshotCopy)
-      throws QuotaExceededException {
-    if (latest != null) {
-      D diff = checkAndAddLatestSnapshotDiff(latest, currentINode);
+  public void saveSelf2Snapshot(int latestSnapshotId, N currentINode,
+      A snapshotCopy) throws QuotaExceededException {
+    if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
+      D diff = checkAndAddLatestSnapshotDiff(latestSnapshotId, currentINode);
       if (diff.snapshotINode == null) {
         if (snapshotCopy == null) {
           snapshotCopy = createSnapshotCopy(currentINode);

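A note on the recurring pattern in this file: Snapshot object parameters become plain int ids, with Snapshot.CURRENT_STATE_ID standing in for the old "null means the current state" convention and Snapshot.NO_SNAPSHOT_ID for "no prior snapshot exists". The diff list stays sorted by snapshot id, so lookups still go through Collections.binarySearch. Below is a minimal, self-contained analog of that lookup (the class and field names here are illustrative, not part of the commit):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    // Stand-in for AbstractINodeDiff: an element comparable against a snapshot id.
    class Diff implements Comparable<Integer> {
      final int snapshotId;
      Diff(int snapshotId) { this.snapshotId = snapshotId; }
      @Override
      public int compareTo(Integer thatId) {
        return Integer.compare(snapshotId, thatId);
      }
    }

    public class DiffLookup {
      public static void main(String[] args) {
        List<Diff> diffs = Arrays.asList(new Diff(2), new Diff(5), new Diff(9));
        int i = Collections.binarySearch(diffs, 5); // exact hit: index 1
        int j = Collections.binarySearch(diffs, 7); // miss: -(insertion point) - 1 = -3
        // getDiffById would take diffs.get(-j - 1), the first diff with id >= 7
        // (the diff with id 9); getPrior takes diffs.get(-j - 2), the last diff
        // with id < 7 (the diff with id 5).
        System.out.println(i + " " + j); // prints "1 -3"
      }
    }
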
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java Wed Jan  8 14:36:09 2014
@@ -225,30 +225,36 @@ public class DirectoryWithSnapshotFeatur
     private final int childrenSize;
     /** The children list diff. */
     private final ChildrenDiff diff;
+    private boolean isSnapshotRoot = false;
+    
+    private DirectoryDiff(int snapshotId, INodeDirectory dir) {
+      super(snapshotId, null, null);
 
-    private DirectoryDiff(Snapshot snapshot, INodeDirectory dir) {
-      super(snapshot, null, null);
-
-      this.childrenSize = dir.getChildrenList(null).size();
+      this.childrenSize = dir.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
       this.diff = new ChildrenDiff();
     }
 
     /** Constructor used by FSImage loading */
-    DirectoryDiff(Snapshot snapshot, INodeDirectoryAttributes snapshotINode,
-        DirectoryDiff posteriorDiff, int childrenSize,
-        List<INode> createdList, List<INode> deletedList) {
-      super(snapshot, snapshotINode, posteriorDiff);
+    DirectoryDiff(int snapshotId, INodeDirectoryAttributes snapshotINode,
+        DirectoryDiff posteriorDiff, int childrenSize, List<INode> createdList,
+        List<INode> deletedList, boolean isSnapshotRoot) {
+      super(snapshotId, snapshotINode, posteriorDiff);
       this.childrenSize = childrenSize;
       this.diff = new ChildrenDiff(createdList, deletedList);
+      this.isSnapshotRoot = isSnapshotRoot;
     }
 
     ChildrenDiff getChildrenDiff() {
       return diff;
     }
-
-    /** Is the inode the root of the snapshot? */
+    
+    void setSnapshotRoot(INodeDirectoryAttributes root) {
+      this.snapshotINode = root;
+      this.isSnapshotRoot = true;
+    }
+    
     boolean isSnapshotRoot() {
-      return snapshotINode == snapshot.getRoot();
+      return isSnapshotRoot;
     }
 
     @Override
@@ -287,7 +293,7 @@ public class DirectoryWithSnapshotFeatur
               combined.combinePosterior(d.diff, null);
             }
             children = combined.apply2Current(ReadOnlyList.Util.asList(
-                currentDir.getChildrenList(null)));
+                currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
           }
           return children;
         }
@@ -327,7 +333,7 @@ public class DirectoryWithSnapshotFeatur
           return null;
         } else if (d.getPosterior() == null) {
           // no more posterior diff, get from current inode.
-          return currentDir.getChild(name, null);
+          return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID);
         }
       }
     }
@@ -342,11 +348,9 @@ public class DirectoryWithSnapshotFeatur
       writeSnapshot(out);
       out.writeInt(childrenSize);
 
-      // write snapshotINode
-      if (isSnapshotRoot()) {
-        out.writeBoolean(true);
-      } else {
-        out.writeBoolean(false);
+      // Write snapshotINode
+      out.writeBoolean(isSnapshotRoot);
+      if (!isSnapshotRoot) {
         if (snapshotINode != null) {
           out.writeBoolean(true);
           FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
@@ -373,7 +377,7 @@ public class DirectoryWithSnapshotFeatur
       extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
 
     @Override
-    DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) {
+    DirectoryDiff createDiff(int snapshot, INodeDirectory currentDir) {
       return new DirectoryDiff(snapshot, currentDir);
     }
 
@@ -424,12 +428,13 @@ public class DirectoryWithSnapshotFeatur
   /**
    * Destroy a subtree under a DstReference node.
    */
-  public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
-      final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
+  public static void destroyDstSubtree(INode inode, final int snapshot,
+      final int prior, final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes) throws QuotaExceededException {
-    Preconditions.checkArgument(prior != null);
+    Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
     if (inode.isReference()) {
-      if (inode instanceof INodeReference.WithName && snapshot != null) {
+      if (inode instanceof INodeReference.WithName
+          && snapshot != Snapshot.CURRENT_STATE_ID) {
         // this inode has been renamed before the deletion of the DstReference
         // subtree
         inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
@@ -447,18 +452,18 @@ public class DirectoryWithSnapshotFeatur
       DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
       if (sf != null) {
         DirectoryDiffList diffList = sf.getDiffs();
-        DirectoryDiff priorDiff = diffList.getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+        DirectoryDiff priorDiff = diffList.getDiffById(prior);
+        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
           List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
           excludedNodes = cloneDiffList(dList);
         }
         
-        if (snapshot != null) {
+        if (snapshot != Snapshot.CURRENT_STATE_ID) {
           diffList.deleteSnapshotDiff(snapshot, prior, dir, collectedBlocks,
               removedINodes, true);
         }
-        priorDiff = diffList.getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+        priorDiff = diffList.getDiffById(prior);
+        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
           priorDiff.diff.destroyCreatedList(dir, collectedBlocks,
               removedINodes);
         }
@@ -478,14 +483,14 @@ public class DirectoryWithSnapshotFeatur
    * deleted list of prior.
    * @param inode The inode to clean.
    * @param post The post snapshot.
-   * @param prior The prior snapshot.
+   * @param prior The id of the prior snapshot.
    * @param collectedBlocks Used to collect blocks for later deletion.
    * @return Quota usage update.
    */
   private static Quota.Counts cleanDeletedINode(INode inode,
-      final Snapshot post, final Snapshot prior,
+      final int post, final int prior,
       final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes, final boolean countDiffChange) 
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     Deque<INode> queue = new ArrayDeque<INode>();
@@ -494,7 +499,7 @@ public class DirectoryWithSnapshotFeatur
       INode topNode = queue.pollFirst();
       if (topNode instanceof INodeReference.WithName) {
         INodeReference.WithName wn = (INodeReference.WithName) topNode;
-        if (wn.getLastSnapshotId() >= post.getId()) {
+        if (wn.getLastSnapshotId() >= post) {
           wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
               countDiffChange);
         }
@@ -511,8 +516,8 @@ public class DirectoryWithSnapshotFeatur
         if (sf != null) {
           // delete files/dirs created after prior. Note that these
           // files/dirs, along with inode, were deleted right after post.
-          DirectoryDiff priorDiff = sf.getDiffs().getDiff(prior);
-          if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
+          if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
             priorChildrenDiff = priorDiff.getChildrenDiff();
             counts.add(priorChildrenDiff.destroyCreatedList(dir,
                 collectedBlocks, removedINodes));
@@ -540,8 +545,8 @@ public class DirectoryWithSnapshotFeatur
   }
 
   /** @return the last snapshot. */
-  public Snapshot getLastSnapshot() {
-    return diffs.getLastSnapshot();
+  public int getLastSnapshotId() {
+    return diffs.getLastSnapshotId();
   }
 
   /** @return the snapshot diff list. */
@@ -565,11 +570,13 @@ public class DirectoryWithSnapshotFeatur
    * to make sure that parent is in the given snapshot "latest".
    */
   public boolean addChild(INodeDirectory parent, INode inode,
-      boolean setModTime, Snapshot latest) throws QuotaExceededException {
-    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
+      boolean setModTime, int latestSnapshotId) throws QuotaExceededException {
+    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
+        parent).diff;
     int undoInfo = diff.create(inode);
 
-    final boolean added = parent.addChild(inode, setModTime, null);
+    final boolean added = parent.addChild(inode, setModTime,
+        Snapshot.CURRENT_STATE_ID);
     if (!added) {
       diff.undoCreate(inode, undoInfo);
     }
@@ -581,7 +588,7 @@ public class DirectoryWithSnapshotFeatur
    * needs to make sure that parent is in the given snapshot "latest".
    */
   public boolean removeChild(INodeDirectory parent, INode child,
-      Snapshot latest) throws QuotaExceededException {
+      int latestSnapshotId) throws QuotaExceededException {
     // For a directory that is not a renamed node, if isInLatestSnapshot returns
     // false, the directory is not in the latest snapshot, thus we do not need
     // to record the removed child in any snapshot.
@@ -593,7 +600,8 @@ public class DirectoryWithSnapshotFeatur
     // directory node cannot be in any snapshot (not in current tree, nor in
     // previous src tree). Thus we do not need to record the removed child in
     // any snapshot.
-    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, parent).diff;
+    ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
+        parent).diff;
     UndoInfo<INode> undoInfo = diff.delete(child);
 
     final boolean removed = parent.removeChild(child);
@@ -611,29 +619,29 @@ public class DirectoryWithSnapshotFeatur
    *         for the snapshot and return it. 
    */
   public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
-      final Snapshot snapshot) {
-    final DirectoryDiff diff = diffs.getDiff(snapshot);
+      final int snapshotId) {
+    final DirectoryDiff diff = diffs.getDiffById(snapshotId);
     return diff != null ? diff.getChildrenList(currentINode) : currentINode
-        .getChildrenList(null);
+        .getChildrenList(Snapshot.CURRENT_STATE_ID);
   }
   
   public INode getChild(INodeDirectory currentINode, byte[] name,
-      Snapshot snapshot) {
-    final DirectoryDiff diff = diffs.getDiff(snapshot);
+      int snapshotId) {
+    final DirectoryDiff diff = diffs.getDiffById(snapshotId);
     return diff != null ? diff.getChild(name, true, currentINode)
-        : currentINode.getChild(name, null);
+        : currentINode.getChild(name, Snapshot.CURRENT_STATE_ID);
   }
   
   /** Used to record the modification of a symlink node */
   public INode saveChild2Snapshot(INodeDirectory currentINode,
-      final INode child, final Snapshot latest, final INode snapshotCopy)
+      final INode child, final int latestSnapshotId, final INode snapshotCopy)
       throws QuotaExceededException {
     Preconditions.checkArgument(!child.isDirectory(),
         "child is a directory, child=%s", child);
-    Preconditions.checkArgument(latest != null);
+    Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
     
-    final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest,
-        currentINode);
+    final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(
+        latestSnapshotId, currentINode);
     if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
       // it was already saved in the latest snapshot earlier.  
       return child;
@@ -656,7 +664,7 @@ public class DirectoryWithSnapshotFeatur
   public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
     for(DirectoryDiff d : diffs) {
       for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeQuotaUsage(counts, false, Snapshot.INVALID_ID);
+        deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
       }
     }
     counts.add(Quota.NAMESPACE, diffs.asList().size());
@@ -744,14 +752,14 @@ public class DirectoryWithSnapshotFeatur
   }
 
   public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
-      final Snapshot snapshot, Snapshot prior,
+      final int snapshot, int prior,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     Map<INode, INode> priorCreated = null;
     Map<INode, INode> priorDeleted = null;
-    if (snapshot == null) { // delete the current directory
+    if (snapshot == Snapshot.CURRENT_STATE_ID) { // delete the current directory
       currentINode.recordModification(prior);
       // delete everything in created list
       DirectoryDiff lastDiff = diffs.getLast();
@@ -764,9 +772,9 @@ public class DirectoryWithSnapshotFeatur
       prior = getDiffs().updatePrior(snapshot, prior);
       // if there is a snapshot diff associated with prior, we need to record
       // its original created and deleted list before deleting post
-      if (prior != null) {
-        DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+      if (prior != Snapshot.NO_SNAPSHOT_ID) {
+        DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
+        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
           List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
           List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
           priorCreated = cloneDiffList(cList);
@@ -774,13 +782,13 @@ public class DirectoryWithSnapshotFeatur
         }
       }
       
-      counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, currentINode, 
-          collectedBlocks, removedINodes, countDiffChange));
+      counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
+          currentINode, collectedBlocks, removedINodes, countDiffChange));
       
       // check priorDiff again since it may be created during the diff deletion
-      if (prior != null) {
-        DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
-        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+      if (prior != Snapshot.NO_SNAPSHOT_ID) {
+        DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
+        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
           // For files/directories created between "prior" and "snapshot", 
           // we need to clear snapshot copies for "snapshot". Note that we must
          // use NO_SNAPSHOT_ID as prior in the cleanSubtree call. Files/directories that
@@ -791,8 +799,8 @@ public class DirectoryWithSnapshotFeatur
             for (INode cNode : priorDiff.getChildrenDiff().getList(
                 ListType.CREATED)) {
               if (priorCreated.containsKey(cNode)) {
-                counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
-                    removedINodes, countDiffChange));
+                counts.add(cNode.cleanSubtree(snapshot, Snapshot.NO_SNAPSHOT_ID,
+                    collectedBlocks, removedINodes, countDiffChange));
               }
             }
           }

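With the Snapshot reference gone from the diff, DirectoryDiff can no longer decide "is this the snapshot root?" by comparing snapshotINode against snapshot.getRoot(), so the answer becomes an explicit isSnapshotRoot flag: set through setSnapshotRoot(...) when a snapshot is created, passed into the FSImage-loading constructor, and written directly to the image. Stitched together from hunks elsewhere in this commit:

    // At snapshot creation (INodeDirectorySnapshottable.addSnapshot, below):
    final DirectoryDiff d = getDiffs().addDiff(id, this);
    d.setSnapshotRoot(s.getRoot()); // records snapshotINode and sets the flag

    // At FSImage load (SnapshotFSImageFormat.loadDirectoryDiff, below), the
    // flag is reconstructed as: snapshotINode == snapshot.getRoot()
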
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java Wed Jan  8 14:36:09 2014
@@ -38,15 +38,15 @@ public class FileDiff extends
   /** The file size at snapshot creation time. */
   private final long fileSize;
 
-  FileDiff(Snapshot snapshot, INodeFile file) {
-    super(snapshot, null, null);
+  FileDiff(int snapshotId, INodeFile file) {
+    super(snapshotId, null, null);
     fileSize = file.computeFileSize();
   }
 
   /** Constructor used by FSImage loading */
-  FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
+  FileDiff(int snapshotId, INodeFileAttributes snapshotINode,
       FileDiff posteriorDiff, long fileSize) {
-    super(snapshot, snapshotINode, posteriorDiff);
+    super(snapshotId, snapshotINode, posteriorDiff);
     this.fileSize = fileSize;
   }
 

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java Wed Jan  8 14:36:09 2014
@@ -25,8 +25,8 @@ public class FileDiffList extends
     AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
   
   @Override
-  FileDiff createDiff(Snapshot snapshot, INodeFile file) {
-    return new FileDiff(snapshot, file);
+  FileDiff createDiff(int snapshotId, INodeFile file) {
+    return new FileDiff(snapshotId, file);
   }
   
   @Override

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java Wed Jan  8 14:36:09 2014
@@ -78,22 +78,22 @@ public class FileWithSnapshotFeature imp
     return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
   }
   
-  public Quota.Counts cleanFile(final INodeFile file, final Snapshot snapshot,
-      Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
+  public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
+      int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
-    if (snapshot == null) {
+    if (snapshotId == Snapshot.CURRENT_STATE_ID) {
       // delete the current file while the file has snapshot feature
       if (!isCurrentFileDeleted()) {
-        file.recordModification(prior);
+        file.recordModification(priorSnapshotId);
         deleteCurrentFile();
       }
       collectBlocksAndClear(file, collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete the snapshot
-      prior = getDiffs().updatePrior(snapshot, prior);
-      return diffs.deleteSnapshotDiff(snapshot, prior, file, collectedBlocks,
-          removedINodes, countDiffChange);
+      priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
+      return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
+          collectedBlocks, removedINodes, countDiffChange);
     }
   }
   

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Wed Jan  8 14:36:09 2014
@@ -206,6 +206,15 @@ public class INodeDirectorySnapshottable
     return i < 0? null: snapshotsByNames.get(i);
   }
   
+  Snapshot getSnapshotById(int sid) {
+    for (Snapshot s : snapshotsByNames) {
+      if (s.getId() == sid) {
+        return s;
+      }
+    }
+    return null;
+  }
+  
   /** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
   public ReadOnlyList<Snapshot> getSnapshotList() {
     return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
@@ -297,13 +306,14 @@ public class INodeDirectorySnapshottable
           + "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
     }
 
-    final DirectoryDiff d = getDiffs().addDiff(s, this);
-    d.snapshotINode = s.getRoot();
+    final DirectoryDiff d = getDiffs().addDiff(id, this);
+    d.setSnapshotRoot(s.getRoot());
     snapshotsByNames.add(-i - 1, s);
 
     //set modification time
-    updateModificationTime(Time.now(), null);
-    s.getRoot().setModificationTime(getModificationTime(), null);
+    updateModificationTime(Time.now(), Snapshot.CURRENT_STATE_ID);
+    s.getRoot().setModificationTime(getModificationTime(),
+        Snapshot.CURRENT_STATE_ID);
     return s;
   }
   
@@ -326,10 +336,10 @@ public class INodeDirectorySnapshottable
           + ": the snapshot does not exist.");
     } else {
       final Snapshot snapshot = snapshotsByNames.get(i);
-      Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
+      int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
       try {
-        Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
-            removedINodes, true);
+        Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
+            collectedBlocks, removedINodes, true);
         INodeDirectory parent = getParent();
         if (parent != null) {
           // there will not be any WithName node corresponding to the deleted 
@@ -425,8 +435,9 @@ public class INodeDirectorySnapshottable
           diffReport.addDirDiff(dir, relativePath, diff);
         }
       }
-      ReadOnlyList<INode> children = dir.getChildrenList(diffReport
-          .isFromEarlier() ? diffReport.to : diffReport.from);
+      ReadOnlyList<INode> children = dir.getChildrenList(
+          diffReport.isFromEarlier() ? Snapshot.getSnapshotId(diffReport.to) : 
+            Snapshot.getSnapshotId(diffReport.from));
       for (INode child : children) {
         final byte[] name = child.getLocalNameBytes();
         if (diff.searchIndex(ListType.CREATED, name) < 0
@@ -454,16 +465,15 @@ public class INodeDirectorySnapshottable
    * Replace itself with {@link INodeDirectoryWithSnapshot} or
    * {@link INodeDirectory} depending on the latest snapshot.
    */
-  INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
+  INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap)
       throws QuotaExceededException {
-    if (latest == null) {
-      Preconditions.checkState(
-          getDirectoryWithSnapshotFeature().getLastSnapshot() == null,
-          "latest == null but getLastSnapshot() != null, this=%s", this);
+    if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
+      Preconditions.checkState(getDirectoryWithSnapshotFeature()
+          .getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this);
     }
     INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
-    if (latest != null) {
-      dir.recordModification(latest);
+    if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
+      dir.recordModification(latestSnapshotId);
     }
     return dir;
   }
@@ -475,10 +485,10 @@ public class INodeDirectorySnapshottable
 
   @Override
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
-      Snapshot snapshot) {
+      int snapshot) {
     super.dumpTreeRecursively(out, prefix, snapshot);
 
-    if (snapshot == null) {
+    if (snapshot == Snapshot.CURRENT_STATE_ID) {
       out.println();
       out.print(prefix);
 
@@ -494,7 +504,8 @@ public class INodeDirectorySnapshottable
           n++;
         }
       }
-      Preconditions.checkState(n == snapshotsByNames.size());
+      Preconditions.checkState(n == snapshotsByNames.size(), "#n=" + n
+          + ", snapshotsByNames.size()=" + snapshotsByNames.size());
       out.print(", #snapshot=");
       out.println(n);
 
@@ -522,8 +533,9 @@ public class INodeDirectorySnapshottable
   
             @Override
             public SnapshotAndINode next() {
-              final Snapshot s = next.snapshot;
-              final SnapshotAndINode pair = new SnapshotAndINode(s);
+              final SnapshotAndINode pair = new SnapshotAndINode(next
+                  .getSnapshotId(), getSnapshotById(next.getSnapshotId())
+                  .getRoot());
               next = findNext();
               return pair;
             }

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Wed Jan  8 14:36:09 2014
@@ -37,7 +37,11 @@ import org.apache.hadoop.hdfs.util.ReadO
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
 public class Snapshot implements Comparable<byte[]> {
-  public static final int INVALID_ID = -1;
+  /**
+   * This id is used to indicate the current state (vs. snapshots)
+   */
+  public static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;
+  public static final int NO_SNAPSHOT_ID = -1;
   
   /**
    * The pattern for generating the default snapshot name.
@@ -61,14 +65,18 @@ public class Snapshot implements Compara
         .toString();
   }
   
-  /** 
-   * Get the name of the given snapshot. 
+  /**
+   * Get the name of the given snapshot.
    * @param s The given snapshot.
    * @return The name of the snapshot, or an empty string if {@code s} is null
    */
   static String getSnapshotName(Snapshot s) {
     return s != null ? s.getRoot().getLocalName() : "";
   }
+  
+  public static int getSnapshotId(Snapshot s) {
+    return s == null ? CURRENT_STATE_ID : s.getId();
+  }
 
   /**
    * Compare snapshot with IDs, where null indicates the current status thus
@@ -78,9 +86,8 @@ public class Snapshot implements Compara
       = new Comparator<Snapshot>() {
     @Override
     public int compare(Snapshot left, Snapshot right) {
-      return ID_INTEGER_COMPARATOR.compare(
-          left == null? null: left.getId(),
-          right == null? null: right.getId());
+      return ID_INTEGER_COMPARATOR.compare(Snapshot.getSnapshotId(left),
+          Snapshot.getSnapshotId(right));
     }
   };
 
@@ -92,12 +99,9 @@ public class Snapshot implements Compara
       = new Comparator<Integer>() {
     @Override
     public int compare(Integer left, Integer right) {
-      // null means the current state, thus should be the largest
-      if (left == null) {
-        return right == null? 0: 1;
-      } else {
-        return right == null? -1: left - right; 
-      }
+      // Snapshot.CURRENT_STATE_ID means the current state, thus should be the 
+      // largest
+      return left - right;
     }
   };
 
@@ -108,12 +112,12 @@ public class Snapshot implements Compara
    * is not null).
    * 
    * @param inode the given inode that the returned snapshot needs to cover
-   * @param anchor the returned snapshot should be taken before this snapshot.
-   * @return the latest snapshot covers the given inode and was taken before the
-   *         the given snapshot (if it is not null).
+   * @param anchor the returned snapshot should be taken before this given id.
+   * @return id of the latest snapshot that covers the given inode and was taken 
+   *         before the given snapshot (if it is not null).
    */
-  public static Snapshot findLatestSnapshot(INode inode, Snapshot anchor) {
-    Snapshot latest = null;
+  public static int findLatestSnapshot(INode inode, final int anchor) {
+    int latest = NO_SNAPSHOT_ID;
     for(; inode != null; inode = inode.getParent()) {
       if (inode.isDirectory()) {
         final INodeDirectory dir = inode.asDirectory();
@@ -139,13 +143,13 @@ public class Snapshot implements Compara
     }
 
     @Override
-    public ReadOnlyList<INode> getChildrenList(Snapshot snapshot) {
-      return getParent().getChildrenList(snapshot);
+    public ReadOnlyList<INode> getChildrenList(int snapshotId) {
+      return getParent().getChildrenList(snapshotId);
     }
 
     @Override
-    public INode getChild(byte[] name, Snapshot snapshot) {
-      return getParent().getChild(name, snapshot);
+    public INode getChild(byte[] name, int snapshotId) {
+      return getParent().getChild(name, snapshotId);
     }
     
     @Override

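Choosing CURRENT_STATE_ID = Integer.MAX_VALUE - 1 keeps the old "null is largest" ordering while letting ID_INTEGER_COMPARATOR collapse to a plain subtraction; using MAX_VALUE - 1 rather than MAX_VALUE also keeps that subtraction from overflowing against NO_SNAPSHOT_ID = -1. A self-contained check of the ordering (the two constants are copied from the hunk above; everything else is illustrative):

    import java.util.Arrays;

    public class SnapshotIdOrdering {
      static final int CURRENT_STATE_ID = Integer.MAX_VALUE - 1;
      static final int NO_SNAPSHOT_ID = -1;

      public static void main(String[] args) {
        Integer[] ids = { 5, CURRENT_STATE_ID, 0, NO_SNAPSHOT_ID, 3 };
        Arrays.sort(ids, (l, r) -> l - r); // the comparison ID_INTEGER_COMPARATOR now uses
        System.out.println(Arrays.toString(ids));
        // [-1, 0, 3, 5, 2147483646]: the current state still sorts after every
        // real snapshot id, matching the old null-means-largest convention.
      }
    }
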
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Wed Jan  8 14:36:09 2014
@@ -118,7 +118,7 @@ public class SnapshotFSImageFormat {
 
   private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
       FSImageFormat.Loader loader) throws IOException {
-    // 1. Read the full path of the Snapshot root to identify the Snapshot
+    // 1. Read the id of the Snapshot root to identify the Snapshot
     final Snapshot snapshot = loader.getSnapshot(in);
 
     // 2. Load file size
@@ -128,7 +128,7 @@ public class SnapshotFSImageFormat {
     final INodeFileAttributes snapshotINode = in.readBoolean()?
         loader.loadINodeFileAttributes(in): null;
     
-    return new FileDiff(snapshot, snapshotINode, posterior, fileSize);
+    return new FileDiff(snapshot.getId(), snapshotINode, posterior, fileSize);
   }
 
   /**
@@ -149,7 +149,8 @@ public class SnapshotFSImageFormat {
       } // else go to the next SnapshotDiff
     } 
     // use the current child
-    INode currentChild = parent.getChild(createdNodeName, null);
+    INode currentChild = parent.getChild(createdNodeName,
+        Snapshot.CURRENT_STATE_ID);
     if (currentChild == null) {
       throw new IOException("Cannot find an INode associated with the INode "
           + DFSUtil.bytes2String(createdNodeName)
@@ -295,9 +296,9 @@ public class SnapshotFSImageFormat {
     
     // 6. Compose the SnapshotDiff
     List<DirectoryDiff> diffs = parent.getDiffs().asList();
-    DirectoryDiff sdiff = new DirectoryDiff(snapshot, snapshotINode,
-        diffs.isEmpty() ? null : diffs.get(0),
-        childrenSize, createdList, deletedList);
+    DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
+        diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
+        deletedList, snapshotINode == snapshot.getRoot());
     return sdiff;
   }
   

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Wed Jan  8 14:36:09 2014
@@ -114,7 +114,7 @@ public class SnapshotManager implements 
       s = (INodeDirectorySnapshottable)d; 
       s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
     } else {
-      s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot(),
+      s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
           fsdir.getINodeMap());
     }
     addSnapshottable(s);
@@ -160,7 +160,7 @@ public class SnapshotManager implements 
     if (s == fsdir.getRoot()) {
       s.setSnapshotQuota(0); 
     } else {
-      s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
+      s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
     }
     removeSnapshottable(s);
   }
@@ -324,7 +324,8 @@ public class SnapshotManager implements 
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),
             dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-            dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null),
+            dir.getLocalNameBytes(), dir.getId(), 
+            dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
             dir.getNumSnapshots(),
             dir.getSnapshotQuota(), dir.getParent() == null ? 
                 DFSUtil.EMPTY_BYTES : 

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java Wed Jan  8 14:36:09 2014
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.server.pr
  * Utilization report for a Datanode storage
  */
 public class StorageReport {
-  private final String storageID;
+  private final DatanodeStorage storage;
   private final boolean failed;
   private final long capacity;
   private final long dfsUsed;
@@ -30,9 +30,9 @@ public class StorageReport {
 
   public static final StorageReport[] EMPTY_ARRAY = {};
   
-  public StorageReport(String sid, boolean failed, long capacity, long dfsUsed,
-      long remaining, long bpUsed) {
-    this.storageID = sid;
+  public StorageReport(DatanodeStorage storage, boolean failed,
+      long capacity, long dfsUsed, long remaining, long bpUsed) {
+    this.storage = storage;
     this.failed = failed;
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
@@ -40,8 +40,8 @@ public class StorageReport {
     this.blockPoolUsed = bpUsed;
   }
 
-  public String getStorageID() {
-    return storageID;
+  public DatanodeStorage getStorage() {
+    return storage;
   }
 
   public boolean isFailed() {

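StorageReport now carries the whole DatanodeStorage rather than just its ID string, so report consumers can reach the storage's state and type without a separate lookup. A migration sketch (the DatanodeStorage getter names are assumptions, mirroring the constructor arguments in the BlockManagerTestUtil hunk later in this message):

    // Before: String sid = report.getStorageID();
    DatanodeStorage storage = report.getStorage();
    String sid = storage.getStorageID();
    // Newly reachable from the report (assumed getters):
    DatanodeStorage.State state = storage.getState();
    StorageType type = storage.getStorageType();
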
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Wed Jan  8 14:36:09 2014
@@ -196,12 +196,13 @@ message HeartbeatRequestProto {
 }
 
 message StorageReportProto {
-  required string storageUuid = 1;
+  required string storageUuid = 1 [ deprecated = true ];
   optional bool failed = 2 [ default = false ];
   optional uint64 capacity = 3 [ default = 0 ];
   optional uint64 dfsUsed = 4 [ default = 0 ];
   optional uint64 remaining = 5 [ default = 0 ];
   optional uint64 blockPoolUsed = 6 [ default = 0 ];
+  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
 }
 
 /**

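On the wire, the old required storageUuid stays (now marked deprecated) and the new optional DatanodeStorageProto field rides alongside it, so a sender can populate both and remain readable by old and new peers alike. A sender-side sketch (it assumes PBHelper.convert has a DatanodeStorage overload; the local variables are illustrative):

    StorageReportProto p = StorageReportProto.newBuilder()
        .setStorageUuid(storage.getStorageID()) // still required; old readers use this
        .setStorage(PBHelper.convert(storage))  // new field 7, supersedes storageUuid
        .setCapacity(capacity)
        .setDfsUsed(dfsUsed)
        .setRemaining(remaining)
        .setBlockPoolUsed(blockPoolUsed)
        .build();
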
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Jan  8 14:36:09 2014
@@ -140,6 +140,7 @@ public class MiniDFSCluster {
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
+    private StorageType storageType = StorageType.DEFAULT;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
@@ -186,6 +187,14 @@ public class MiniDFSCluster {
     }
 
     /**
+     * Default: StorageType.DEFAULT
+     */
+    public Builder storageType(StorageType type) {
+      this.storageType = type;
+      return this;
+    }
+
+    /**
      * Default: true
      */
     public Builder format(boolean val) {
@@ -341,6 +350,7 @@ public class MiniDFSCluster {
       
     initMiniDFSCluster(builder.conf,
                        builder.numDataNodes,
+                       builder.storageType,
                        builder.format,
                        builder.manageNameDfsDirs,
                        builder.manageNameDfsSharedDirs,
@@ -592,7 +602,7 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
-    initMiniDFSCluster(conf, numDataNodes, format,
+    initMiniDFSCluster(conf, numDataNodes, StorageType.DEFAULT, format,
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
         operation, racks, hosts,
         simulatedCapacities, null, true, false,
@@ -601,7 +611,7 @@ public class MiniDFSCluster {
 
   private void initMiniDFSCluster(
       Configuration conf,
-      int numDataNodes, boolean format, boolean manageNameDfsDirs,
+      int numDataNodes, StorageType storageType, boolean format, boolean manageNameDfsDirs,
       boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
       boolean manageDataDfsDirs, StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
@@ -670,7 +680,7 @@ public class MiniDFSCluster {
     }
 
     // Start the DataNodes
-    startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
+    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs, operation, racks,
         hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
@@ -990,6 +1000,19 @@ public class MiniDFSCluster {
     }
   }
 
+  String makeDataNodeDirs(int dnIndex, StorageType storageType) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
+      File dir = getInstanceStorageDir(dnIndex, j);
+      dir.mkdirs();
+      if (!dir.isDirectory()) {
+        throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
+      }
+      sb.append((j > 0 ? "," : "") + "[" + storageType + "]" + fileAsURI(dir));
+    }
+    return sb.toString();
+  }
+
   /**
    * Modify the config and start up additional DataNodes.  The info port for
    * DataNodes is guaranteed to use a free port.
@@ -1052,7 +1075,7 @@ public class MiniDFSCluster {
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, false, false);
   }
 
@@ -1066,7 +1089,7 @@ public class MiniDFSCluster {
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
-    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, hosts,
         simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
   }
 
@@ -1098,7 +1121,7 @@ public class MiniDFSCluster {
    * @throws IllegalStateException if NameNode has been shutdown
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      boolean manageDfsDirs, StartupOption operation, 
+      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
@@ -1154,16 +1177,7 @@ public class MiniDFSCluster {
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        StringBuilder sb = new StringBuilder();
-        for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
-          File dir = getInstanceStorageDir(i, j);
-          dir.mkdirs();
-          if (!dir.isDirectory()) {
-            throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
-          }
-          sb.append((j > 0 ? "," : "") + fileAsURI(dir));
-        }
-        String dirs = sb.toString();
+        String dirs = makeDataNodeDirs(i, storageType);
         dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
       }

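On the test side, a StorageType is threaded through MiniDFSCluster, and every generated data directory is tagged with it via makeDataNodeDirs (each path gains a "[TYPE]" prefix). A test opts in through the new Builder method; a sketch, assuming StorageType.SSD is available on this branch:

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .storageType(StorageType.SSD) // default remains StorageType.DEFAULT
        .build();
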
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java Wed Jan  8 14:36:09 2014
@@ -50,7 +50,7 @@ public class MiniDFSClusterWithNodeGroup
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-      boolean manageDfsDirs, StartupOption operation, 
+      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
@@ -112,15 +112,7 @@ public class MiniDFSClusterWithNodeGroup
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        File dir1 = getInstanceStorageDir(i, 0);
-        File dir2 = getInstanceStorageDir(i, 1);
-        dir1.mkdirs();
-        dir2.mkdirs();
-        if (!dir1.isDirectory() || !dir2.isDirectory()) { 
-          throw new IOException("Mkdirs failed to create directory for DataNode "
-              + i + ": " + dir1 + " or " + dir2);
-        }
-        String dirs = fileAsURI(dir1) + "," + fileAsURI(dir2);
+        String dirs = makeDataNodeDirs(i, storageType);
         dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
         conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
       }
@@ -198,7 +190,7 @@ public class MiniDFSClusterWithNodeGroup
       String[] racks, String[] nodeGroups, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups, 
+    startDataNodes(conf, numDataNodes, StorageType.DEFAULT, manageDfsDirs, operation, racks, nodeGroups,
         hosts, simulatedCapacities, setupHostsFile, false, false);
   }
 
@@ -213,13 +205,13 @@ public class MiniDFSClusterWithNodeGroup
   // This is for initialize from parent class.
   @Override
   public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
-      boolean manageDfsDirs, StartupOption operation, 
+      StorageType storageType, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig) throws IOException {
-    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, 
+    startDataNodes(conf, numDataNodes, storageType, manageDfsDirs, operation, racks,
         NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile, 
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
   }

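The overloads that omit a storage type now delegate with StorageType.DEFAULT, so existing callers compile unchanged while new tests can pin a medium explicitly. An illustrative call against the full MiniDFSCluster signature above (every argument other than the storage type is a placeholder):

    // Hypothetical test snippet: start two DataNodes whose managed
    // directories are tagged as SSD rather than the default medium.
    cluster.startDataNodes(conf, 2, StorageType.SSD, true /* manageDfsDirs */,
        null /* operation */, null /* racks */, null /* hosts */,
        null /* simulatedCapacities */, false /* setupHostsFile */,
        false /* checkDataNodeAddrConfig */, false /* checkDataNodeHostConfig */);
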
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Wed Jan  8 14:36:09 2014
@@ -257,8 +257,10 @@ public class BlockManagerTestUtil {
       DatanodeDescriptor dnd) {
     ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
     for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
+      DatanodeStorage dns = new DatanodeStorage(
+          storage.getStorageID(), storage.getState(), storage.getStorageType());
       StorageReport report = new StorageReport(
-          storage.getStorageID(), false, storage.getCapacity(),
+          dns, false, storage.getCapacity(),
           storage.getDfsUsed(), storage.getRemaining(),
           storage.getBlockPoolUsed());
       reports.add(report);

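StorageReport no longer takes a bare storage-ID string; it takes the DatanodeStorage itself, which carries the ID, state, and storage type together. The construction pattern, as used above (capacity/dfsUsed/remaining/blockPoolUsed stand in for the values read off the DatanodeStorageInfo):

    DatanodeStorage dns = new DatanodeStorage(
        storage.getStorageID(), storage.getState(), storage.getStorageType());
    StorageReport report = new StorageReport(dns, false /* failed */,
        capacity, dfsUsed, remaining, blockPoolUsed);
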
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Wed Jan  8 14:36:09 2014
@@ -470,11 +470,14 @@ public class TestJspHelper {
     BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
     BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));
 
+    DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
+    DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");
+
     StorageReport[] report1 = new StorageReport[] {
-        new StorageReport("dnStorage1", false, 1024, 100, 924, 100)
+        new StorageReport(dns1, false, 1024, 100, 924, 100)
     };
     StorageReport[] report2 = new StorageReport[] {
-        new StorageReport("dnStorage2", false, 2500, 200, 1848, 200)
+        new StorageReport(dns2, false, 2500, 200, 1848, 200)
     };
     dnDesc1.updateHeartbeat(report1, 5l, 3l, 10, 2);
     dnDesc2.updateHeartbeat(report2, 10l, 2l, 20, 1);

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Wed Jan  8 14:36:09 2014
@@ -394,8 +394,9 @@ public class SimulatedFSDataset implemen
     }
 
     synchronized StorageReport getStorageReport(String bpid) {
-      return new StorageReport(getStorageUuid(), false, getCapacity(),
-          getUsed(), getFree(), map.get(bpid).getUsed());
+      return new StorageReport(new DatanodeStorage(getStorageUuid()),
+          false, getCapacity(), getUsed(), getFree(),
+          map.get(bpid).getUsed());
     }
   }
   

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Wed Jan  8 14:36:09 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.DataChecksum;
 import org.junit.After;
@@ -186,9 +187,8 @@ public class TestDiskError {
     // Check permissions on directories in 'dfs.datanode.data.dir'
     FileSystem localFS = FileSystem.getLocal(conf);
     for (DataNode dn : cluster.getDataNodes()) {
-      String[] dataDirs =
-        dn.getConf().getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
-      for (String dir : dataDirs) {
+      for (FsVolumeSpi v : dn.getFSDataset().getVolumes()) {
+        String dir = v.getBasePath();
         Path dataDir = new Path(dir);
         FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
           assertEquals("Permission for dir: " + dataDir + ", is " + actual +

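Rather than re-parsing dfs.datanode.data.dir out of the configuration, the test now asks the dataset for the volumes the DataNode actually instantiated, which stays correct if the configured value later carries per-directory annotations. The pattern:

    // Walk the instantiated volumes instead of the raw config value.
    for (FsVolumeSpi volume : dn.getFSDataset().getVolumes()) {
      Path dataDir = new Path(volume.getBasePath());
      FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
      // ... assert on the permission as above ...
    }
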
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Wed Jan  8 14:36:09 2014
@@ -43,10 +43,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -109,8 +112,9 @@ public class TestFsDatasetCache {
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
-        500);
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
+    conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         CACHE_CAPACITY);
@@ -328,7 +332,7 @@ public class TestFsDatasetCache {
 
     // Create some test files that will exceed total cache capacity
     final int numFiles = 5;
-    final long fileSize = 15000;
+    final long fileSize = CACHE_CAPACITY / (numFiles-1);
 
     final Path[] testFiles = new Path[numFiles];
     final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
@@ -477,4 +481,42 @@ public class TestFsDatasetCache {
     setHeartbeatResponse(uncacheBlocks(locs));
     verifyExpectedCacheUsage(0, 0);
   }
+
+  @Test(timeout=60000)
+  public void testUncacheQuiesces() throws Exception {
+    // Create a file
+    Path fileName = new Path("/testUncacheQuiesces");
+    int fileLen = 4096;
+    DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
+    // Cache it
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    dfs.addCachePool(new CachePoolInfo("pool"));
+    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+        .setPool("pool").setPath(fileName).setReplication((short)3).build());
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
+        long blocksCached =
+            MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
+        return blocksCached > 0;
+      }
+    }, 1000, 30000);
+    // Uncache it
+    dfs.removeCacheDirective(1);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
+        long blocksUncached =
+            MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
+        return blocksUncached > 0;
+      }
+    }, 1000, 30000);
+    // Make sure that no additional messages were sent
+    Thread.sleep(10000);
+    MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
+    MetricsAsserts.assertCounter("BlocksCached", 1l, dnMetrics);
+    MetricsAsserts.assertCounter("BlocksUncached", 1l, dnMetrics);
+  }
 }

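The new test polls DataNode metrics counters until a condition holds, then sleeps and asserts the counters did not move again. The polling step generalizes into a helper of roughly this shape (a hypothetical refactoring, not part of this commit):

    // Poll a named DataNode counter until it reaches a minimum value;
    // checks every second, gives up after 30 seconds.
    private void waitForCounter(final DataNode dn, final String counter,
        final long min) throws Exception {
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
          return MetricsAsserts.getLongCounter(counter, rb) >= min;
        }
      }, 1000, 30000);
    }
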
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Jan  8 14:36:09 2014
@@ -938,7 +938,7 @@ public class NNThroughputBenchmark imple
       // register datanode
       dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
       //first block reports
-      storage = new DatanodeStorage(dnRegistration.getDatanodeUuid());
+      storage = new DatanodeStorage(DatanodeStorage.generateUuid());
       final StorageBlockReport[] reports = {
           new StorageBlockReport(storage,
               new BlockListAsLongs(null, null).getBlockListAsLongs())
@@ -954,8 +954,8 @@ public class NNThroughputBenchmark imple
     void sendHeartbeat() throws IOException {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
-      StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(),
-          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
+      StorageReport[] rep = { new StorageReport(storage, false,
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
           0L, 0L, 0, 0, 0).getCommands();
       if(cmds != null) {
@@ -1001,7 +1001,7 @@ public class NNThroughputBenchmark imple
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      StorageReport[] rep = { new StorageReport(dnRegistration.getDatanodeUuid(),
+      StorageReport[] rep = { new StorageReport(storage,
           false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
           rep, 0L, 0L, 0, 0, 0).getCommands();
@@ -1010,7 +1010,8 @@ public class NNThroughputBenchmark imple
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
             // Send a copy of a block to another datanode
             BlockCommand bcmd = (BlockCommand)cmd;
-            return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
+            return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
+                                  bcmd.getTargetStorageIDs());
           }
         }
       }
@@ -1023,12 +1024,14 @@ public class NNThroughputBenchmark imple
      * that the blocks have been received.
      */
     private int transferBlocks( Block blocks[], 
-                                DatanodeInfo xferTargets[][] 
+                                DatanodeInfo xferTargets[][],
+                                String targetStorageIDs[][]
                               ) throws IOException {
       for(int i = 0; i < blocks.length; i++) {
         DatanodeInfo blockTargets[] = xferTargets[i];
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
+          String targetStorageID = targetStorageIDs[i][t];
           DatanodeRegistration receivedDNReg;
           receivedDNReg = new DatanodeRegistration(dnInfo,
             new DataStorage(nsInfo),
@@ -1038,7 +1041,7 @@ public class NNThroughputBenchmark imple
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
                   null) };
           StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-              receivedDNReg.getDatanodeUuid(), rdBlocks) };
+              targetStorageID, rdBlocks) };
           nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
               .getNamesystem().getBlockPoolId(), report);
         }
@@ -1127,7 +1130,7 @@ public class NNThroughputBenchmark imple
       }
 
       // create files 
-      LOG.info("Creating " + nrFiles + " with " + blocksPerFile + " blocks each.");
+      LOG.info("Creating " + nrFiles + " files with " + blocksPerFile + " blocks each.");
       FileNameGenerator nameGenerator;
       nameGenerator = new FileNameGenerator(getBaseDir(), 100);
       String clientName = getClientName(007);
@@ -1161,7 +1164,7 @@ public class NNThroughputBenchmark imple
               loc.getBlock().getLocalBlock(),
               ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
           StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
-              datanodes[dnIdx].dnRegistration.getDatanodeUuid(), rdBlocks) };
+              datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
           nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
               .getBlock().getBlockPoolId(), report);
         }

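The benchmark previously conflated the datanode UUID with its storage ID; with per-storage reporting the two are distinct, so the simulated datanode now keeps its DatanodeStorage and keys heartbeats and block-received reports by the storage ID. In outline:

    // One storage per simulated datanode; its ID, not the node UUID,
    // keys heartbeats and blockReceivedAndDeleted reports.
    storage = new DatanodeStorage(DatanodeStorage.generateUuid());
    StorageReport[] rep = { new StorageReport(storage, false,
        DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
    StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
        storage.getStorageID(), rdBlocks) };
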
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Wed Jan  8 14:36:09 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.File;
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
@@ -29,25 +28,13 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Options.Rename;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
 
 /**
  * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
@@ -135,151 +122,11 @@ public class OfflineEditsViewerHelper {
    * OP_CLEAR_NS_QUOTA  (12)
    */
   private CheckpointSignature runOperations() throws IOException {
-
     LOG.info("Creating edits by performing fs operations");
     // no check, if it's not it throws an exception which is what we want
-    DistributedFileSystem dfs =
-      (DistributedFileSystem)cluster.getFileSystem();
-    FileContext fc = FileContext.getFileContext(cluster.getURI(0), config);
-    // OP_ADD 0
-    Path pathFileCreate = new Path("/file_create_u\1F431");
-    FSDataOutputStream s = dfs.create(pathFileCreate);
-    // OP_CLOSE 9
-    s.close();
-    // OP_RENAME_OLD 1
-    Path pathFileMoved = new Path("/file_moved");
-    dfs.rename(pathFileCreate, pathFileMoved);
-    // OP_DELETE 2
-    dfs.delete(pathFileMoved, false);
-    // OP_MKDIR 3
-    Path pathDirectoryMkdir = new Path("/directory_mkdir");
-    dfs.mkdirs(pathDirectoryMkdir);
-    // OP_ALLOW_SNAPSHOT 29
-    dfs.allowSnapshot(pathDirectoryMkdir);
-    // OP_DISALLOW_SNAPSHOT 30
-    dfs.disallowSnapshot(pathDirectoryMkdir);
-    // OP_CREATE_SNAPSHOT 26
-    String ssName = "snapshot1";
-    dfs.allowSnapshot(pathDirectoryMkdir);
-    dfs.createSnapshot(pathDirectoryMkdir, ssName);
-    // OP_RENAME_SNAPSHOT 28
-    String ssNewName = "snapshot2";
-    dfs.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
-    // OP_DELETE_SNAPSHOT 27
-    dfs.deleteSnapshot(pathDirectoryMkdir, ssNewName);
-    // OP_SET_REPLICATION 4
-    s = dfs.create(pathFileCreate);
-    s.close();
-    dfs.setReplication(pathFileCreate, (short)1);
-    // OP_SET_PERMISSIONS 7
-    Short permission = 0777;
-    dfs.setPermission(pathFileCreate, new FsPermission(permission));
-    // OP_SET_OWNER 8
-    dfs.setOwner(pathFileCreate, new String("newOwner"), null);
-    // OP_CLOSE 9 see above
-    // OP_SET_GENSTAMP 10 see above
-    // OP_SET_NS_QUOTA 11 obsolete
-    // OP_CLEAR_NS_QUOTA 12 obsolete
-    // OP_TIMES 13
-    long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
-    long atime = mtime;
-    dfs.setTimes(pathFileCreate, mtime, atime);
-    // OP_SET_QUOTA 14
-    dfs.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET);
-    // OP_RENAME 15
-    fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
-    // OP_CONCAT_DELETE 16
-    Path   pathConcatTarget = new Path("/file_concat_target");
-    Path[] pathConcatFiles  = new Path[2];
-    pathConcatFiles[0]      = new Path("/file_concat_0");
-    pathConcatFiles[1]      = new Path("/file_concat_1");
-
-    long  length      = blockSize * 3; // multiple of blocksize for concat
-    short replication = 1;
-    long  seed        = 1;
-
-    DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
-    DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
-    DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
-    dfs.concat(pathConcatTarget, pathConcatFiles);
-    // OP_SYMLINK 17
-    Path pathSymlink = new Path("/file_symlink");
-    fc.createSymlink(pathConcatTarget, pathSymlink, false);
-    // OP_GET_DELEGATION_TOKEN 18
-    // OP_RENEW_DELEGATION_TOKEN 19
-    // OP_CANCEL_DELEGATION_TOKEN 20
-    // see TestDelegationToken.java
-    // fake the user to renew token for
-    final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", null);
-    UserGroupInformation longUgi = UserGroupInformation.createRemoteUser(
-      "JobTracker/foo.com@FOO.COM");
-    try {
-      longUgi.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws IOException, InterruptedException {
-          for (Token<?> token : tokens) {
-            token.renew(config);
-            token.cancel(config);
-          }
-          return null;
-        }
-      });
-    } catch(InterruptedException e) {
-      throw new IOException(
-        "renewDelegationToken threw InterruptedException", e);
-    }
-    // OP_UPDATE_MASTER_KEY 21
-    //   done by getDelegationTokenSecretManager().startThreads();
-
-    // OP_ADD_CACHE_POOL 35
-    final String pool = "poolparty";
-    dfs.addCachePool(new CachePoolInfo(pool));
-    // OP_MODIFY_CACHE_POOL 36
-    dfs.modifyCachePool(new CachePoolInfo(pool)
-        .setOwnerName("carlton")
-        .setGroupName("party")
-        .setMode(new FsPermission((short)0700))
-        .setLimit(1989l));
-    // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
-    long id = dfs.addCacheDirective(
-        new CacheDirectiveInfo.Builder().
-            setPath(new Path("/bar")).
-            setReplication((short)1).
-            setPool(pool).
-            build());
-    // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
-    dfs.modifyCacheDirective(
-        new CacheDirectiveInfo.Builder().
-            setId(id).
-            setPath(new Path("/bar2")).
-            build());
-    // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
-    dfs.removeCacheDirective(id);
-    // OP_REMOVE_CACHE_POOL 37
-    dfs.removeCachePool(pool);
-    // sync to disk, otherwise we parse partial edits
-    cluster.getNameNode().getFSImage().getEditLog().logSync();
-    
-    // OP_REASSIGN_LEASE 22
-    String filePath = "/hard-lease-recovery-test";
-    byte[] bytes = "foo-bar-baz".getBytes();
-    DFSClientAdapter.stopLeaseRenewer(dfs);
-    FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
-    leaseRecoveryPath.write(bytes);
-    leaseRecoveryPath.hflush();
-    // Set the hard lease timeout to 1 second.
-    cluster.setLeasePeriod(60 * 1000, 1000);
-    // wait for lease recovery to complete
-    LocatedBlocks locatedBlocks;
-    do {
-      try {
-        Thread.sleep(1000);
-      } catch (InterruptedException e) {
-        LOG.info("Innocuous exception", e);
-      }
-      locatedBlocks = DFSClientAdapter.callGetBlockLocations(
-          cluster.getNameNodeRpc(), filePath, 0L, bytes.length);
-    } while (locatedBlocks.isUnderConstruction());
+    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+    DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
+        dfs.getDefaultBlockSize(), 0);
 
     // Force a roll so we get an OP_END_LOG_SEGMENT txn
     return cluster.getNameNodeRpc().rollEditLog();

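The ~150 lines of hand-written fs operations move into the shared DFSTestUtil.runOperations helper, so any test that needs a representative stream of edit-log ops can generate one in two lines:

    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
        dfs.getDefaultBlockSize(), 0);
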
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Wed Jan  8 14:36:09 2014
@@ -57,17 +57,18 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -81,6 +82,7 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.util.GSet;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -603,8 +605,8 @@ public class TestCacheDirectives {
    * Wait for the NameNode to have an expected number of cached blocks
    * and replicas.
    * @param nn NameNode
-   * @param expectedCachedBlocks
-   * @param expectedCachedReplicas
+   * @param expectedCachedBlocks if -1, treat as wildcard
+   * @param expectedCachedReplicas if -1, treat as wildcard
    * @throws Exception
    */
   private static void waitForCachedBlocks(NameNode nn,
@@ -633,16 +635,18 @@ public class TestCacheDirectives {
         } finally {
           namesystem.readUnlock();
         }
-        if ((numCachedBlocks == expectedCachedBlocks) && 
-            (numCachedReplicas == expectedCachedReplicas)) {
-          return true;
-        } else {
-          LOG.info(logString + " cached blocks: have " + numCachedBlocks +
-              " / " + expectedCachedBlocks + ".  " +
-              "cached replicas: have " + numCachedReplicas +
-              " / " + expectedCachedReplicas);
-          return false;
+        if (expectedCachedBlocks == -1 ||
+            numCachedBlocks == expectedCachedBlocks) {
+          if (expectedCachedReplicas == -1 ||
+              numCachedReplicas == expectedCachedReplicas) {
+            return true;
+          }
         }
+        LOG.info(logString + " cached blocks: have " + numCachedBlocks +
+            " / " + expectedCachedBlocks + ".  " +
+            "cached replicas: have " + numCachedReplicas +
+            " / " + expectedCachedReplicas);
+        return false;
       }
     }, 500, 60000);
   }
@@ -1351,4 +1355,39 @@ public class TestCacheDirectives {
         .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
         .build());
   }
+
+  @Test(timeout=60000)
+  public void testExceedsCapacity() throws Exception {
+    // Create a giant file
+    final Path fileName = new Path("/exceeds");
+    final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
+    int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
+    DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
+        0xFADED);
+    // Set up a log appender watcher
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+    dfs.addCachePool(new CachePoolInfo("pool"));
+    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
+        .setPath(fileName).setReplication((short) 1).build());
+    waitForCachedBlocks(namenode, -1, numCachedReplicas,
+        "testExceeds:1");
+    // Check that no DNs saw an excess CACHE message
+    int lines = appender.countLinesWithMessage(
+        "more bytes in the cache: " +
+        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
+    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+    // Try creating a file with giant-sized blocks that exceed cache capacity
+    dfs.delete(fileName, false);
+    DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
+        (short) 1, 0xFADED);
+    // Nothing will get cached, so just force sleep for a bit
+    Thread.sleep(4000);
+    // Still should not see any excess commands
+    lines = appender.countLinesWithMessage(
+        "more bytes in the cache: " +
+        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
+    assertEquals("Namenode should not send extra CACHE commands", 0, lines);
+  }
 }

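testExceedsCapacity drives the cache past its limit and then asserts, via a log appender, that the NameNode never told a DataNode to cache more than it could pin. The verification idiom, in a detachable form (the removeAppender cleanup is added here for hygiene; the test itself leaves the appender attached):

    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    try {
      // ... run the caching workload ...
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      assertEquals("Namenode should not send extra CACHE commands", 0, lines);
    } finally {
      logger.removeAppender(appender);
    }
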
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1556552&r1=1556551&r2=1556552&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Wed Jan  8 14:36:09 2014
@@ -140,8 +140,9 @@ public class TestDeadDatanode {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    StorageReport[] rep = { new StorageReport(reg.getDatanodeUuid(), false, 0, 0,
-        0, 0) };
+    StorageReport[] rep = { new StorageReport(
+        new DatanodeStorage(reg.getDatanodeUuid()),
+        false, 0, 0, 0, 0) };
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0)
         .getCommands();
     assertEquals(1, cmd.length);


