hadoop-hdfs-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1608603 [4/6] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-...
Date: Mon, 07 Jul 2014 20:44:09 GMT
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java Mon Jul  7 20:43:56 2014
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.server.na
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Deque;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -30,8 +28,6 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
@@ -161,59 +157,6 @@ public class DirectoryWithSnapshotFeatur
         }
       }
     }
-
-    /**
-     * Interpret the diff and generate a list of {@link DiffReportEntry}.
-     * @param parentPath The relative path of the parent.
-     * @param fromEarlier True indicates {@code diff=later-earlier},
-     *                    False indicates {@code diff=earlier-later}
-     * @return A list of {@link DiffReportEntry} as the diff report.
-     */
-    public List<DiffReportEntry> generateReport(byte[][] parentPath,
-        boolean fromEarlier) {
-      List<DiffReportEntry> cList = new ArrayList<DiffReportEntry>();
-      List<DiffReportEntry> dList = new ArrayList<DiffReportEntry>();
-      int c = 0, d = 0;
-      List<INode> created = getList(ListType.CREATED);
-      List<INode> deleted = getList(ListType.DELETED);
-      byte[][] fullPath = new byte[parentPath.length + 1][];
-      System.arraycopy(parentPath, 0, fullPath, 0, parentPath.length);
-      for (; c < created.size() && d < deleted.size(); ) {
-        INode cnode = created.get(c);
-        INode dnode = deleted.get(d);
-        if (cnode.compareTo(dnode.getLocalNameBytes()) == 0) {
-          fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
-          // must be the case: delete first and then create an inode with the
-          // same name
-          cList.add(new DiffReportEntry(DiffType.CREATE, fullPath));
-          dList.add(new DiffReportEntry(DiffType.DELETE, fullPath));
-          c++;
-          d++;
-        } else if (cnode.compareTo(dnode.getLocalNameBytes()) < 0) {
-          fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
-          cList.add(new DiffReportEntry(fromEarlier ? DiffType.CREATE
-              : DiffType.DELETE, fullPath));
-          c++;
-        } else {
-          fullPath[fullPath.length - 1] = dnode.getLocalNameBytes();
-          dList.add(new DiffReportEntry(fromEarlier ? DiffType.DELETE
-              : DiffType.CREATE, fullPath));
-          d++;
-        }
-      }
-      for (; d < deleted.size(); d++) {
-        fullPath[fullPath.length - 1] = deleted.get(d).getLocalNameBytes();
-        dList.add(new DiffReportEntry(fromEarlier ? DiffType.DELETE
-            : DiffType.CREATE, fullPath));
-      }
-      for (; c < created.size(); c++) {
-        fullPath[fullPath.length - 1] = created.get(c).getLocalNameBytes();
-        cList.add(new DiffReportEntry(fromEarlier ? DiffType.CREATE
-            : DiffType.DELETE, fullPath));
-      }
-      dList.addAll(cList);
-      return dList;
-    }
   }
 
   /**
@@ -724,34 +667,21 @@ public class DirectoryWithSnapshotFeatur
    */
   boolean computeDiffBetweenSnapshots(Snapshot fromSnapshot,
       Snapshot toSnapshot, ChildrenDiff diff, INodeDirectory currentINode) {
-    Snapshot earlier = fromSnapshot;
-    Snapshot later = toSnapshot;
-    if (Snapshot.ID_COMPARATOR.compare(fromSnapshot, toSnapshot) > 0) {
-      earlier = toSnapshot;
-      later = fromSnapshot;
-    }
-
-    boolean modified = diffs.changedBetweenSnapshots(earlier, later);
-    if (!modified) {
+    int[] diffIndexPair = diffs.changedBetweenSnapshots(fromSnapshot,
+        toSnapshot);
+    if (diffIndexPair == null) {
       return false;
     }
-
-    final List<DirectoryDiff> difflist = diffs.asList();
-    final int size = difflist.size();
-    int earlierDiffIndex = Collections.binarySearch(difflist, earlier.getId());
-    int laterDiffIndex = later == null ? size : Collections
-        .binarySearch(difflist, later.getId());
-    earlierDiffIndex = earlierDiffIndex < 0 ? (-earlierDiffIndex - 1)
-        : earlierDiffIndex;
-    laterDiffIndex = laterDiffIndex < 0 ? (-laterDiffIndex - 1)
-        : laterDiffIndex;
+    int earlierDiffIndex = diffIndexPair[0];
+    int laterDiffIndex = diffIndexPair[1];
 
     boolean dirMetadataChanged = false;
     INodeDirectoryAttributes dirCopy = null;
+    List<DirectoryDiff> difflist = diffs.asList();
     for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
       DirectoryDiff sdiff = difflist.get(i);
       diff.combinePosterior(sdiff.diff, null);
-      if (dirMetadataChanged == false && sdiff.snapshotINode != null) {
+      if (!dirMetadataChanged && sdiff.snapshotINode != null) {
         if (dirCopy == null) {
           dirCopy = sdiff.snapshotINode;
         } else if (!dirCopy.metadataEquals(sdiff.snapshotINode)) {
@@ -763,7 +693,7 @@ public class DirectoryWithSnapshotFeatur
     if (!diff.isEmpty() || dirMetadataChanged) {
       return true;
     } else if (dirCopy != null) {
-      for (int i = laterDiffIndex; i < size; i++) {
+      for (int i = laterDiffIndex; i < difflist.size(); i++) {
         if (!dirCopy.metadataEquals(difflist.get(i).snapshotINode)) {
           return true;
         }
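
The rewritten computeDiffBetweenSnapshots above no longer orders the two snapshots and binary-searches the diff list itself; diffs.changedBetweenSnapshots now hands back the pair of diff-list indices to combine, or null when nothing changed. A minimal, self-contained sketch of that index-pair idiom, using a hypothetical SnapshotDiffList with plain integer snapshot ids rather than the real AbstractINodeDiffList/Snapshot types:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class SnapshotDiffList {
      /** Stand-in for the "current, un-snapshotted state" marker id. */
      static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

      private final List<Integer> snapshotIds = new ArrayList<Integer>(); // kept sorted

      void addDiff(int snapshotId) {
        snapshotIds.add(snapshotId);
        Collections.sort(snapshotIds);
      }

      /**
       * @return {earlierDiffIndex, laterDiffIndex}, the half-open range of diffs
       *         recorded between the two snapshots, or null if the range is empty.
       */
      int[] changedBetweenSnapshots(int from, int to) {
        int earlier = Math.min(from, to);   // order the arguments ourselves
        int later = Math.max(from, to);
        int earlierIndex = insertionPoint(Collections.binarySearch(snapshotIds, earlier));
        int laterIndex = later == CURRENT_STATE_ID ? snapshotIds.size()
            : insertionPoint(Collections.binarySearch(snapshotIds, later));
        return earlierIndex < laterIndex ? new int[] {earlierIndex, laterIndex} : null;
      }

      /** Collections.binarySearch encodes a miss as -(insertionPoint) - 1. */
      private static int insertionPoint(int result) {
        return result < 0 ? -result - 1 : result;
      }

      public static void main(String[] args) {
        SnapshotDiffList diffs = new SnapshotDiffList();
        diffs.addDiff(1);
        diffs.addDiff(3);
        // Diffs recorded between snapshot 1 and the current state: indices [0, 2).
        System.out.println(java.util.Arrays.toString(
            diffs.changedBetweenSnapshots(1, CURRENT_STATE_ID)));
      }
    }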

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java Mon Jul  7 20:43:56 2014
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 
 /**
@@ -73,7 +74,41 @@ public class FileWithSnapshotFeature imp
     }
     return max;
   }
-  
+
+  boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
+    int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
+    if (diffIndexPair == null) {
+      return false;
+    }
+    int earlierDiffIndex = diffIndexPair[0];
+    int laterDiffIndex = diffIndexPair[1];
+
+    final List<FileDiff> diffList = diffs.asList();
+    final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
+    final long laterLength = laterDiffIndex == diffList.size() ? file
+        .computeFileSize(true, false) : diffList.get(laterDiffIndex)
+        .getFileSize();
+    if (earlierLength != laterLength) { // file length has been changed
+      return true;
+    }
+
+    INodeFileAttributes earlierAttr = null; // check the metadata
+    for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
+      FileDiff diff = diffList.get(i);
+      if (diff.snapshotINode != null) {
+        earlierAttr = diff.snapshotINode;
+        break;
+      }
+    }
+    if (earlierAttr == null) { // no meta-change at all, return false
+      return false;
+    }
+    INodeFileAttributes laterAttr = diffs.getSnapshotINode(
+        Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
+        file);
+    return !earlierAttr.metadataEquals(laterAttr);
+  }
+
   public String getDetailedString() {
     return (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
   }
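
The new FileWithSnapshotFeature.changedBetweenSnapshots checks the cheap signal first (file length at the two indices) and only then scans the range for a recorded metadata copy to compare against the later state. A compact sketch of that length-then-metadata check (modern Java for brevity; the toy FileDiff record stands in for the real FileDiff/INodeFileAttributes types):

    import java.util.List;
    import java.util.Objects;

    public class FileChangeCheck {
      /** Toy stand-in for a FileDiff: length and (nullable) metadata captured at a snapshot. */
      record FileDiff(long fileSize, String metadata) {}

      /**
       * Report a change if the file length differs between the two diff indices,
       * otherwise only if a metadata copy recorded in the range differs from the
       * later state (mirroring the length-then-metadata order above).
       */
      static boolean changedBetween(List<FileDiff> diffList, int earlierIndex,
          int laterIndex, long currentLength, String currentMetadata) {
        long earlierLength = diffList.get(earlierIndex).fileSize();
        long laterLength = laterIndex == diffList.size()
            ? currentLength : diffList.get(laterIndex).fileSize();
        if (earlierLength != laterLength) {
          return true;                       // file length changed
        }
        String earlierMeta = null;           // first metadata copy recorded in the range
        for (int i = earlierIndex; i < laterIndex; i++) {
          if (diffList.get(i).metadata() != null) {
            earlierMeta = diffList.get(i).metadata();
            break;
          }
        }
        if (earlierMeta == null) {
          return false;                      // no metadata change recorded at all
        }
        String laterMeta = laterIndex == diffList.size()
            ? currentMetadata : diffList.get(laterIndex).metadata();
        return !Objects.equals(earlierMeta, laterMeta);
      }

      public static void main(String[] args) {
        List<FileDiff> diffs = List.of(new FileDiff(10, "perm=644"), new FileDiff(10, null));
        System.out.println(changedBetween(diffs, 0, 2, 10, "perm=600"));  // true
      }
    }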

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Mon Jul  7 20:43:56 2014
@@ -21,21 +21,14 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
@@ -43,6 +36,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
@@ -51,7 +47,7 @@ import org.apache.hadoop.hdfs.util.ReadO
 import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
-import com.google.common.primitives.SignedBytes;
+import com.google.common.collect.Lists;
 
 /**
  * Directories where taking snapshots is allowed.
@@ -74,97 +70,6 @@ public class INodeDirectorySnapshottable
     }
     return (INodeDirectorySnapshottable)dir;
   }
-  
-  /**
-   * A class describing the difference between snapshots of a snapshottable
-   * directory.
-   */
-  public static class SnapshotDiffInfo {
-    /** Compare two inodes based on their full names */
-    public static final Comparator<INode> INODE_COMPARATOR = 
-        new Comparator<INode>() {
-      @Override
-      public int compare(INode left, INode right) {
-        if (left == null) {
-          return right == null ? 0 : -1;
-        } else {
-          if (right == null) {
-            return 1;
-          } else {
-            int cmp = compare(left.getParent(), right.getParent());
-            return cmp == 0 ? SignedBytes.lexicographicalComparator().compare(
-                left.getLocalNameBytes(), right.getLocalNameBytes()) : cmp;
-          }
-        }
-      }
-    };
-    
-    /** The root directory of the snapshots */
-    private final INodeDirectorySnapshottable snapshotRoot;
-    /** The starting point of the difference */
-    private final Snapshot from;
-    /** The end point of the difference */
-    private final Snapshot to;
-    /**
-     * A map recording modified INodeFile and INodeDirectory and their relative
-     * path corresponding to the snapshot root. Sorted based on their names.
-     */ 
-    private final SortedMap<INode, byte[][]> diffMap = 
-        new TreeMap<INode, byte[][]>(INODE_COMPARATOR);
-    /**
-     * A map capturing the detailed difference about file creation/deletion.
-     * Each key indicates a directory whose children have been changed between
-     * the two snapshots, while its associated value is a {@link ChildrenDiff}
-     * storing the changes (creation/deletion) happened to the children (files).
-     */
-    private final Map<INodeDirectory, ChildrenDiff> dirDiffMap = 
-        new HashMap<INodeDirectory, ChildrenDiff>();
-    
-    SnapshotDiffInfo(INodeDirectorySnapshottable snapshotRoot, Snapshot start,
-        Snapshot end) {
-      this.snapshotRoot = snapshotRoot;
-      this.from = start;
-      this.to = end;
-    }
-    
-    /** Add a dir-diff pair */
-    private void addDirDiff(INodeDirectory dir, byte[][] relativePath,
-        ChildrenDiff diff) {
-      dirDiffMap.put(dir, diff);
-      diffMap.put(dir, relativePath);
-    }
-    
-    /** Add a modified file */ 
-    private void addFileDiff(INodeFile file, byte[][] relativePath) {
-      diffMap.put(file, relativePath);
-    }
-    
-    /** @return True if {@link #from} is earlier than {@link #to} */
-    private boolean isFromEarlier() {
-      return Snapshot.ID_COMPARATOR.compare(from, to) < 0;
-    }
-    
-    /**
-     * Generate a {@link SnapshotDiffReport} based on detailed diff information.
-     * @return A {@link SnapshotDiffReport} describing the difference
-     */
-    public SnapshotDiffReport generateReport() {
-      List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
-      for (INode node : diffMap.keySet()) {
-        diffReportList.add(new DiffReportEntry(DiffType.MODIFY, diffMap
-            .get(node)));
-        if (node.isDirectory()) {
-          ChildrenDiff dirDiff = dirDiffMap.get(node);
-          List<DiffReportEntry> subList = dirDiff.generateReport(
-              diffMap.get(node), isFromEarlier());
-          diffReportList.addAll(subList);
-        }
-      }
-      return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
-          Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
-          diffReportList);
-    }
-  }
 
   /**
    * Snapshots of this directory in ascending order of snapshot names.
@@ -423,25 +328,37 @@ public class INodeDirectorySnapshottable
    */
   private void computeDiffRecursively(INode node, List<byte[]> parentPath,
       SnapshotDiffInfo diffReport) {
-    ChildrenDiff diff = new ChildrenDiff();
+    final Snapshot earlierSnapshot = diffReport.isFromEarlier() ?
+        diffReport.getFrom() : diffReport.getTo();
+    final Snapshot laterSnapshot = diffReport.isFromEarlier() ?
+        diffReport.getTo() : diffReport.getFrom();
     byte[][] relativePath = parentPath.toArray(new byte[parentPath.size()][]);
     if (node.isDirectory()) {
+      final ChildrenDiff diff = new ChildrenDiff();
       INodeDirectory dir = node.asDirectory();
       DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
       if (sf != null) {
-        boolean change = sf.computeDiffBetweenSnapshots(diffReport.from,
-            diffReport.to, diff, dir);
+        boolean change = sf.computeDiffBetweenSnapshots(earlierSnapshot,
+            laterSnapshot, diff, dir);
         if (change) {
           diffReport.addDirDiff(dir, relativePath, diff);
         }
       }
-      ReadOnlyList<INode> children = dir.getChildrenList(
-          diffReport.isFromEarlier() ? Snapshot.getSnapshotId(diffReport.to) : 
-            Snapshot.getSnapshotId(diffReport.from));
+      ReadOnlyList<INode> children = dir.getChildrenList(earlierSnapshot
+          .getId());
       for (INode child : children) {
         final byte[] name = child.getLocalNameBytes();
-        if (diff.searchIndex(ListType.CREATED, name) < 0
-            && diff.searchIndex(ListType.DELETED, name) < 0) {
+        boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
+        if (!toProcess && child instanceof INodeReference.WithName) {
+          byte[][] renameTargetPath = findRenameTargetPath((WithName) child,
+              laterSnapshot == null ? Snapshot.CURRENT_STATE_ID : 
+                                      laterSnapshot.getId());
+          if (renameTargetPath != null) {
+            toProcess = true;
+            diffReport.setRenameTarget(child.getId(), renameTargetPath);
+          }
+        }
+        if (toProcess) {
           parentPath.add(name);
           computeDiffRecursively(child, parentPath, diffReport);
           parentPath.remove(parentPath.size() - 1);
@@ -449,18 +366,47 @@ public class INodeDirectorySnapshottable
       }
     } else if (node.isFile() && node.asFile().isWithSnapshot()) {
       INodeFile file = node.asFile();
-      Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
-          : diffReport.to;
-      Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
-          : diffReport.from;
-      boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot,
-          laterSnapshot);
+      boolean change = file.getFileWithSnapshotFeature()
+          .changedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
       if (change) {
         diffReport.addFileDiff(file, relativePath);
       }
     }
   }
-  
+
+  /**
+   * We just found a deleted WithName node as the source of a rename operation.
+   * However, we should include it in our snapshot diff report as rename only
+   * if the rename target is also under the same snapshottable directory.
+   */
+  private byte[][] findRenameTargetPath(INodeReference.WithName wn,
+      final int snapshotId) {
+    INode inode = wn.getReferredINode();
+    final LinkedList<byte[]> ancestors = Lists.newLinkedList();
+    while (inode != null) {
+      if (inode == this) {
+        return ancestors.toArray(new byte[ancestors.size()][]);
+      }
+      if (inode instanceof INodeReference.WithCount) {
+        inode = ((WithCount) inode).getParentRef(snapshotId);
+      } else {
+        INode parent = inode.getParentReference() != null ? inode
+            .getParentReference() : inode.getParent();
+        if (parent != null && parent instanceof INodeDirectory) {
+          int sid = parent.asDirectory().searchChild(inode);
+          if (sid < snapshotId) {
+            return null;
+          }
+        }
+        if (!(parent instanceof WithCount)) {
+          ancestors.addFirst(inode.getLocalNameBytes());
+        }
+        inode = parent;
+      }
+    }
+    return null;
+  }
+
   /**
    * Replace itself with {@link INodeDirectoryWithSnapshot} or
    * {@link INodeDirectory} depending on the latest snapshot.
@@ -549,4 +495,4 @@ public class INodeDirectorySnapshottable
       });
     }
   }
-}
+}
\ No newline at end of file
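
findRenameTargetPath above reports a deleted WithName reference as a rename only when the rename target is still reachable from this snapshottable root, collecting the relative path while walking up the ancestors. A simplified sketch of that ancestor walk (hypothetical Node class; the real code additionally follows parent references and rejects targets whose searchChild snapshot id is too small):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class AncestorWalk {
      /** Toy tree node; the real code walks INode parents / parent references. */
      static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
      }

      /**
       * Return the path of {@code node} relative to {@code root} as components,
       * or null if {@code node} is not under {@code root}.
       */
      static String[] relativePathUnder(Node root, Node node) {
        Deque<String> ancestors = new ArrayDeque<String>();
        for (Node cur = node; cur != null; cur = cur.parent) {
          if (cur == root) {
            return ancestors.toArray(new String[0]);
          }
          ancestors.addFirst(cur.name);     // prepend so the order is root-to-leaf
        }
        return null;                        // walked past the root: not a descendant
      }

      public static void main(String[] args) {
        Node root = new Node("snapshotRoot", null);
        Node dir = new Node("dir", root);
        Node file = new Node("renamedFile", dir);
        System.out.println(String.join("/", relativePathUnder(root, file)));
        // prints dir/renamedFile
      }
    }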

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Mon Jul  7 20:43:56 2014
@@ -30,9 +30,11 @@ import java.util.concurrent.atomic.Atomi
 import javax.management.ObjectName;
 
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,7 +42,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 /**
@@ -361,12 +362,13 @@ public class SnapshotManager implements 
    * Compute the difference between two snapshots of a directory, or between a
    * snapshot of the directory and its current tree.
    */
-  public SnapshotDiffInfo diff(final String path, final String from,
+  public SnapshotDiffReport diff(final String path, final String from,
       final String to) throws IOException {
     if ((from == null || from.isEmpty())
         && (to == null || to.isEmpty())) {
       // both fromSnapshot and toSnapshot indicate the current tree
-      return null;
+      return new SnapshotDiffReport(path, from, to,
+          Collections.<DiffReportEntry> emptyList());
     }
 
     // Find the source root directory path where the snapshots were taken.
@@ -374,8 +376,10 @@ public class SnapshotManager implements 
     INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
     final INodeDirectorySnapshottable snapshotRoot = INodeDirectorySnapshottable
         .valueOf(inodesInPath.getLastINode(), path);
-    
-    return snapshotRoot.computeDiff(from, to);
+
+    final SnapshotDiffInfo diffs = snapshotRoot.computeDiff(from, to);
+    return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
+        path, from, to, Collections.<DiffReportEntry> emptyList());
   }
   
   public void clearSnapshottableDirs() {
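
diff() above now always hands callers a SnapshotDiffReport, returning one with an empty entry list instead of null when there is nothing to compare, so callers get a report object rather than a null check. A tiny sketch of that empty-report convention (hypothetical Report type, not the real SnapshotDiffReport):

    import java.util.Collections;
    import java.util.List;

    public class DiffReports {
      /** Toy report type: the snapshot names plus the diff entries. */
      static class Report {
        final String from, to;
        final List<String> entries;
        Report(String from, String to, List<String> entries) {
          this.from = from; this.to = to; this.entries = entries;
        }
      }

      /** Never return null: an empty report means "no difference to show". */
      static Report diff(String from, String to, List<String> computed) {
        return new Report(from, to,
            computed == null ? Collections.<String>emptyList() : computed);
      }

      public static void main(String[] args) {
        Report r = diff("s1", "s2", null);
        System.out.println(r.entries.size());  // 0 -- callers skip the null check
      }
    }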

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java Mon Jul  7 20:43:56 2014
@@ -82,6 +82,11 @@ public class DatanodeStorage {
   }
 
   @Override
+  public String toString() {
+    return "DatanodeStorage["+ storageID + "," + storageType + "," + state +"]";
+  }
+  
+  @Override
   public boolean equals(Object other){
     if (other == this) {
       return true;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java Mon Jul  7 20:43:56 2014
@@ -111,7 +111,7 @@ public class ShortCircuitCache implement
         Long evictionTimeNs = Long.valueOf(0);
         while (true) {
           Entry<Long, ShortCircuitReplica> entry = 
-              evictableMmapped.ceilingEntry(evictionTimeNs);
+              evictable.ceilingEntry(evictionTimeNs);
           if (entry == null) break;
           evictionTimeNs = entry.getKey();
           long evictionTimeMs = 
@@ -384,10 +384,6 @@ public class ShortCircuitCache implement
     this.shmManager = shmManager;
   }
 
-  public long getMmapRetryTimeoutMs() {
-    return mmapRetryTimeoutMs;
-  }
-
   public long getStaleThresholdMs() {
     return staleThresholdMs;
   }
@@ -847,7 +843,7 @@ public class ShortCircuitCache implement
         } else if (replica.mmapData instanceof Long) {
           long lastAttemptTimeMs = (Long)replica.mmapData;
           long delta = Time.monotonicNow() - lastAttemptTimeMs;
-          if (delta < staleThresholdMs) {
+          if (delta < mmapRetryTimeoutMs) {
             if (LOG.isTraceEnabled()) {
               LOG.trace(this + ": can't create client mmap for " +
                   replica + " because we failed to " +
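
The two hunks above are targeted fixes: the non-mmapped eviction loop now walks evictable rather than evictableMmapped, and the mmap retry check compares the elapsed time against mmapRetryTimeoutMs rather than staleThresholdMs. For reference, a self-contained sketch of the ceilingEntry traversal that loop relies on, over a TreeMap keyed by eviction time (toy Replica type, not the HDFS class):

    import java.util.Map;
    import java.util.TreeMap;

    public class TimeOrderedEviction {
      static class Replica {
        final String id;
        Replica(String id) { this.id = id; }
      }

      public static void main(String[] args) {
        // Keyed by an eviction timestamp in nanoseconds (unique per entry).
        TreeMap<Long, Replica> evictable = new TreeMap<Long, Replica>();
        evictable.put(100L, new Replica("a"));
        evictable.put(200L, new Replica("b"));
        evictable.put(300L, new Replica("c"));

        long cutoffNs = 250;          // evict everything at or before this time
        Long evictionTimeNs = Long.valueOf(0);
        while (true) {
          // ceilingEntry: the smallest key >= evictionTimeNs, i.e. the oldest remaining.
          Map.Entry<Long, Replica> entry = evictable.ceilingEntry(evictionTimeNs);
          if (entry == null || entry.getKey() > cutoffNs) {
            break;
          }
          evictionTimeNs = entry.getKey();
          System.out.println("evicting " + entry.getValue().id);
          evictable.remove(evictionTimeNs);
        }
        System.out.println("remaining: " + evictable.size());  // 1
      }
    }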

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Jul  7 20:43:56 2014
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -292,7 +293,7 @@ public class DFSAdmin extends FsShell {
     static final String USAGE = "-"+NAME+" [<query|prepare|finalize>]";
     static final String DESCRIPTION = USAGE + ":\n"
         + "     query: query the current rolling upgrade status.\n"
-        + "   prepare: prepare a new rolling upgrade."
+        + "   prepare: prepare a new rolling upgrade.\n"
         + "  finalize: finalize the current rolling upgrade.";
 
     /** Check if a command is the rollingUpgrade command
@@ -498,25 +499,60 @@ public class DFSAdmin extends FsShell {
       printUsage("-safemode");
       return;
     }
+
     DistributedFileSystem dfs = getDFS();
-    boolean inSafeMode = dfs.setSafeMode(action);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
 
-    //
-    // If we are waiting for safemode to exit, then poll and
-    // sleep till we are out of safemode.
-    //
-    if (waitExitSafe) {
-      while (inSafeMode) {
-        try {
-          Thread.sleep(5000);
-        } catch (java.lang.InterruptedException e) {
-          throw new IOException("Wait Interrupted");
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(
+          dfsConf, nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        ClientProtocol haNn = proxy.getProxy();
+        boolean inSafeMode = haNn.setSafeMode(action, false);
+        if (waitExitSafe) {
+          inSafeMode = waitExitSafeMode(haNn, inSafeMode);
         }
-        inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+        System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
+            + " in " + proxy.getAddress());
+      }
+    } else {
+      boolean inSafeMode = dfs.setSafeMode(action);
+      if (waitExitSafe) {
+        inSafeMode = waitExitSafeMode(dfs, inSafeMode);
+      }
+      System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
+    }
+
+  }
+
+  private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
+      throws IOException {
+    while (inSafeMode) {
+      try {
+        Thread.sleep(5000);
+      } catch (java.lang.InterruptedException e) {
+        throw new IOException("Wait Interrupted");
       }
+      inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
     }
+    return inSafeMode;
+  }
 
-    System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
+  private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
+      throws IOException {
+    while (inSafeMode) {
+      try {
+        Thread.sleep(5000);
+      } catch (java.lang.InterruptedException e) {
+        throw new IOException("Wait Interrupted");
+      }
+      inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+    }
+    return inSafeMode;
   }
 
   /**
@@ -561,7 +597,24 @@ public class DFSAdmin extends FsShell {
     int exitCode = -1;
 
     DistributedFileSystem dfs = getDFS();
-    dfs.saveNamespace();
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().saveNamespace();
+        System.out.println("Save namespace successful for " +
+            proxy.getAddress());
+      }
+    } else {
+      dfs.saveNamespace();
+      System.out.println("Save namespace successful");
+    }
     exitCode = 0;
    
     return exitCode;
@@ -583,15 +636,30 @@ public class DFSAdmin extends FsShell {
    */
   public int restoreFailedStorage(String arg) throws IOException {
     int exitCode = -1;
-
     if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
       System.err.println("restoreFailedStorage valid args are true|false|check");
       return exitCode;
     }
     
     DistributedFileSystem dfs = getDFS();
-    Boolean res = dfs.restoreFailedStorage(arg);
-    System.out.println("restoreFailedStorage is set to " + res);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+        System.out.println("restoreFailedStorage is set to " + res + " for "
+            + proxy.getAddress());
+      }
+    } else {
+      Boolean res = dfs.restoreFailedStorage(arg);
+      System.out.println("restoreFailedStorage is set to " + res);
+    }
     exitCode = 0;
 
     return exitCode;
@@ -607,7 +675,24 @@ public class DFSAdmin extends FsShell {
     int exitCode = -1;
 
     DistributedFileSystem dfs = getDFS();
-    dfs.refreshNodes();
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
+        proxy.getProxy().refreshNodes();
+        System.out.println("Refresh nodes successful for " +
+            proxy.getAddress());
+      }
+    } else {
+      dfs.refreshNodes();
+      System.out.println("Refresh nodes successful");
+    }
     exitCode = 0;
    
     return exitCode;
@@ -641,7 +726,24 @@ public class DFSAdmin extends FsShell {
     }
 
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    dfs.setBalancerBandwidth(bandwidth);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().setBalancerBandwidth(bandwidth);
+        System.out.println("Balancer bandwidth is set to " + bandwidth +
+            " for " + proxy.getAddress());
+      }
+    } else {
+      dfs.setBalancerBandwidth(bandwidth);
+      System.out.println("Balancer bandwidth is set to " + bandwidth);
+    }
     exitCode = 0;
 
     return exitCode;
@@ -937,11 +1039,18 @@ public class DFSAdmin extends FsShell {
       if (!HAUtil.isAtLeastOneActive(namenodes)) {
         throw new IOException("Cannot finalize with no NameNode active");
       }
-      for (ClientProtocol haNn : namenodes) {
-        haNn.finalizeUpgrade();
+
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().finalizeUpgrade();
+        System.out.println("Finalize upgrade successful for " +
+            proxy.getAddress());
       }
     } else {
       dfs.finalizeUpgrade();
+      System.out.println("Finalize upgrade successful");
     }
     
     return 0;
@@ -958,9 +1067,25 @@ public class DFSAdmin extends FsShell {
   public int metaSave(String[] argv, int idx) throws IOException {
     String pathname = argv[idx];
     DistributedFileSystem dfs = getDFS();
-    dfs.metaSave(pathname);
-    System.out.println("Created metasave file " + pathname + " in the log " +
-        "directory of namenode " + dfs.getUri());
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().metaSave(pathname);
+        System.out.println("Created metasave file " + pathname + " in the log "
+            + "directory of namenode " + proxy.getAddress());
+      }
+    } else {
+      dfs.metaSave(pathname);
+      System.out.println("Created metasave file " + pathname + " in the log " +
+          "directory of namenode " + dfs.getUri());
+    }
     return 0;
   }
 
@@ -1022,20 +1147,37 @@ public class DFSAdmin extends FsShell {
   public int refreshServiceAcl() throws IOException {
     // Get the current configuration
     Configuration conf = getConf();
-    
+
     // for security authorization
     // server principal for this call   
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
-    // Create the client
-    RefreshAuthorizationPolicyProtocol refreshProtocol =
-        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-            RefreshAuthorizationPolicyProtocol.class).getProxy();
-    
-    // Refresh the authorization policy in-effect
-    refreshProtocol.refreshServiceAcl();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshServiceAcl for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshAuthorizationPolicyProtocol.class);
+      for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
+        proxy.getProxy().refreshServiceAcl();
+        System.out.println("Refresh service acl successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshAuthorizationPolicyProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshAuthorizationPolicyProtocol.class).getProxy();
+      // Refresh the authorization policy in-effect
+      refreshProtocol.refreshServiceAcl();
+      System.out.println("Refresh service acl successful");
+    }
     
     return 0;
   }
@@ -1054,14 +1196,32 @@ public class DFSAdmin extends FsShell {
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- 
-    // Create the client
-    RefreshUserMappingsProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshUserMappingsProtocol.class).getProxy();
 
-    // Refresh the user-to-groups mappings
-    refreshProtocol.refreshUserToGroupsMappings();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshUserToGroupsMapings for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshUserMappingsProtocol.class);
+      for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        proxy.getProxy().refreshUserToGroupsMappings();
+        System.out.println("Refresh user to groups mapping successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshUserMappingsProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshUserMappingsProtocol.class).getProxy();
+
+      // Refresh the user-to-groups mappings
+      refreshProtocol.refreshUserToGroupsMappings();
+      System.out.println("Refresh user to groups mapping successful");
+    }
     
     return 0;
   }
@@ -1082,13 +1242,31 @@ public class DFSAdmin extends FsShell {
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
-    // Create the client
-    RefreshUserMappingsProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshUserMappingsProtocol.class).getProxy();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
 
-    // Refresh the user-to-groups mappings
-    refreshProtocol.refreshSuperUserGroupsConfiguration();
+    if (isHaEnabled) {
+      // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshUserMappingsProtocol.class);
+      for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        proxy.getProxy().refreshSuperUserGroupsConfiguration();
+        System.out.println("Refresh super user groups configuration " +
+            "successful for " + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshUserMappingsProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshUserMappingsProtocol.class).getProxy();
+
+      // Refresh the user-to-groups mappings
+      refreshProtocol.refreshSuperUserGroupsConfiguration();
+      System.out.println("Refresh super user groups configuration successful");
+    }
 
     return 0;
   }
@@ -1102,15 +1280,33 @@ public class DFSAdmin extends FsShell {
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- 
-    // Create the client
-    RefreshCallQueueProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshCallQueueProtocol.class).getProxy();
 
-    // Refresh the call queue
-    refreshProtocol.refreshCallQueue();
-    
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshCallQueue for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshCallQueueProtocol.class);
+      for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
+        proxy.getProxy().refreshCallQueue();
+        System.out.println("Refresh call queue successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshCallQueueProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshCallQueueProtocol.class).getProxy();
+
+      // Refresh the call queue
+      refreshProtocol.refreshCallQueue();
+      System.out.println("Refresh call queue successful");
+    }
+
     return 0;
   }
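
Each of the admin commands above now follows the same shape: if the filesystem URI is a logical HA URI, resolve proxies for every NameNode in the nameservice and run the operation against each, reporting per address; otherwise fall back to the single DistributedFileSystem call. A minimal sketch of that fan-out pattern with a hypothetical NamenodeAction interface (the real code obtains the proxies from HAUtil.getProxiesForAllNameNodesInNameservice):

    import java.io.IOException;
    import java.util.List;

    public class HaAdminFanout {
      /** One admin RPC, e.g. refreshNodes() or saveNamespace(). */
      interface NamenodeAction {
        void run(String namenodeAddress) throws IOException;
      }

      /**
       * Run the action once against a single NameNode, or once per NameNode when
       * the filesystem URI is a logical (HA) URI, reporting each address.
       */
      static void runOnNamenodes(boolean isHaEnabled, String defaultNn,
          List<String> allNamenodes, String what, NamenodeAction action)
          throws IOException {
        if (isHaEnabled) {
          for (String nn : allNamenodes) {
            action.run(nn);
            System.out.println(what + " successful for " + nn);
          }
        } else {
          action.run(defaultNn);
          System.out.println(what + " successful");
        }
      }

      public static void main(String[] args) throws IOException {
        runOnNamenodes(true, "nn0:8020", List.of("nn0:8020", "nn1:8020"),
            "Refresh nodes", nn -> { /* proxy.getProxy().refreshNodes() would go here */ });
      }
    }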
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java Mon Jul  7 20:43:56 2014
@@ -36,8 +36,8 @@ public class SWebHdfsFileSystem extends 
   }
 
   @Override
-  protected synchronized void initializeTokenAspect() {
-    tokenAspect = new TokenAspect<SWebHdfsFileSystem>(this, tokenServiceName, TOKEN_KIND);
+  protected Text getTokenKind() {
+    return TOKEN_KIND;
   }
 
   @Override

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Mon Jul  7 20:43:56 2014
@@ -69,11 +69,14 @@ import org.apache.hadoop.io.retry.RetryP
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
@@ -98,7 +101,7 @@ public class WebHdfsFileSystem extends F
 
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-  protected TokenAspect<? extends WebHdfsFileSystem> tokenAspect;
+  private boolean canRefreshDelegationToken;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -127,13 +130,8 @@ public class WebHdfsFileSystem extends F
     return "http";
   }
 
-  /**
-   * Initialize tokenAspect. This function is intended to
-   * be overridden by SWebHdfsFileSystem.
-   */
-  protected synchronized void initializeTokenAspect() {
-    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, tokenServiceName,
-        TOKEN_KIND);
+  protected Text getTokenKind() {
+    return TOKEN_KIND;
   }
 
   @Override
@@ -162,7 +160,6 @@ public class WebHdfsFileSystem extends F
     this.tokenServiceName = isLogicalUri ?
         HAUtil.buildTokenServiceForLogicalUri(uri)
         : SecurityUtil.buildTokenService(getCanonicalUri());
-    initializeTokenAspect();
 
     if (!isHA) {
       this.retryPolicy =
@@ -195,10 +192,8 @@ public class WebHdfsFileSystem extends F
     }
 
     this.workingDir = getHomeDirectory();
-
-    if (UserGroupInformation.isSecurityEnabled()) {
-      tokenAspect.initDelegationToken(ugi);
-    }
+    this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
+    this.delegationToken = null;
   }
 
   @Override
@@ -213,11 +208,46 @@ public class WebHdfsFileSystem extends F
     return b;
   }
 
+  TokenSelector<DelegationTokenIdentifier> tokenSelector =
+      new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(getTokenKind()){};
+
+  // the first getAuthParams() for a non-token op will either get the
+  // internal token from the ugi or lazy fetch one
   protected synchronized Token<?> getDelegationToken() throws IOException {
-    tokenAspect.ensureTokenInitialized();
+    if (canRefreshDelegationToken && delegationToken == null) {
+      Token<?> token = tokenSelector.selectToken(
+          new Text(getCanonicalServiceName()), ugi.getTokens());
+      // ugi tokens are usually indicative of a task which can't
+      // refetch tokens.  even if ugi has credentials, don't attempt
+      // to get another token to match hdfs/rpc behavior
+      if (token != null) {
+        LOG.debug("Using UGI token: " + token);
+        canRefreshDelegationToken = false; 
+      } else {
+        token = getDelegationToken(null);
+        if (token != null) {
+          LOG.debug("Fetched new token: " + token);
+        } else { // security is disabled
+          canRefreshDelegationToken = false;
+        }
+      }
+      setDelegationToken(token);
+    }
     return delegationToken;
   }
 
+  @VisibleForTesting
+  synchronized boolean replaceExpiredDelegationToken() throws IOException {
+    boolean replaced = false;
+    if (canRefreshDelegationToken) {
+      Token<?> token = getDelegationToken(null);
+      LOG.debug("Replaced expired token: " + token);
+      setDelegationToken(token);
+      replaced = (token != null);
+    }
+    return replaced;
+  }
+
   @Override
   protected int getDefaultPort() {
     return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
@@ -288,8 +318,8 @@ public class WebHdfsFileSystem extends F
     final int code = conn.getResponseCode();
     // server is demanding an authentication we don't support
     if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
-      throw new IOException(
-          new AuthenticationException(conn.getResponseMessage()));
+      // match hdfs/rpc exception
+      throw new AccessControlException(conn.getResponseMessage());
     }
     if (code != op.getExpectedHttpResponseCode()) {
       final Map<?, ?> m;
@@ -309,7 +339,15 @@ public class WebHdfsFileSystem extends F
         return m;
       }
 
-      final RemoteException re = JsonUtil.toRemoteException(m);
+      IOException re = JsonUtil.toRemoteException(m);
+      // extract UGI-related exceptions and unwrap InvalidToken
+      // the NN mangles these exceptions but the DN does not and may need
+      // to re-fetch a token if either report the token is expired
+      if (re.getMessage().startsWith("Failed to obtain user group information:")) {
+        String[] parts = re.getMessage().split(":\\s+", 3);
+        re = new RemoteException(parts[1], parts[2]);
+        re = ((RemoteException)re).unwrapRemoteException(InvalidToken.class);
+      }
       throw unwrapException? toIOException(re): re;
     }
     return null;
@@ -344,8 +382,6 @@ public class WebHdfsFileSystem extends F
    */
   private synchronized void resetStateToFailOver() {
     currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
-    delegationToken = null;
-    tokenAspect.reset();
   }
 
   /**
@@ -371,7 +407,7 @@ public class WebHdfsFileSystem extends F
     // Skip adding delegation token for token operations because these
     // operations require authentication.
     Token<?> token = null;
-    if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
+    if (!op.getRequireAuth()) {
       token = getDelegationToken();
     }
     if (token != null) {
@@ -542,11 +578,17 @@ public class WebHdfsFileSystem extends F
             validateResponse(op, conn, false);
           }
           return getResponse(conn);
-        } catch (IOException ioe) {
-          Throwable cause = ioe.getCause();
-          if (cause != null && cause instanceof AuthenticationException) {
-            throw ioe; // no retries for auth failures
+        } catch (AccessControlException ace) {
+          // no retries for auth failures
+          throw ace;
+        } catch (InvalidToken it) {
+          // try to replace the expired token with a new one.  the attempt
+          // to acquire a new token must be outside this operation's retry
+          // so if it fails after its own retries, this operation fails too.
+          if (op.getRequireAuth() || !replaceExpiredDelegationToken()) {
+            throw it;
           }
+        } catch (IOException ioe) {
           shouldRetry(ioe, retry);
         }
       }
@@ -714,6 +756,17 @@ public class WebHdfsFileSystem extends F
       };
     }
   }
+
+  class FsPathConnectionRunner extends AbstractFsPathRunner<HttpURLConnection> {
+    FsPathConnectionRunner(Op op, Path fspath, Param<?,?>... parameters) {
+      super(op, fspath, parameters);
+    }
+    @Override
+    HttpURLConnection getResponse(final HttpURLConnection conn)
+        throws IOException {
+      return conn;
+    }
+  }
   
   /**
    * Used by open() which tracks the resolved url itself
@@ -1079,16 +1132,41 @@ public class WebHdfsFileSystem extends F
       ) throws IOException {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.OPEN;
-    final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
+    // use a runner so the open can recover from an invalid token
+    FsPathConnectionRunner runner =
+        new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
     return new FSDataInputStream(new OffsetUrlInputStream(
-        new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
+        new UnresolvedUrlOpener(runner), new OffsetUrlOpener(null)));
   }
 
   @Override
-  public void close() throws IOException {
-    super.close();
-    synchronized (this) {
-      tokenAspect.removeRenewAction();
+  public synchronized void close() throws IOException {
+    try {
+      if (canRefreshDelegationToken && delegationToken != null) {
+        cancelDelegationToken(delegationToken);
+      }
+    } catch (IOException ioe) {
+      LOG.debug("Token cancel failed: "+ioe);
+    } finally {
+      super.close();
+    }
+  }
+
+  // use FsPathConnectionRunner to ensure retries for InvalidTokens
+  class UnresolvedUrlOpener extends ByteRangeInputStream.URLOpener {
+    private final FsPathConnectionRunner runner;
+    UnresolvedUrlOpener(FsPathConnectionRunner runner) {
+      super(null);
+      this.runner = runner;
+    }
+
+    @Override
+    protected HttpURLConnection connect(long offset, boolean resolved)
+        throws IOException {
+      assert offset == 0;
+      HttpURLConnection conn = runner.run();
+      setURL(conn.getURL());
+      return conn;
     }
   }
 
@@ -1141,7 +1219,7 @@ public class WebHdfsFileSystem extends F
   }
 
   static class OffsetUrlInputStream extends ByteRangeInputStream {
-    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r)
+    OffsetUrlInputStream(UnresolvedUrlOpener o, OffsetUrlOpener r)
         throws IOException {
       super(o, r);
     }
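
The token handling above replaces the TokenAspect machinery with lazy in-class management: the first secured request either reuses a token already present in the UGI credentials (and then never refetches, to match RPC behaviour) or fetches a fresh one, and an InvalidToken raised later triggers a single replaceExpiredDelegationToken() attempt before the operation retries. A stripped-down sketch of that state machine, with a hypothetical TokenSource standing in for the UGI credential lookup and the token RPC:

    import java.io.IOException;

    public class LazyTokenManager {
      /** Hypothetical supplier standing in for the UGI credentials / token RPC. */
      interface TokenSource {
        String fromCredentials();             // token already handed to the task, or null
        String fetchNew() throws IOException; // ask the server for a new token
      }

      private final TokenSource source;
      private boolean canRefresh;             // false once we decide never to refetch
      private String token;                   // cached delegation token

      LazyTokenManager(TokenSource source, boolean securityEnabled) {
        this.source = source;
        this.canRefresh = securityEnabled;
      }

      synchronized String getToken() throws IOException {
        if (canRefresh && token == null) {
          String t = source.fromCredentials();
          if (t != null) {
            canRefresh = false;               // a task-supplied token is never refetched
          } else {
            t = source.fetchNew();
            if (t == null) {
              canRefresh = false;             // security turned out to be disabled
            }
          }
          token = t;
        }
        return token;
      }

      /** Called when the server rejects the token as expired; true if replaced. */
      synchronized boolean replaceExpiredToken() throws IOException {
        if (!canRefresh) {
          return false;
        }
        token = source.fetchNew();
        return token != null;
      }

      public static void main(String[] args) throws IOException {
        LazyTokenManager m = new LazyTokenManager(new TokenSource() {
          public String fromCredentials() { return null; }
          public String fetchNew() { return "token-1"; }
        }, true);
        System.out.println(m.getToken());            // token-1, fetched lazily
        System.out.println(m.replaceExpiredToken());  // true, refetched on expiry
      }
    }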

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java Mon Jul  7 20:43:56 2014
@@ -31,8 +31,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.ParamException;
 import com.sun.jersey.api.container.ContainerException;
 
@@ -42,9 +45,22 @@ public class ExceptionHandler implements
   public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
 
   private static Exception toCause(Exception e) {
-    final Throwable t = e.getCause();
-    if (t != null && t instanceof Exception) {
-      e = (Exception)e.getCause();
+    final Throwable t = e.getCause();    
+    if (e instanceof SecurityException) {
+      // For the issue reported in HDFS-6475, if SecurityException's cause
+      // is InvalidToken, and the InvalidToken's cause is StandbyException,
+      // return StandbyException; Otherwise, leave the exception as is,
+      // since they are handled elsewhere. See HDFS-6588.
+      if (t != null && t instanceof InvalidToken) {
+        final Throwable t1 = t.getCause();
+        if (t1 != null && t1 instanceof StandbyException) {
+          e = (StandbyException)t1;
+        }
+      }
+    } else {
+      if (t != null && t instanceof Exception) {
+        e = (Exception)t;
+      }
     }
     return e;
   }
@@ -74,6 +90,10 @@ public class ExceptionHandler implements
       e = ((RemoteException)e).unwrapRemoteException();
     }
 
+    if (e instanceof SecurityException) {
+      e = toCause(e);
+    }
+    
     //Map response status
     final Response.Status s;
     if (e instanceof SecurityException) {
@@ -96,4 +116,9 @@ public class ExceptionHandler implements
     final String js = JsonUtil.toJsonString(e);
     return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
   }
+  
+  @VisibleForTesting
+  public void initResponse(HttpServletResponse response) {
+    this.response = response;
+  }
 }

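The new branch in toCause() digs through a SecurityException whose cause is an InvalidToken whose cause is a StandbyException, so that requests hitting a standby NameNode are reported as StandbyException rather than a generic security failure (HDFS-6475, HDFS-6588). A generic illustration of that kind of cause-chain unwrapping, using only plain Java (CauseUtil and findCause are hypothetical helpers, not part of this patch), could be:

    // Illustrative only: walk a cause chain looking for a particular exception
    // type, similar in spirit to the unwrapping in toCause() above.
    final class CauseUtil {
      private CauseUtil() {}

      static <T extends Throwable> T findCause(Throwable e, Class<T> type) {
        for (Throwable t = e.getCause(); t != null; t = t.getCause()) {
          if (type.isInstance(t)) {
            return type.cast(t);
          }
        }
        return null; // no cause of the requested type in the chain
      }
    }
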
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Mon Jul  7 20:43:56 2014
@@ -294,6 +294,7 @@ message SnapshottableDirectoryListingPro
 message SnapshotDiffReportEntryProto {
   required bytes fullpath = 1;
   required string modificationLabel = 2;
+  optional bytes targetPath = 3;
 }
 
 /**

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier Mon Jul  7 20:43:56 2014
@@ -13,3 +13,5 @@
 #
 org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
 org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$WebHdfsDelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$SWebHdfsDelegationTokenIdentifier

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Jul  7 20:43:56 2014
@@ -1996,4 +1996,16 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+  <value>0</value>
+  <description>The delay in seconds before block deletion starts after
+    Namenode startup. By default it is disabled (0).
+    When a directory containing a large number of directories and files is
+    deleted, a suggested delay of one hour gives the administrator enough time
+    to notice the large number of pending deletion blocks and take corrective
+    action.
+  </description>
+</property>
+
 </configuration>

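The new dfs.namenode.startup.delay.block.deletion.sec key can also be set programmatically, for instance when building a Configuration for a test cluster. A minimal sketch, assuming the suggested one-hour delay:

    import org.apache.hadoop.conf.Configuration;

    public class StartupDelayExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Pause block deletions for one hour after NameNode startup so an
        // administrator can react to a large accidental delete; 0 disables it.
        conf.setLong("dfs.namenode.startup.delay.block.deletion.sec", 3600);
        System.out.println(conf.get("dfs.namenode.startup.delay.block.deletion.sec"));
      }
    }
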
Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1602934-1608600

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Mon Jul  7 20:43:56 2014
@@ -58,7 +58,7 @@
           var msg = '<p>Path does not exist on HDFS or WebHDFS is disabled.  Please check your path or enable WebHDFS</p>';
           break;
         default:
-          var msg = '<p>Failed to retreive data from ' + url + ': ' + err + '</p>';
+          var msg = '<p>Failed to retrieve data from ' + url + ': ' + err + '</p>';
         }
       show_err_msg(msg);
     };

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Mon Jul  7 20:43:56 2014
@@ -88,6 +88,25 @@ HDFS NFS Gateway
   </property>
 ----
 
+   The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
+   that prevent it from working correctly by default with the HDFS NFS
+   Gateway. If you want to be able to access the HDFS NFS Gateway from AIX, you
+   should set the following configuration setting to enable work-arounds for these
+   issues:
+
+----
+<property>
+  <name>nfs.aix.compatibility.mode.enabled</name>
+  <value>true</value>
+</property>
+----
+
+   Note that regular, non-AIX clients should NOT enable AIX compatibility mode.
+   The work-arounds implemented by AIX compatibility mode effectively disable
+   safeguards to ensure that listing of directory contents via NFS returns
+   consistent results, and that all data sent to the NFS server can be assured to
+   have been committed.
+
    It's strongly recommended for the users to update a few configuration properties based on their use
    cases. All the related configuration properties can be added or updated in hdfs-site.xml.
   

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Mon Jul  7 20:43:56 2014
@@ -70,6 +70,18 @@ WebHDFS REST API
     * {{{Get Delegation Tokens}<<<GETDELEGATIONTOKENS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getDelegationTokens)
 
+    * {{{Get an XAttr}<<<GETXATTRS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttr)
+
+    * {{{Get multiple XAttrs}<<<GETXATTRS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
+
+    * {{{Get all XAttrs}<<<GETXATTRS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs)
+
+    * {{{List all XAttrs}<<<LISTXATTRS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
+
   * HTTP PUT
 
     * {{{Create and Write to a File}<<<CREATE>>>}}
@@ -108,6 +120,12 @@ WebHDFS REST API
     * {{{Rename Snapshot}<<<RENAMESNAPSHOT>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.renameSnapshot)
 
+    * {{{Set XAttr}<<<SETXATTR>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setXAttr)
+
+    * {{{Remove XAttr}<<<REMOVEXATTR>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeXAttr)
+
   * HTTP POST
 
     * {{{Append to a File}<<<APPEND>>>}}
@@ -909,6 +927,188 @@ Transfer-Encoding: chunked
   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
 
 
+* {Extended Attributes(XAttrs) Operations}
+
+** {Set XAttr}
+
+  * Submit a HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=op=SETXATTR
+                              &xattr.name=<XATTRNAME>&xattr.value=<XATTRVALUE>
+                              &flag=<FLAG>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.setXAttr
+
+
+** {Remove XAttr}
+
+  * Submit a HTTP PUT request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=REMOVEXATTR
+                              &xattr.name=<XATTRNAME>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.removeXAttr
+
+
+** {Get an XAttr}
+
+  * Submit a HTTP GET request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+                              &xattr.name=<XATTRNAME>&encoding=<ENCODING>"
++---------------------------------
+
+  The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "XAttrs": [
+        {
+            "name":"XATTRNAME",
+            "value":"XATTRVALUE"
+        }
+    ]
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttr
+
+
+** {Get multiple XAttrs}
+
+  * Submit a HTTP GET request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+                              &xattr.name=<XATTRNAME1>&xattr.name=<XATTRNAME2>
+                              &encoding=<ENCODING>"
++---------------------------------
+
+  The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "XAttrs": [
+        {
+            "name":"XATTRNAME1",
+            "value":"XATTRVALUE1"
+        },
+        {
+            "name":"XATTRNAME2",
+            "value":"XATTRVALUE2"
+        }
+    ]
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs
+
+
+** {Get all XAttrs}
+
+  * Submit a HTTP GET request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETXATTRS
+                              &encoding=<ENCODING>"
++---------------------------------
+
+  The client receives a response with a {{{XAttrs JSON Schema}<<<XAttrs>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "XAttrs": [
+        {
+            "name":"XATTRNAME1",
+            "value":"XATTRVALUE1"
+        },
+        {
+            "name":"XATTRNAME2",
+            "value":"XATTRVALUE2"
+        },
+        {
+            "name":"XATTRNAME3",
+            "value":"XATTRVALUE3"
+        }
+    ]
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getXAttrs
+
+
+** {List all XAttrs}
+
+  * Submit a HTTP GET request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTXATTRS"
++---------------------------------
+
+  The client receives a response with a {{{XAttrNames JSON Schema}<<<XAttrNames>>> JSON object}}:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{
+    "XAttrNames":"[\"XATTRNAME1\",\"XATTRNAME2\",\"XATTRNAME3\"]"
+}
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs
+
+
 * {Snapshot Operations}
 
 ** {Create Snapshot}
@@ -1252,6 +1452,58 @@ Transfer-Encoding: chunked
 +---------------------------------
 
 
+** {XAttrs JSON Schema}
+
++---------------------------------
+{
+  "name"      : "XAttrs",
+  "properties":
+  {
+    "XAttrs":
+    {
+      "type"      : "array",
+      "items":
+      {
+        "type"    " "object",
+        "properties":
+        {
+          "name":
+          {
+            "description": "XAttr name.",
+            "type"       : "string",
+            "required"   : true
+          },
+          "value":
+          {
+            "description": "XAttr value.",
+            "type"       : "string"
+          }
+        }
+      }
+    }
+  }
+}
++---------------------------------
+
+
+** {XAttrNames JSON Schema}
+
++---------------------------------
+{
+  "name"      : "XAttrNames",
+  "properties":
+  {
+    "XAttrNames":
+    {
+      "description": "XAttr names.",
+      "type"       : "string"
+      "required"   : true
+    }
+  }
+}
++---------------------------------
+
+
 ** {Boolean JSON Schema}
 
 +---------------------------------
@@ -1688,6 +1940,83 @@ var tokenProperties =
 *----------------+-------------------------------------------------------------------+
 
 
+** {XAttr Name}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<xattr.name>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The XAttr name of a file/directory. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | Any string prefixed with user./trusted./system./security.. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | Any string prefixed with user./trusted./system./security.. |
+*----------------+-------------------------------------------------------------------+
+
+
+** {XAttr Value}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<xattr.value>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The XAttr value of a file/directory. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | An encoded value. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | Enclosed in double quotes or prefixed with 0x or 0s. |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
+** {XAttr set flag}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<flag>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The XAttr set flag. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | CREATE,REPLACE. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | CREATE,REPLACE. |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
+** {XAttr value encoding}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<encoding>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The XAttr value encoding. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | text \| hex \| base64 |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | text \| hex \| base64 |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{./ExtendedAttributes.html}Extended Attributes}}
+
+
 ** {Access Time}
 
 *----------------+-------------------------------------------------------------------+

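The XAttr operations documented above map directly onto the FileSystem XAttr API, so the same calls work over a webhdfs:// URI. A brief sketch, assuming a reachable NameNode HTTP endpoint at namenode:50070 and a path the caller may write to (both hypothetical):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrOverWebHdfs {
      public static void main(String[] args) throws Exception {
        // Hypothetical host/port; the webhdfs:// scheme routes through WebHdfsFileSystem.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"),
            new Configuration());
        Path p = new Path("/tmp/xattr-demo");
        fs.create(p).close();                                 // file to attach the XAttr to

        fs.setXAttr(p, "user.example", "some value".getBytes("UTF-8"));
        byte[] value = fs.getXAttr(p, "user.example");        // single attribute
        System.out.println(new String(value, "UTF-8"));

        System.out.println(fs.listXAttrs(p));                 // all names visible to the caller
        fs.removeXAttr(p, "user.example");
      }
    }
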
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml?rev=1608603&r1=1608602&r2=1608603&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml Mon Jul  7 20:43:56 2014
@@ -97,7 +97,9 @@
     <li>Listing the files in snapshot <code>s0</code>:
       <source>hdfs dfs -ls /foo/.snapshot/s0</source></li>
     <li>Copying a file from snapshot <code>s0</code>:
-      <source>hdfs dfs -cp /foo/.snapshot/s0/bar /tmp</source></li>
+      <source>hdfs dfs -cp -ptopax /foo/.snapshot/s0/bar /tmp</source>
+      <p>Note that this example uses the preserve option to preserve
+         timestamps, ownership, permission, ACLs and XAttrs.</p></li>
   </ul>
   </subsection>
   </section>
@@ -255,7 +257,35 @@
       <tr><td>fromSnapshot</td><td>The name of the starting snapshot.</td></tr>
       <tr><td>toSnapshot</td><td>The name of the ending snapshot.</td></tr>
     </table></li>
-  </ul>
+    <li>Results:
+      <table>
+        <tr><td>+</td><td>The file/directory has been created.</td></tr>
+        <tr><td>-</td><td>The file/directory has been deleted.</td></tr>
+        <tr><td>M</td><td>The file/directory has been modified.</td></tr>
+        <tr><td>R</td><td>The file/directory has been renamed.</td></tr>
+      </table>
+    </li>
+  </ul>
+  <p>
+    A <em>RENAME</em> entry indicates a file/directory has been renamed but
+    is still under the same snapshottable directory. A file/directory is
+    reported as deleted if it was renamed to outside of the snapshottable directory.
+    A file/directory renamed from outside of the snapshottable directory is
+    reported as newly created.
+  </p>
+  <p>
+    The snapshot difference report does not necessarily reflect the actual
+    sequence of operations.
+    For example, if we rename the directory <em>"/foo"</em> to <em>"/foo2"</em>, and
+    then append new data to the file <em>"/foo2/bar"</em>, the difference report will
+    be:
+    <source>
+    R. /foo -> /foo2
+    M. /foo/bar
+    </source>
+    I.e., the changes to files/directories under a renamed directory are
+    reported using the original path before the rename (<em>"/foo/bar"</em> in
+    the above example).
+  </p>
   <p>
     See also the corresponding Java API
     <code>SnapshotDiffReport getSnapshotDiffReport(Path path, String fromSnapshot, String toSnapshot)</code>

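The rename-aware diff report described in the HdfsSnapshots.xml changes above is also exposed through the Java API quoted at the end of that section. A brief sketch, assuming a snapshottable directory /foo with snapshots s0 and s1 already taken:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class SnapshotDiffExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("fs.defaultFS must point at an HDFS cluster");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        // Differences between snapshots s0 and s1 of /foo; renames show up as
        // 'R' entries, and changes under a renamed directory keep the pre-rename path.
        SnapshotDiffReport report =
            dfs.getSnapshotDiffReport(new Path("/foo"), "s0", "s1");
        System.out.println(report);
      }
    }
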

