hadoop-hdfs-commits mailing list archives

From: umamah...@apache.org
Subject: svn commit: r1601992 [2/3] - in /hadoop/common/branches/branch-2/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/fs/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java...
Date: Wed, 11 Jun 2014 19:25:20 GMT
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Jun 11 19:25:17 2014
@@ -134,6 +134,8 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -497,7 +499,7 @@ public class FSNamesystem implements Nam
   
   private final RetryCache retryCache;
 
-  private final AclConfigFlag aclConfigFlag;
+  private final NNConf nnConf;
 
   /**
    * Set the last allocated inode id when fsimage or editlog is loaded. 
@@ -766,7 +768,7 @@ public class FSNamesystem implements Nam
       this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
         auditLoggers.get(0) instanceof DefaultAuditLogger;
       this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
-      this.aclConfigFlag = new AclConfigFlag(conf);
+      this.nnConf = new NNConf(conf);
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -1103,8 +1105,10 @@ public class FSNamesystem implements Nam
         // so that the tailer starts from the right spot.
         dir.fsImage.updateLastAppliedTxIdFromWritten();
       }
-      cacheManager.stopMonitorThread();
-      cacheManager.clearDirectiveStats();
+      if (cacheManager != null) {
+        cacheManager.stopMonitorThread();
+        cacheManager.clearDirectiveStats();
+      }
       blockManager.getDatanodeManager().clearPendingCachingCommands();
       blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
       // Don't want to keep replication queues when not in Active.
@@ -7770,7 +7774,7 @@ public class FSNamesystem implements Nam
   }
 
   void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -7792,7 +7796,7 @@ public class FSNamesystem implements Nam
   }
 
   void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -7814,7 +7818,7 @@ public class FSNamesystem implements Nam
   }
 
   void removeDefaultAcl(String src) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -7836,7 +7840,7 @@ public class FSNamesystem implements Nam
   }
 
   void removeAcl(String src) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -7858,7 +7862,7 @@ public class FSNamesystem implements Nam
   }
 
   void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -7880,7 +7884,7 @@ public class FSNamesystem implements Nam
   }
 
   AclStatus getAclStatus(String src) throws IOException {
-    aclConfigFlag.checkForApiCall();
+    nnConf.checkAclsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     readLock();
@@ -7894,6 +7898,167 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
+  
+  /**
+   * Set an xattr for a file or directory.
+   * 
+   * @param src path on which to set the xattr
+   * @param xAttr the xattr to set
+   * @param flag xattr set flags
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+      throws AccessControlException, SafeModeException,
+      UnresolvedLinkException, IOException {
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
+    try {
+      setXAttrInt(src, xAttr, flag, cacheEntry != null);
+      success = true;
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "setXAttr", src);
+      throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+  
+  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
+      boolean logRetryCache) throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    checkXAttrSize(xAttr);
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot set XAttr on " + src);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      if (isPermissionEnabled) {
+        checkOwner(pc, src);
+        checkPathAccess(pc, src, FsAction.WRITE);
+      }
+      dir.setXAttr(src, xAttr, flag, logRetryCache);
+      resultingStat = getAuditFileInfo(src, false);
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+    logAuditEvent(true, "setXAttr", src, null, resultingStat);
+  }
+
+  /**
+   * Verifies that the combined size of the name and value of an xattr is within
+   * the configured limit. Setting a limit of zero disables this check.
+   */
+  private void checkXAttrSize(XAttr xAttr) {
+    if (nnConf.xattrMaxSize == 0) {
+      return;
+    }
+    int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
+    if (xAttr.getValue() != null) {
+      size += xAttr.getValue().length;
+    }
+    if (size > nnConf.xattrMaxSize) {
+      throw new HadoopIllegalArgumentException(
+          "The XAttr is too big. The maximum combined size of the"
+          + " name and value is " + nnConf.xattrMaxSize
+          + ", but the total size is " + size);
+    }
+  }
+  
+  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    FSPermissionChecker pc = getPermissionChecker();
+    boolean getAll = xAttrs == null || xAttrs.isEmpty();
+    List<XAttr> filteredXAttrs = null;
+    if (!getAll) {
+      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
+      if (filteredXAttrs.isEmpty()) {
+        return filteredXAttrs;
+      }
+    }
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      if (isPermissionEnabled) {
+        checkPathAccess(pc, src, FsAction.READ);
+      }
+      List<XAttr> all = dir.getXAttrs(src);
+      List<XAttr> filteredAll = XAttrPermissionFilter.
+          filterXAttrsForApi(pc, all);
+      if (getAll) {
+        return filteredAll;
+      } else {
+        if (filteredAll == null || filteredAll.isEmpty()) {
+          return null;
+        }
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
+        for (XAttr xAttr : filteredXAttrs) {
+          for (XAttr a : filteredAll) {
+            if (xAttr.getNameSpace() == a.getNameSpace()
+                && xAttr.getName().equals(a.getName())) {
+              toGet.add(a);
+              break;
+            }
+          }
+        }
+        return toGet;
+      }
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "getXAttrs", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+  }
+  
+  void removeXAttr(String src, XAttr xAttr) throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+    try {
+      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "removeXAttr", src);
+      throw e;
+    }
+    checkOperation(OperationCategory.WRITE);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      if (isPermissionEnabled) {
+        checkOwner(pc, src);
+        checkPathAccess(pc, src, FsAction.WRITE);
+      }
+      
+      dir.removeXAttr(src, xAttr);
+      resultingStat = getAuditFileInfo(src, false);
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "removeXAttr", src);
+      throw e;
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+    logAuditEvent(true, "removeXAttr", src, null, resultingStat);
+  }
 
   /**
    * Default AuditLogger implementation; used when no access logger is
@@ -7980,6 +8145,5 @@ public class FSNamesystem implements Nam
       logger.addAppender(asyncAppender);        
     }
   }
-
 }
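
For context beyond the diff: once NameNodeRpcServer (further below) wires these FSNamesystem entry points into the client protocol, applications reach them through the FileSystem-level xattr API merged on the same feature branch. A minimal sketch, assuming that client API is available; the path and xattr name are illustrative:

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrClientSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/test/file");  // illustrative path
    // CREATE fails if the xattr already exists; REPLACE fails if it does not.
    fs.setXAttr(p, "user.checksum", "abc".getBytes("UTF-8"),
        EnumSet.of(XAttrSetFlag.CREATE));
    byte[] value = fs.getXAttr(p, "user.checksum");  // {0x61, 0x62, 0x63}
    fs.removeXAttr(p, "user.checksum");
  }
}

Each call funnels through the corresponding FSNamesystem method above, so a NameNode started with dfs.namenode.xattrs.enabled=false rejects all three via nnConf.checkXAttrsConfigFlag().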
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Wed Jun 11 19:25:17 2014
@@ -177,6 +177,44 @@ public abstract class INode implements I
     nodeToUpdate.removeAclFeature();
     return nodeToUpdate;
   }
+
+  /**
+   * @param snapshotId
+   *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
+   *          from the given snapshot; otherwise, get the result from the
+   *          current inode.
+   * @return XAttrFeature
+   */  
+  abstract XAttrFeature getXAttrFeature(int snapshotId);
+  
+  @Override
+  public final XAttrFeature getXAttrFeature() {
+    return getXAttrFeature(Snapshot.CURRENT_STATE_ID);
+  }
+  
+  /**
+   * Set <code>XAttrFeature</code> 
+   */
+  abstract void addXAttrFeature(XAttrFeature xAttrFeature);
+  
+  final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) 
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latestSnapshotId);
+    nodeToUpdate.addXAttrFeature(xAttrFeature);
+    return nodeToUpdate;
+  }
+  
+  /**
+   * Remove <code>XAttrFeature</code> 
+   */
+  abstract void removeXAttrFeature();
+  
+  final INode removeXAttrFeature(int latestSnapshotId)
+      throws QuotaExceededException {
+    final INode nodeToUpdate = recordModification(latestSnapshotId);
+    nodeToUpdate.removeXAttrFeature();
+    return nodeToUpdate;
+  }
   
   /**
    * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
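
Since getXAttrFeature(int) resolves against a snapshot copy whenever the id is not Snapshot.CURRENT_STATE_ID, xattrs behave like ACLs under snapshots: reading through a snapshot path returns the attribute as captured at snapshot time. A hedged sketch, reusing fs from the earlier example and assuming a snapshot named s1 exists on /dir:

// Current state: resolved via getXAttrFeature(Snapshot.CURRENT_STATE_ID).
byte[] current = fs.getXAttr(new Path("/dir/file"), "user.checksum");
// Snapshot state: resolved via getXAttrFeature(snapshotId) for s1.
byte[] atS1 = fs.getXAttr(new Path("/dir/.snapshot/s1/file"), "user.checksum");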

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java Wed Jun 11 19:25:17 2014
@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of an inode.
@@ -50,6 +51,9 @@ public interface INodeAttributes {
 
   /** @return the ACL feature. */
   public AclFeature getAclFeature();
+  
+  /** @return the XAttrs feature. */
+  public XAttrFeature getXAttrFeature();
 
   /** @return the modification time. */
   public long getModificationTime();
@@ -64,14 +68,17 @@ public interface INodeAttributes {
     private final AclFeature aclFeature;
     private final long modificationTime;
     private final long accessTime;
+    private XAttrFeature xAttrFeature;
 
     SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime, long accessTime) {
+        AclFeature aclFeature, long modificationTime, long accessTime, 
+        XAttrFeature xAttrFeature) {
       this.name = name;
       this.permission = PermissionStatusFormat.toLong(permissions);
       this.aclFeature = aclFeature;
       this.modificationTime = modificationTime;
       this.accessTime = accessTime;
+      this.xAttrFeature = xAttrFeature;
     }
 
     SnapshotCopy(INode inode) {
@@ -80,6 +87,7 @@ public interface INodeAttributes {
       this.aclFeature = inode.getAclFeature();
       this.modificationTime = inode.getModificationTime();
       this.accessTime = inode.getAccessTime();
+      this.xAttrFeature = inode.getXAttrFeature();
     }
 
     @Override
@@ -128,5 +136,10 @@ public interface INodeAttributes {
     public final long getAccessTime() {
       return accessTime;
     }
+    
+    @Override
+    public final XAttrFeature getXAttrFeature() {
+      return xAttrFeature;
+    }
   }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java Wed Jun 11 19:25:17 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -35,8 +36,9 @@ public interface INodeDirectoryAttribute
   public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
       implements INodeDirectoryAttributes {
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
-        AclFeature aclFeature, long modificationTime) {
-      super(name, permissions, aclFeature, modificationTime, 0L);
+        AclFeature aclFeature, long modificationTime, 
+        XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
     }
 
     public SnapshotCopy(INodeDirectory dir) {
@@ -63,8 +65,8 @@ public interface INodeDirectoryAttribute
 
     public CopyWithQuota(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long nsQuota,
-        long dsQuota) {
-      super(name, permissions, aclFeature, modificationTime);
+        long dsQuota, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
       this.nsQuota = nsQuota;
       this.dsQuota = dsQuota;
     }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java Wed Jun 11 19:25:17 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * The attributes of a file.
@@ -42,8 +43,9 @@ public interface INodeFileAttributes ext
 
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize) {
-      super(name, permissions, aclFeature, modificationTime, accessTime);
+        short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
+      super(name, permissions, aclFeature, modificationTime, accessTime, 
+          xAttrsFeature);
 
       final long h = HeaderFormat.combineReplication(0L, replication);
       header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Wed Jun 11 19:25:17 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 import com.google.common.base.Preconditions;
 
@@ -228,6 +229,21 @@ public abstract class INodeReference ext
   final void removeAclFeature() {
     referred.removeAclFeature();
   }
+  
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    return referred.getXAttrFeature(snapshotId);
+  }
+  
+  @Override
+  final void addXAttrFeature(XAttrFeature xAttrFeature) {
+    referred.addXAttrFeature(xAttrFeature);
+  }
+  
+  @Override
+  final void removeXAttrFeature() {
+    referred.removeXAttrFeature();
+  }
 
   @Override
   public final short getFsPermissionShort() {

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java Wed Jun 11 19:25:17 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 
 /**
  * An {@link INode} representing a symbolic link.
@@ -130,4 +131,19 @@ public class INodeSymlink extends INodeW
   public void addAclFeature(AclFeature f) {
     throw new UnsupportedOperationException("ACLs are not supported on symlinks");
   }
+
+  @Override
+  final XAttrFeature getXAttrFeature(int snapshotId) {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
+  
+  @Override
+  public void removeXAttrFeature() {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
+  
+  @Override
+  public void addXAttrFeature(XAttrFeature f) {
+    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
+  }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java Wed Jun 11 19:25:17 2014
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.P
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -340,6 +341,30 @@ public abstract class INodeWithAdditiona
 
     addFeature(f);
   }
+  
+  @Override
+  XAttrFeature getXAttrFeature(int snapshotId) {
+    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
+      return getSnapshotINode(snapshotId).getXAttrFeature();
+    }
+
+    return getFeature(XAttrFeature.class);
+  }
+  
+  @Override
+  public void removeXAttrFeature() {
+    XAttrFeature f = getXAttrFeature();
+    Preconditions.checkNotNull(f);
+    removeFeature(f);
+  }
+  
+  @Override
+  public void addXAttrFeature(XAttrFeature f) {
+    XAttrFeature f1 = getXAttrFeature();
+    Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
+    
+    addFeature(f);
+  }
 
   public final Feature[] getFeatures() {
     return features;

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java Wed Jun 11 19:25:17 2014
@@ -64,7 +64,8 @@ public class NameNodeLayoutVersion { 
    */
   public static enum Feature implements LayoutFeature {
     ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
-    EDITLOG_LENGTH(-56, "Add length field to every edit log op");
+    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
+    XATTRS(-57, "Extended attributes");
     
     private final FeatureInfo info;
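
Layout versions grow more negative as features land, so introducing XATTRS at -57 means an fsimage or edit log written at this version cannot be read by software that only understands up to -56. Code typically gates behavior on such a feature; a one-line sketch, assuming the usual supports helper in this class:

boolean xattrsSupported =
    NameNodeLayoutVersion.supports(NameNodeLayoutVersion.Feature.XATTRS, layoutVersion);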
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Wed Jun 11 19:25:17 2014
@@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -1377,5 +1379,22 @@ class NameNodeRpcServer implements Namen
   public AclStatus getAclStatus(String src) throws IOException {
     return namesystem.getAclStatus(src);
   }
+  
+  @Override
+  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+      throws IOException {
+    namesystem.setXAttr(src, xAttr, flag);
+  }
+  
+  @Override
+  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) 
+      throws IOException {
+    return namesystem.getXAttrs(src, xAttrs);
+  }
+  
+  @Override
+  public void removeXAttr(String src, XAttr xAttr) throws IOException {
+    namesystem.removeXAttr(src, xAttr);
+  }
 }
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java Wed Jun 11 19:25:17 2014
@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 
 import com.google.common.base.Preconditions;
@@ -215,11 +216,16 @@ public class FSImageFormatPBSnapshot {
             acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
                 fileInPb.getAcl(), state.getStringTable()));
           }
+          XAttrFeature xAttrs = null;
+          if (fileInPb.hasXAttrs()) {
+            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
+                fileInPb.getXAttrs(), state.getStringTable()));
+          }
 
           copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
               .toByteArray(), permission, acl, fileInPb.getModificationTime(),
               fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
-              fileInPb.getPreferredBlockSize());
+              fileInPb.getPreferredBlockSize(), xAttrs);
         }
 
         FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
@@ -310,16 +316,21 @@ public class FSImageFormatPBSnapshot {
             acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
                 dirCopyInPb.getAcl(), state.getStringTable()));
           }
+          XAttrFeature xAttrs = null;
+          if (dirCopyInPb.hasXAttrs()) {
+            xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
+                dirCopyInPb.getXAttrs(), state.getStringTable()));
+          }
 
           long modTime = dirCopyInPb.getModificationTime();
           boolean noQuota = dirCopyInPb.getNsQuota() == -1
               && dirCopyInPb.getDsQuota() == -1;
 
           copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
-              permission, acl, modTime)
+              permission, acl, modTime, xAttrs)
               : new INodeDirectoryAttributes.CopyWithQuota(name, permission,
                   acl, modTime, dirCopyInPb.getNsQuota(),
-                  dirCopyInPb.getDsQuota());
+                  dirCopyInPb.getDsQuota(), xAttrs);
         }
         // load created list
         List<INode> clist = loadCreatedList(in, dir,

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java Wed Jun 11 19:25:17 2014
@@ -34,8 +34,10 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
+import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
@@ -144,9 +146,20 @@ public class Snapshot implements Compara
   /** The root directory of the snapshot. */
   static public class Root extends INodeDirectory {
     Root(INodeDirectory other) {
-      // Always preserve ACL.
+      // Always preserve ACL, XAttr.
       super(other, false, Lists.newArrayList(
-        Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
+        Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {
+          @Override
+          public boolean apply(Feature input) {
+            return input instanceof AclFeature || input instanceof XAttrFeature;
+          }
+        }))
         .toArray(new Feature[0]));
     }
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Wed Jun 11 19:25:17 2014
@@ -28,6 +28,7 @@ import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.List;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
@@ -53,8 +54,10 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -103,6 +106,10 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -344,6 +351,12 @@ public class NamenodeWebHdfsMethods {
           final TokenArgumentParam delegationTokenArgument,
       @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) 
           final AclPermissionParam aclPermission,
+      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
+          final XAttrNameParam xattrName,
+      @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT) 
+          final XAttrValueParam xattrValue,
+      @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT) 
+          final XAttrSetFlagParam xattrSetFlag,
       @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
           final SnapshotNameParam snapshotName,
       @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
@@ -352,7 +365,8 @@ public class NamenodeWebHdfsMethods {
     return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
         owner, group, permission, overwrite, bufferSize, replication,
         blockSize, modificationTime, accessTime, renameOptions, createParent,
-        delegationTokenArgument, aclPermission, snapshotName, oldSnapshotName);
+        delegationTokenArgument, aclPermission, xattrName, xattrValue,
+        xattrSetFlag, snapshotName, oldSnapshotName);
   }
 
   /** Handle HTTP PUT request. */
@@ -399,6 +413,12 @@ public class NamenodeWebHdfsMethods {
           final TokenArgumentParam delegationTokenArgument,
       @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) 
           final AclPermissionParam aclPermission,
+      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
+          final XAttrNameParam xattrName,
+      @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT) 
+          final XAttrValueParam xattrValue,
+      @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT) 
+          final XAttrSetFlagParam xattrSetFlag,
       @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
           final SnapshotNameParam snapshotName,
       @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
@@ -408,7 +428,8 @@ public class NamenodeWebHdfsMethods {
     init(ugi, delegation, username, doAsUser, path, op, destination, owner,
         group, permission, overwrite, bufferSize, replication, blockSize,
         modificationTime, accessTime, renameOptions, delegationTokenArgument,
-        aclPermission, snapshotName, oldSnapshotName);
+        aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
+        oldSnapshotName);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -418,8 +439,8 @@ public class NamenodeWebHdfsMethods {
               path.getAbsolutePath(), op, destination, owner, group,
               permission, overwrite, bufferSize, replication, blockSize,
               modificationTime, accessTime, renameOptions, createParent,
-              delegationTokenArgument, aclPermission, snapshotName,
-              oldSnapshotName);
+              delegationTokenArgument, aclPermission, xattrName, xattrValue,
+              xattrSetFlag, snapshotName, oldSnapshotName);
         } finally {
           reset();
         }
@@ -448,6 +469,9 @@ public class NamenodeWebHdfsMethods {
       final CreateParentParam createParent,
       final TokenArgumentParam delegationTokenArgument,
       final AclPermissionParam aclPermission,
+      final XAttrNameParam xattrName,
+      final XAttrValueParam xattrValue, 
+      final XAttrSetFlagParam xattrSetFlag,
       final SnapshotNameParam snapshotName,
       final OldSnapshotNameParam oldSnapshotName
       ) throws IOException, URISyntaxException {
@@ -549,6 +573,17 @@ public class NamenodeWebHdfsMethods {
       np.setAcl(fullpath, aclPermission.getAclPermission(true));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
+    case SETXATTR: {
+      np.setXAttr(
+          fullpath,
+          XAttrHelper.buildXAttr(xattrName.getXAttrName(),
+              xattrValue.getXAttrValue()), xattrSetFlag.getFlag());
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    }
+    case REMOVEXATTR: {
+      np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    }
     case CREATESNAPSHOT: {
       String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
       final String js = JsonUtil.toJsonString(
@@ -675,10 +710,14 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
           final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
-          final BufferSizeParam bufferSize
+          final BufferSizeParam bufferSize,
+      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
+          final XAttrNameParam xattrName,
+      @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
+          final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
-    return get(ugi, delegation, username, doAsUser, ROOT, op,
-        offset, length, renewer, bufferSize);
+    return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
+        renewer, bufferSize, xattrName, xattrEncoding);
   }
 
   /** Handle HTTP GET request. */
@@ -703,18 +742,23 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
           final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
-          final BufferSizeParam bufferSize
+          final BufferSizeParam bufferSize,
+      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
+          final XAttrNameParam xattrName,
+      @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
+          final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
 
-    init(ugi, delegation, username, doAsUser, path, op,
-        offset, length, renewer, bufferSize);
+    init(ugi, delegation, username, doAsUser, path, op, offset, length,
+        renewer, bufferSize, xattrName, xattrEncoding);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
         try {
           return get(ugi, delegation, username, doAsUser,
-              path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
+              path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
+              xattrName, xattrEncoding);
         } finally {
           reset();
         }
@@ -732,7 +776,9 @@ public class NamenodeWebHdfsMethods {
       final OffsetParam offset,
       final LengthParam length,
       final RenewerParam renewer,
-      final BufferSizeParam bufferSize
+      final BufferSizeParam bufferSize,
+      final XAttrNameParam xattrName,
+      final XAttrEncodingParam xattrEncoding
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = getRPCServer(namenode);
@@ -807,6 +853,19 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(status);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETXATTR: {
+      XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath,
+          XAttrHelper.buildXAttrAsList(xattrName.getXAttrName())));
+      final String js = JsonUtil.toJsonString(xAttr,
+          xattrEncoding.getEncoding());
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
+    case GETXATTRS: {
+      List<XAttr> xAttrs = np.getXAttrs(fullpath, null);
+      final String js = JsonUtil.toJsonString(xAttrs,
+          xattrEncoding.getEncoding());
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
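
Taken together, the new ops and query parameters give WebHDFS an HTTP surface for xattrs. Illustrative requests follow; the literal parameter names (xattr.name, xattr.value, flag, encoding) come from the NAME constants of the corresponding *Param classes, which are not shown in this part of the diff, so treat them as assumptions:

PUT /webhdfs/v1/user/test/file?op=SETXATTR&xattr.name=user.checksum&xattr.value=abc&flag=CREATE
PUT /webhdfs/v1/user/test/file?op=REMOVEXATTR&xattr.name=user.checksum
GET /webhdfs/v1/user/test/file?op=GETXATTR&xattr.name=user.checksum&encoding=hex
GET /webhdfs/v1/user/test/file?op=GETXATTRS&encoding=hex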

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Wed Jun 11 19:25:17 2014
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,6 +35,8 @@ import org.apache.hadoop.util.DataChecks
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Maps;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -664,4 +667,125 @@ public class JsonUtil {
     aclStatusBuilder.addEntries(aclEntryList);
     return aclStatusBuilder.build();
   }
+  
+  public static String toJsonString(final XAttr xAttr, 
+      final XAttrCodec encoding) throws IOException {
+    if (xAttr == null) {
+      return "{}";
+    }
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("name", XAttrHelper.getPrefixName(xAttr));
+    m.put("value", xAttr.getValue() != null ? 
+        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
+    final Map<String, Map<String, Object>> finalMap =
+        new TreeMap<String, Map<String, Object>>();
+    finalMap.put(XAttr.class.getSimpleName(), m);
+    return JSON.toString(finalMap);
+  }
+  
+  private static Map<String, Object> toJsonMap(final XAttr xAttr,
+      final XAttrCodec encoding) throws IOException {
+    if (xAttr == null) {
+      return null;
+    }
+ 
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("name", XAttrHelper.getPrefixName(xAttr));
+    m.put("value", xAttr.getValue() != null ? 
+        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
+    return m;
+  }
+  
+  private static Object[] toJsonArray(final List<XAttr> array,
+      final XAttrCodec encoding) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.size() == 0) {
+      return EMPTY_OBJECT_ARRAY;
+    } else {
+      final Object[] a = new Object[array.size()];
+      for(int i = 0; i < array.size(); i++) {
+        a[i] = toJsonMap(array.get(i), encoding);
+      }
+      return a;
+    }
+  }
+  
+  public static String toJsonString(final List<XAttr> xAttrs, 
+      final XAttrCodec encoding) throws IOException {
+    final Map<String, Object> finalMap = new TreeMap<String, Object>();
+    finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
+    return JSON.toString(finalMap);
+  }
+  
+  public static XAttr toXAttr(final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+    
+    Map<?, ?> m = (Map<?, ?>) json.get(XAttr.class.getSimpleName());
+    if (m == null) {
+      return null;
+    }
+    String name = (String) m.get("name");
+    String value = (String) m.get("value");
+    return XAttrHelper.buildXAttr(name, decodeXAttrValue(value));
+  }
+  
+  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+    
+    return toXAttrMap((Object[])json.get("XAttrs"));
+  }
+  
+  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json, 
+      List<String> names) throws IOException {
+    if (json == null || names == null) {
+      return null;
+    }
+    if (names.isEmpty()) {
+      return Maps.newHashMap();
+    }
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs == null || xAttrs.isEmpty()) {
+      return xAttrs;
+    }
+    
+    Map<String, byte[]> result = Maps.newHashMap();
+    for (String name : names) {
+      if (xAttrs.containsKey(name)) {
+        result.put(name, xAttrs.get(name));
+      }
+    }
+    return result;
+  }
+  
+  private static Map<String, byte[]> toXAttrMap(final Object[] objects) 
+      throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return Maps.newHashMap();
+    } else {
+      final Map<String, byte[]> xAttrs = Maps.newHashMap();
+      for(int i = 0; i < objects.length; i++) {
+        Map<?, ?> m = (Map<?, ?>) objects[i];
+        String name = (String) m.get("name");
+        String value = (String) m.get("value");
+        xAttrs.put(name, decodeXAttrValue(value));
+      }
+      return xAttrs;
+    }
+  }
+  
+  private static byte[] decodeXAttrValue(String value) throws IOException {
+    if (value != null) {
+      return XAttrCodec.decodeValue(value);
+    } else {
+      return new byte[0];
+    }
+  }
 }
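
For reference, the JSON shapes these helpers produce and parse, as implied by the code above: a single xattr is wrapped under "XAttr", a list under "XAttrs". The 0x-prefixed value assumes that is how XAttrCodec.HEX renders an encoded value:

{"XAttr":{"name":"user.checksum","value":"0x616263"}}
{"XAttrs":[{"name":"user.checksum","value":"0x616263"},{"name":"user.tag","value":null}]}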

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Wed Jun 11 19:25:17 2014
@@ -30,6 +30,7 @@ import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -49,6 +50,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -813,6 +817,66 @@ public class WebHdfsFileSystem extends F
         new RenameOptionSetParam(options)
     ).run();
   }
+  
+  @Override
+  public void setXAttr(Path p, String name, byte[] value, 
+      EnumSet<XAttrSetFlag> flag) throws IOException {
+    statistics.incrementWriteOps(1);
+    final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
+    if (value != null) {
+      new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
+          XAttrCodec.encodeValue(value, XAttrCodec.HEX)), 
+          new XAttrSetFlagParam(flag)).run();
+    } else {
+      new FsPathRunner(op, p, new XAttrNameParam(name), 
+          new XAttrSetFlagParam(flag)).run();
+    }
+  }
+  
+  @Override
+  public byte[] getXAttr(Path p, String name) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETXATTR;
+    return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name), 
+        new XAttrEncodingParam(XAttrCodec.HEX)) {
+      @Override
+      byte[] decodeResponse(Map<?, ?> json) throws IOException {
+        XAttr xAttr = JsonUtil.toXAttr(json);
+        return xAttr != null ? xAttr.getValue() : null;
+      }
+    }.run();
+  }
+  
+  @Override
+  public Map<String, byte[]> getXAttrs(Path p) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
+    return new FsPathResponseRunner<Map<String, byte[]>>(op, p, 
+        new XAttrEncodingParam(XAttrCodec.HEX)) {
+      @Override
+      Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
+        return JsonUtil.toXAttrs(json);
+      }
+    }.run();
+  }
+  
+  @Override
+  public Map<String, byte[]> getXAttrs(Path p, final List<String> names) 
+      throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
+    return new FsPathResponseRunner<Map<String, byte[]>>(op, p, 
+        new XAttrEncodingParam(XAttrCodec.HEX)) {
+      @Override
+      Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
+        return JsonUtil.toXAttrs(json, names);
+      }
+    }.run();
+  }
+  
+  @Override
+  public void removeXAttr(Path p, String name) throws IOException {
+    statistics.incrementWriteOps(1);
+    final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR;
+    new FsPathRunner(op, p, new XAttrNameParam(name)).run();
+  }
 
   @Override
   public void setOwner(final Path p, final String owner, final String group

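Because WebHdfsFileSystem is a FileSystem, the same client code works over HTTP once the filesystem is obtained from a webhdfs:// URI. A short sketch, with an illustrative host and HTTP port:

import java.net.URI;
import java.util.Map;

FileSystem webFs = FileSystem.get(
    URI.create("webhdfs://namenode-host:50070"), new Configuration());
Map<String, byte[]> all = webFs.getXAttrs(new Path("/user/test/file"));
byte[] one = webFs.getXAttr(new Path("/user/test/file"), "user.checksum");
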
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java Wed Jun 11 19:25:17 2014
@@ -36,6 +36,8 @@ public class GetOpParam extends HttpOpPa
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETXATTR(false, HttpURLConnection.HTTP_OK),
+    GETXATTRS(false, HttpURLConnection.HTTP_OK),
 
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java Wed Jun 11 19:25:17 2014
@@ -42,6 +42,9 @@ public class PutOpParam extends HttpOpPa
     REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
     REMOVEACL(false, HttpURLConnection.HTTP_OK),
     SETACL(false, HttpURLConnection.HTTP_OK),
+    
+    SETXATTR(false, HttpURLConnection.HTTP_OK), 
+    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),
 
     CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
     RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Wed Jun 11 19:25:17 2014
@@ -31,6 +31,7 @@ package hadoop.hdfs;
 import "Security.proto";
 import "hdfs.proto";
 import "acl.proto";
+import "xattr.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -759,4 +760,10 @@ service ClientNamenodeProtocol {
       returns(SetAclResponseProto);
   rpc getAclStatus(GetAclStatusRequestProto)
       returns(GetAclStatusResponseProto);
+  rpc setXAttr(SetXAttrRequestProto)
+      returns(SetXAttrResponseProto);
+  rpc getXAttrs(GetXAttrsRequestProto)
+      returns(GetXAttrsResponseProto);
+  rpc removeXAttr(RemoveXAttrRequestProto)
+      returns(RemoveXAttrResponseProto);
 }
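
These three RPCs back the public FileSystem xattr API that the tests later in this commit exercise. A minimal client-side sketch of the mapping (the path and attribute name are illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.EnumSet;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;

    public class XAttrRpcSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/foo");

        // setXAttr(...)    -> SetXAttrRequestProto
        fs.setXAttr(p, "user.sample",
            "value".getBytes(StandardCharsets.UTF_8),
            EnumSet.of(XAttrSetFlag.CREATE));

        // getXAttrs(...)   -> GetXAttrsRequestProto
        Map<String, byte[]> xattrs = fs.getXAttrs(p);
        System.out.println(xattrs.keySet()); // [user.sample]

        // removeXAttr(...) -> RemoveXAttrRequestProto
        fs.removeXAttr(p, "user.sample");
      }
    }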

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto Wed Jun 11 19:25:17 2014
@@ -23,6 +23,7 @@ package hadoop.hdfs.fsimage;
 
 import "hdfs.proto";
 import "acl.proto";
+import "xattr.proto";
 
 /**
  * This file defines the on-disk layout of the file system image. The
@@ -106,7 +107,23 @@ message INodeSection {
      */
     repeated fixed32 entries = 2 [packed = true];
   }
-
+  
+  message XAttrCompactProto {
+    /**
+     * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
+     * [2:26) -- the name of the entry, which is an ID that points to a
+     * string in the StringTableSection.
+     * [26:32) -- reserved for future use.
+     */
+    required fixed32 name = 1;
+    optional bytes value = 2;
+  }
+  
+  message XAttrFeatureProto {
+    repeated XAttrCompactProto xAttrs = 1;
+  }
+  
   message INodeFile {
     optional uint32 replication = 1;
     optional uint64 modificationTime = 2;
@@ -116,6 +133,7 @@ message INodeSection {
     repeated BlockProto blocks = 6;
     optional FileUnderConstructionFeature fileUC = 7;
     optional AclFeatureProto acl = 8;
+    optional XAttrFeatureProto xAttrs = 9;
   }
 
   message INodeDirectory {
@@ -126,6 +144,7 @@ message INodeSection {
     optional uint64 dsQuota = 3;
     optional fixed64 permission = 4;
     optional AclFeatureProto acl = 5;
+    optional XAttrFeatureProto xAttrs = 6;
   }
 
   message INodeSymlink {

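The interesting field is the bit-packed fixed32 name: attribute names are deduplicated through the fsimage string table, so each attribute stores only a 24-bit string-table ID plus a 2-bit namespace, while values stay inline as bytes. A hedged sketch of the packing, reading the doc comment's ranges as low-order-first bit positions (the real image loader is authoritative and may place the fields differently within the word):

    public class XAttrNamePackingSketch {
      private static final int NS_MASK = 0x3;      // 2 bits: namespace ordinal
      private static final int ID_MASK = 0xFFFFFF; // 24 bits: string-table ID

      static int pack(int nsOrdinal, int stringTableId) {
        if ((nsOrdinal & ~NS_MASK) != 0 || (stringTableId & ~ID_MASK) != 0) {
          throw new IllegalArgumentException("field out of range");
        }
        return (stringTableId << 2) | nsOrdinal;   // bits [26:32) left reserved
      }

      static int namespaceOf(int packed) { return packed & NS_MASK; }
      static int nameIdOf(int packed) { return (packed >>> 2) & ID_MASK; }

      public static void main(String[] args) {
        int packed = pack(0 /* USER */, 42 /* string-table ID */);
        System.out.println(namespaceOf(packed) + " " + nameIdOf(packed)); // 0 42
      }
    }
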
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Jun 11 19:25:17 2014
@@ -1972,4 +1972,28 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.xattrs.enabled</name>
+  <value>true</value>
+  <description>
+    Whether support for extended attributes is enabled on the NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
+  <value>32</value>
+  <description>
+    Maximum number of extended attributes per inode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.fs-limits.max-xattr-size</name>
+  <value>16384</value>
+  <description>
+    The maximum combined size of the name and value of an extended attribute in bytes.
+  </description>
+</property>
+
 </configuration>
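
The three new keys are a feature switch (on by default) plus two NameNode-enforced limits that bound how much heap any one inode's xattrs can consume; both limits are validated at startup (see the TestStartup change below). A sketch of overriding them programmatically, with the DFSConfigKeys constant names taken from the tests in this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class XAttrConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
        // Tighten the defaults (32 xattrs per inode, 16384 bytes name+value).
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 16);
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 4096);
      }
    }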

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1596575
  Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1588992-1596568

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1596575
  Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1588992-1596568

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1596575
  Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1588992-1596568

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1596575
  Merged /hadoop/common/branches/HDFS-2006/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1588992-1596568

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Jun 11 19:25:17 2014
@@ -1201,6 +1201,13 @@ public class DFSTestUtil {
             .setType(AclEntryType.OTHER)
             .build());
     filesystem.setAcl(pathConcatTarget, aclEntryList);
+    // OP_SET_XATTR
+    filesystem.setXAttr(pathConcatTarget, "user.a1", 
+        new byte[]{0x31, 0x32, 0x33});
+    filesystem.setXAttr(pathConcatTarget, "user.a2", 
+        new byte[]{0x37, 0x38, 0x39});
+    // OP_REMOVE_XATTR
+    filesystem.removeXAttr(pathConcatTarget, "user.a2");
   }
 
   public static void abortStream(DFSOutputStream out) throws IOException {

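A readability aside: the byte literals above are just the ASCII digits "123" and "789". An equivalent spelling, reusing the method's filesystem and pathConcatTarget locals (and assuming Java 7's StandardCharsets is acceptable in this codebase):

    byte[] v1 = "123".getBytes(StandardCharsets.UTF_8); // == {0x31, 0x32, 0x33}
    byte[] v2 = "789".getBytes(StandardCharsets.UTF_8); // == {0x37, 0x38, 0x39}
    filesystem.setXAttr(pathConcatTarget, "user.a1", v1); // logs OP_SET_XATTR
    filesystem.setXAttr(pathConcatTarget, "user.a2", v2);
    filesystem.removeXAttr(pathConcatTarget, "user.a2");  // logs OP_REMOVE_XATTR
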
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Wed Jun 11 19:25:17 2014
@@ -155,7 +155,7 @@ public class TestDFSShell {
   }
   
   @Test (timeout = 30000)
-  public void testRecrusiveRm() throws IOException {
+  public void testRecursiveRm() throws IOException {
 	  Configuration conf = new HdfsConfiguration();
 	  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 	  FileSystem fs = cluster.getFileSystem();
@@ -1567,6 +1567,7 @@ public class TestDFSShell {
       cluster.shutdown();
     }
   }
+
   private static String runLsr(final FsShell shell, String root, int returnvalue
       ) throws Exception {
     System.out.println("root=" + root + ", returnvalue=" + returnvalue);
@@ -1858,6 +1859,333 @@ public class TestDFSShell {
       cluster.shutdown();
     }
   }
+  
+  @Test (timeout = 30000)
+  public void testSetXAttrPermission() throws Exception {
+    UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bak = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/foo");
+      fs.mkdirs(p);
+      bak = System.err;
+      
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+      
+      // No permission to write xattr
+      fs.setPermission(p, new FsPermission((short) 0700));
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          int ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+          assertEquals("Returned should be 1", 1, ret);
+          String str = out.toString();
+          assertTrue("Permission denied printed", 
+              str.indexOf("Permission denied") != -1);
+          out.reset();
+          return null;
+        }
+      });
+      
+      int ret = ToolRunner.run(fshell, new String[]{
+          "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+      assertEquals("Returned should be 0", 0, ret);
+      out.reset();
+      
+      // No permission to read and remove
+      fs.setPermission(p, new FsPermission((short) 0750));
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // Read
+          int ret = ToolRunner.run(fshell, new String[]{
+              "-getfattr", "-n", "user.a1", "/foo"});
+          assertEquals("Returned should be 1", 1, ret);
+          String str = out.toString();
+          assertTrue("Permission denied printed",
+              str.indexOf("Permission denied") != -1);
+          out.reset();           
+          // Remove
+          ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-x", "user.a1", "/foo"});
+          assertEquals("Returned should be 1", 1, ret);
+          str = out.toString();
+          assertTrue("Permission denied printed",
+              str.indexOf("Permission denied") != -1);
+          out.reset();  
+          return null;
+        }
+      });
+    } finally {
+      if (bak != null) {
+        System.setErr(bak);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /* HDFS-6413 xattr names erroneously handled as case-insensitive */
+  @Test (timeout = 30000)
+  public void testSetXAttrCaseSensitivity() throws Exception {
+    UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bak = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      FileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/mydir");
+      fs.mkdirs(p);
+      bak = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo"},
+        new String[] {});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo", "user.FOO"},
+        new String[] {});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo", "user.FOO", "user.foo"},
+        new String[] {});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
+        new String[] {"user.Foo=", "user.FOO=", "user.foo="});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo", "user.FOO"},
+        new String[] {"foo"});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
+        new String[] {"-getfattr", "-d", "/mydir"},
+        new String[] {"user.Foo"},
+        new String[] {"FOO"});
+
+      doSetXattr(out, fshell,
+        new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
+        new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
+        new String[] {},
+        new String[] {"Foo"});
+
+    } finally {
+      if (bak != null) {
+        System.setOut(bak);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
+    String[] setOp, String[] getOp, String[] expectArr,
+    String[] dontExpectArr) throws Exception {
+    int ret = ToolRunner.run(fshell, setOp);
+    out.reset();
+    ret = ToolRunner.run(fshell, getOp);
+    final String str = out.toString();
+    for (int i = 0; i < expectArr.length; i++) {
+      final String expect = expectArr[i];
+      final StringBuilder sb = new StringBuilder
+        ("Incorrect results from getfattr. Expected: ");
+      sb.append(expect).append(" Full Result: ");
+      sb.append(str);
+      assertTrue(sb.toString(),
+        str.indexOf(expect) != -1);
+    }
+
+    for (int i = 0; i < dontExpectArr.length; i++) {
+      String dontExpect = dontExpectArr[i];
+      final StringBuilder sb = new StringBuilder
+        ("Incorrect results from getfattr. Didn't Expect: ");
+      sb.append(dontExpect).append(" Full Result: ");
+      sb.append(str);
+      assertTrue(sb.toString(),
+        str.indexOf(dontExpect) == -1);
+    }
+    out.reset();
+  }
+
+  /**
+   * HDFS-6374 setXAttr should require the user to be the owner of the file
+   * or directory.
+   *
+   * Test to make sure that only the owner of a file or directory can set
+   * or remove the xattrs.
+   *
+   * As user1:
+   * Create a directory (/foo) as user1, chown it to user1 (and user1's group),
+   * grant rwx to "other".
+   *
+   * As user2:
+   * Set an xattr (should fail).
+   *
+   * As user1:
+   * Set an xattr (should pass).
+   *
+   * As user2:
+   * Read the xattr (should pass).
+   * Remove the xattr (should fail).
+   *
+   * As user1:
+   * Read the xattr (should pass).
+   * Remove the xattr (should pass).
+   */
+  @Test (timeout = 30000)
+  public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
+    final String USER1 = "user1";
+    final String GROUP1 = "mygroup1";
+    final UserGroupInformation user1 = UserGroupInformation.
+        createUserForTesting(USER1, new String[] {GROUP1});
+    final UserGroupInformation user2 = UserGroupInformation.
+        createUserForTesting("user2", new String[] {"mygroup2"});
+    MiniDFSCluster cluster = null;
+    PrintStream bak = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      final FileSystem fs = cluster.getFileSystem();
+      fs.setOwner(new Path("/"), USER1, GROUP1);
+      bak = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+
+      // mkdir foo as user1
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final int ret = ToolRunner.run(fshell, new String[]{
+              "-mkdir", "/foo"});
+          assertEquals("Return should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // Give access to "other"
+          final int ret = ToolRunner.run(fshell, new String[]{
+              "-chmod", "707", "/foo"});
+          assertEquals("Return should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+
+      // No permission to write xattr for non-owning user (user2).
+      user2.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final int ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+          assertEquals("Returned should be 1", 1, ret);
+          final String str = out.toString();
+          assertTrue("Permission denied printed",
+              str.indexOf("Permission denied") != -1);
+          out.reset();
+          return null;
+        }
+      });
+
+      // But there should be permission to write xattr for
+      // the owning user.
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final int ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+          assertEquals("Returned should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+
+      // There should be permission to read, but not to remove, for the
+      // non-owning user (user2).
+      user2.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // Read
+          int ret = ToolRunner.run(fshell, new String[]{
+              "-getfattr", "-n", "user.a1", "/foo"});
+          assertEquals("Returned should be 0", 0, ret);
+          out.reset();
+          // Remove
+          ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-x", "user.a1", "/foo"});
+          assertEquals("Returned should be 1", 1, ret);
+          final String str = out.toString();
+          assertTrue("Permission denied printed",
+              str.indexOf("Permission denied") != -1);
+          out.reset();
+          return null;
+        }
+      });
+
+      // But there should be permission to read/remove for
+      // the owning user.
+      user1.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          // Read
+          int ret = ToolRunner.run(fshell, new String[]{
+              "-getfattr", "-n", "user.a1", "/foo"});
+          assertEquals("Returned should be 0", 0, ret);
+          out.reset();
+          // Remove
+          ret = ToolRunner.run(fshell, new String[]{
+              "-setfattr", "-x", "user.a1", "/foo"});
+          assertEquals("Returned should be 0", 0, ret);
+          out.reset();
+          return null;
+        }
+      });
+    } finally {
+      if (bak != null) {
+        System.setErr(bak);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
   /**
    * Test that the server trash configuration is respected when

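For reference, the flag grammar these shell tests exercise: -setfattr -n <name> [-v <value>] <path> creates or replaces an attribute, -setfattr -x <name> <path> removes one, -getfattr -n <name> <path> reads a single attribute, and -getfattr -d <path> dumps them all. Driving the shell from code, exactly as the tests do:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class XAttrShellSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int rc = ToolRunner.run(new FsShell(conf),
            new String[] {"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
        System.exit(rc); // 0 on success, 1 on e.g. permission denied
      }
    }
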
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Wed Jun 11 19:25:17 2014
@@ -73,6 +73,7 @@ public class TestSafeMode {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();      
     fs = cluster.getFileSystem();
@@ -381,7 +382,19 @@ public class TestSafeMode {
       public void run(FileSystem fs) throws IOException {
         fs.setAcl(file1, Lists.<AclEntry>newArrayList());
       }});
-
+    
+    runFsFun("setXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.setXAttr(file1, "user.a1", null);
+      }});
+    
+    runFsFun("removeXAttr while in SM", new FSRun() {
+      @Override
+      public void run(FileSystem fs) throws IOException {
+        fs.removeXAttr(file1, "user.a1");
+      }});
+    
     try {
       DFSTestUtil.readFile(fs, file1);
     } catch (IOException ioe) {

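setXAttr and removeXAttr are namespace writes, so a NameNode in safe mode must reject them just like the setAcl call above. A standalone sketch of what runFsFun asserts for each operation; only the fact that an IOException surfaces is checked here, the exact exception type and message being left open:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SafeModeXAttrSketch {
      static void expectFailureInSafeMode(FileSystem fs, Path file1) {
        try {
          fs.setXAttr(file1, "user.a1", null); // null value: name-only xattr
          throw new AssertionError("setXAttr should fail while in safe mode");
        } catch (IOException expected) {
          // expected: the NameNode rejects writes while in safe mode
        }
      }
    }
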
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Wed Jun 11 19:25:17 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Options.Rena
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -66,6 +67,8 @@ import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import com.google.common.collect.ImmutableList;
+
 public class TestINodeFile {
   // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
   static {
@@ -1041,4 +1044,22 @@ public class TestINodeFile {
     file.toCompleteFile(Time.now());
     assertFalse(file.isUnderConstruction());
   }
+
+  @Test
+  public void testXAttrFeature() {
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
+    ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
+    XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    builder.add(xAttr);
+    XAttrFeature f = new XAttrFeature(builder.build());
+    inf.addXAttrFeature(f);
+    XAttrFeature f1 = inf.getXAttrFeature();
+    assertEquals(xAttr, f1.getXAttrs().get(0));
+    inf.removeXAttrFeature();
+    f1 = inf.getXAttrFeature();
+    assertNull(f1);
+  }
 }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Jun 11 19:25:17 2014
@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
     
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(20, cacheSet.size());
+    assertEquals(22, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals(20, cacheSet.size());
+    assertEquals(22, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

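The expected size moves from 20 to 22 here (and again in TestRetryCacheWithHA below) because this patch adds exactly two retry-cached operations: setXAttr and removeXAttr are writes and therefore leave at-most-once entries in the NameNode retry cache, while getXAttrs is a read and is never cached.
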
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Wed Jun 11 19:25:17 2014
@@ -635,4 +635,68 @@ public class TestStartup {
     fileSys.delete(name, true);
     assertTrue(!fileSys.exists(name));
   }
+
+
+  @Test(timeout = 120000)
+  public void testXattrConfiguration() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1);
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+      fail("Expected exception with negative xattr size");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot set a negative value for the maximum size of an xattr", e);
+    } finally {
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
+          DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    try {
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1);
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+      fail("Expected exception with negative # xattrs per inode");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot set a negative limit on the number of xattrs per inode", e);
+    } finally {
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
+          DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    try {
+      // Set up a logger to capture and check log messages
+      final LogVerificationAppender appender = new LogVerificationAppender();
+      final Logger logger = Logger.getRootLogger();
+      logger.addAppender(appender);
+      int count = appender.countLinesWithMessage(
+          "Maximum size of an xattr: 0 (unlimited)");
+      assertEquals("Expected no messages about unlimited xattr size", 0, count);
+
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
+
+      count = appender.countLinesWithMessage(
+          "Maximum size of an xattr: 0 (unlimited)");
+      // happens twice because we format then run
+      assertEquals("Expected unlimited xattr size", 2, count);
+    } finally {
+      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
+          DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

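The error strings asserted above come from startup-time validation of the two limits. A hedged sketch of those checks; where they actually live in the NameNode's configuration handling is assumed here, and only the messages are taken from the test:

    import com.google.common.base.Preconditions;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class XAttrLimitValidationSketch {
      static void validate(Configuration conf) {
        int maxSize = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
            DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
        Preconditions.checkArgument(maxSize >= 0,
            "Cannot set a negative value for the maximum size of an xattr");
        if (maxSize == 0) {
          System.out.println("Maximum size of an xattr: 0 (unlimited)");
        }

        int maxPerInode = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
            DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
        Preconditions.checkArgument(maxPerInode >= 0,
            "Cannot set a negative limit on the number of xattrs per inode");
      }
    }
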
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1601992&r1=1601991&r2=1601992&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Jun 11 19:25:17 2014
@@ -33,6 +33,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
@@ -45,6 +46,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -126,6 +128,7 @@ public class TestRetryCacheWithHA {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();
@@ -157,7 +160,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(20, cacheSet.size());
+    assertEquals(22, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -178,7 +181,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(20, cacheSet.size());
+    assertEquals(22, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -1001,6 +1004,48 @@ public class TestRetryCacheWithHA {
       return null;
     }
   }
+  
+  /** setXAttr */
+  class SetXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    SetXAttrOp(DFSClient client, String src) {
+      super("setXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.setXAttr(src, "user.key", "value".getBytes(),
+          EnumSet.of(XAttrSetFlag.CREATE));
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
 
   @Test (timeout=60000)
   public void testCreateSnapshot() throws Exception {
@@ -1130,6 +1175,13 @@ public class TestRetryCacheWithHA {
     AtMostOnceOp op = new RemoveCachePoolOp(client, "pool");
     testClientRetryWithFailover(op);
   }
+  
+  @Test (timeout=60000)
+  public void testSetXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new SetXAttrOp(client, "/setxattr");
+    testClientRetryWithFailover(op);
+  }
 
   /**
    * When NN failover happens, if the client did not receive the response and

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot:r1596575


