hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ste...@apache.org
Subject [43/50] [abbrv] hadoop git commit: HDFS-11072. Add ability to unset and change directory EC policy. Contributed by Sammi Chen.
Date Wed, 11 Jan 2017 18:19:59 GMT
HDFS-11072. Add ability to unset and change directory EC policy. Contributed by Sammi Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6923165
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6923165
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6923165

Branch: refs/heads/HADOOP-13345
Commit: e69231658dc4a79da936e6856017b5c4f6124ecb
Parents: 4046794
Author: Andrew Wang <wang@apache.org>
Authored: Tue Jan 10 11:32:45 2017 -0800
Committer: Andrew Wang <wang@apache.org>
Committed: Tue Jan 10 11:32:48 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  16 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  29 ++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  10 +
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   7 +
 .../ClientNamenodeProtocolTranslatorPB.java     |  15 +
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../src/main/proto/erasurecoding.proto          |   7 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 +
 .../server/namenode/FSDirErasureCodingOp.java   | 115 +++++-
 .../hdfs/server/namenode/FSNamesystem.java      |  36 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 +
 .../hdfs/tools/erasurecode/ECCommand.java       |  34 ++
 .../src/site/markdown/HDFSErasureCoding.md      |   5 +
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |   5 +-
 .../TestUnsetAndChangeDirectoryEcPolicy.java    | 366 +++++++++++++++++++
 .../test/resources/testErasureCodingConf.xml    |  64 +++-
 16 files changed, 725 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 0128b07..7b6a4e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2620,7 +2620,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
-          UnresolvedPathException.class);
+          UnresolvedPathException.class,
+          FileNotFoundException.class);
+    }
+  }
+
+  public void unsetErasureCodingPolicy(String src) throws IOException {
+    checkOpen();
+    try (TraceScope ignored =
+             newPathTraceScope("unsetErasureCodingPolicy", src)) {
+      namenode.unsetErasureCodingPolicy(src);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class,
+          UnresolvedPathException.class,
+          FileNotFoundException.class);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4f97896..e9475d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2468,6 +2468,35 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
+   * Unset the erasure coding policy from the source path.
+   *
+   * @param path     The directory to unset the policy
+   * @throws IOException
+   */
+  public void unsetErasureCodingPolicy(final Path path) throws IOException {
+    Path absF = fixRelativePart(path);
+    new FileSystemLinkResolver<Void>() {
+      @Override
+      public Void doCall(final Path p) throws IOException {
+        dfs.unsetErasureCodingPolicy(getPathName(p));
+        return null;
+      }
+
+      @Override
+      public Void next(final FileSystem fs, final Path p) throws IOException {
+        if (fs instanceof DistributedFileSystem) {
+          DistributedFileSystem myDfs = (DistributedFileSystem) fs;
+          myDfs.unsetErasureCodingPolicy(p);
+          return null;
+        }
+        throw new UnsupportedOperationException(
+            "Cannot unsetErasureCodingPolicy through a symlink to a "
+                + "non-DistributedFileSystem: " + path + " -> " + p);
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
    * Get the root directory of Trash for a path in HDFS.
    * 1. File in encryption zone returns /ez1/.Trash/username
    * 2. File not in encryption zone, or encountered exception when checking

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 5508064..a192fa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -496,6 +496,16 @@ public class HdfsAdmin {
     return dfs.getClient().getErasureCodingPolicies();
   }
 
+  /**
+   * Unset erasure coding policy from the directory.
+   *
+   * @param path The source path referring to a directory.
+   * @throws IOException
+   */
+  public void unsetErasureCodingPolicy(final Path path) throws IOException {
+    dfs.unsetErasureCodingPolicy(path);
+  }
+
   private void provisionEZTrash(Path path) throws IOException {
     // make sure the path is an EZ
     EncryptionZone ez = dfs.getEZForPath(path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 15bbe51..407621b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1535,6 +1535,13 @@ public interface ClientProtocol {
   ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException;
 
   /**
+   * Unset erasure coding policy from a specified path.
+   * @param src The path to unset policy.
+   */
+  @AtMostOnce
+  void unsetErasureCodingPolicy(String src) throws IOException;
+
+  /**
    * Get {@link QuotaUsage} rooted at the specified directory.
    * @param path The string representation of the path
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index f73abfd..de474b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -173,6 +173,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -1475,6 +1476,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public void unsetErasureCodingPolicy(String src)
+      throws IOException {
+    final UnsetErasureCodingPolicyRequestProto.Builder builder =
+        ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto.newBuilder();
+    builder.setSrc(src);
+    UnsetErasureCodingPolicyRequestProto req = builder.build();
+    try {
+      rpcProxy.unsetErasureCodingPolicy(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 54bed32..ff4db03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -898,6 +898,8 @@ service ClientNamenodeProtocol {
       returns(GetEZForPathResponseProto);
   rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto)
       returns(SetErasureCodingPolicyResponseProto);
+  rpc unsetErasureCodingPolicy(UnsetErasureCodingPolicyRequestProto)
+      returns(UnsetErasureCodingPolicyResponseProto);
   rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
       returns(GetCurrentEditLogTxidResponseProto);
   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 4bb44fb..7ea8d32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -46,6 +46,13 @@ message GetErasureCodingPolicyResponseProto {
   optional ErasureCodingPolicyProto ecPolicy = 1;
 }
 
+message UnsetErasureCodingPolicyRequestProto {
+  required string src = 1;
+}
+
+message UnsetErasureCodingPolicyResponseProto {
+}
+
 /**
  * Block erasure coding reconstruction info
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 3974956..1aa15c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -215,6 +215,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
@@ -1450,6 +1452,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public UnsetErasureCodingPolicyResponseProto unsetErasureCodingPolicy(
+      RpcController controller, UnsetErasureCodingPolicyRequestProto req)
+      throws ServiceException {
+    try {
+      server.unsetErasureCodingPolicy(req.getSrc());
+      return UnsetErasureCodingPolicyResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public SetXAttrResponseProto setXAttr(RpcController controller,
       SetXAttrRequestProto req) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 1f3b135..0ab8c89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -80,7 +80,7 @@ final class FSDirErasureCodingOp {
     try {
       iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
       src = iip.getPath();
-      xAttrs = createErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
+      xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
     } finally {
       fsd.writeUnlock();
     }
@@ -88,21 +88,20 @@ final class FSDirErasureCodingOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static List<XAttr> createErasureCodingPolicyXAttr(final FSNamesystem fsn,
+  static List<XAttr> setErasureCodingPolicyXAttr(final FSNamesystem fsn,
       final INodesInPath srcIIP, ErasureCodingPolicy ecPolicy) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     assert fsd.hasWriteLock();
     Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
     String src = srcIIP.getPath();
-    if (srcIIP.getLastINode() != null &&
-        !srcIIP.getLastINode().isDirectory()) {
+    final INode inode = srcIIP.getLastINode();
+    if (inode == null) {
+      throw new FileNotFoundException("Path not found: " + srcIIP.getPath());
+    }
+    if (!inode.isDirectory()) {
       throw new IOException("Attempt to set an erasure coding policy " +
           "for a file " + src);
     }
-    if (getErasureCodingPolicyForPath(fsn, srcIIP) != null) {
-      throw new IOException("Directory " + src + " already has an " +
-          "erasure coding policy.");
-    }
 
     // System default erasure coding policy will be used since no specified.
     if (ecPolicy == null) {
@@ -124,7 +123,7 @@ final class FSDirErasureCodingOp {
           ecPolicyNames.add(activePolicy.getName());
         }
         throw new HadoopIllegalArgumentException("Policy [ " +
-            ecPolicy.getName()+ " ] does not match any of the " +
+            ecPolicy.getName() + " ] does not match any of the " +
             "supported policies. Please select any one of " + ecPolicyNames);
       }
     }
@@ -140,10 +139,76 @@ final class FSDirErasureCodingOp {
     } finally {
       IOUtils.closeStream(dOut);
     }
+    // check whether the directory already has an erasure coding policy
+    // directly on itself.
+    final Boolean hasEcXAttr =
+        getErasureCodingPolicyXAttrForINode(fsn, inode) == null ? false : true;
     final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
     xattrs.add(ecXAttr);
-    FSDirXAttrOp.unprotectedSetXAttrs(fsd, srcIIP, xattrs,
-        EnumSet.of(XAttrSetFlag.CREATE));
+    final EnumSet<XAttrSetFlag> flag = hasEcXAttr ?
+        EnumSet.of(XAttrSetFlag.REPLACE) : EnumSet.of(XAttrSetFlag.CREATE);
+    FSDirXAttrOp.unprotectedSetXAttrs(fsd, srcIIP, xattrs, flag);
+    return xattrs;
+  }
+
+  /**
+   * Unset erasure coding policy from the given directory.
+   *
+   * @param fsn The namespace
+   * @param srcArg The path of the target directory.
+   * @param logRetryCache whether to record RPC ids in editlog for retry
+   *          cache rebuilding
+   * @return {@link HdfsFileStatus}
+   * @throws IOException
+   */
+  static HdfsFileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
+      final String srcArg, final boolean logRetryCache) throws IOException {
+    assert fsn.hasWriteLock();
+
+    String src = srcArg;
+    FSPermissionChecker pc = fsn.getPermissionChecker();
+    FSDirectory fsd = fsn.getFSDirectory();
+    final INodesInPath iip;
+    List<XAttr> xAttrs;
+    fsd.writeLock();
+    try {
+      iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
+      src = iip.getPath();
+      xAttrs = removeErasureCodingPolicyXAttr(fsn, iip);
+    } finally {
+      fsd.writeUnlock();
+    }
+    if (xAttrs != null) {
+      fsn.getEditLog().logRemoveXAttrs(src, xAttrs, logRetryCache);
+    }
+    return fsd.getAuditFileInfo(iip);
+  }
+
+  private static List<XAttr> removeErasureCodingPolicyXAttr(
+      final FSNamesystem fsn, final INodesInPath srcIIP) throws IOException {
+    FSDirectory fsd = fsn.getFSDirectory();
+    assert fsd.hasWriteLock();
+    Preconditions.checkNotNull(srcIIP, "INodes cannot be null");
+    String src = srcIIP.getPath();
+    final INode inode = srcIIP.getLastINode();
+    if (inode == null) {
+      throw new FileNotFoundException("Path not found: " + srcIIP.getPath());
+    }
+    if (!inode.isDirectory()) {
+      throw new IOException("Cannot unset an erasure coding policy " +
+          "on a file " + src);
+    }
+
+    // Check whether the directory has a specific erasure coding policy
+    // directly on itself.
+    final XAttr ecXAttr = getErasureCodingPolicyXAttrForINode(fsn, inode);
+    if (ecXAttr == null) {
+      return null;
+    }
+
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(ecXAttr);
+    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP.getPath(), xattrs);
     return xattrs;
   }
 
@@ -271,4 +336,32 @@ final class FSDirErasureCodingOp {
     }
     return null;
   }
+
+  private static XAttr getErasureCodingPolicyXAttrForINode(
+      FSNamesystem fsn, INode inode) throws IOException {
+    // INode can be null
+    if (inode == null) {
+      return null;
+    }
+    FSDirectory fsd = fsn.getFSDirectory();
+    fsd.readLock();
+    try {
+      // We don't allow setting EC policies on paths with a symlink. Thus
+      // if a symlink is encountered, the dir shouldn't have EC policy.
+      // TODO: properly support symlinks
+      if (inode.isSymlink()) {
+        return null;
+      }
+      final XAttrFeature xaf = inode.getXAttrFeature();
+      if (xaf != null) {
+        XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY);
+        if (xattr != null) {
+          return xattr;
+        }
+      }
+    } finally {
+      fsd.readUnlock();
+    }
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 90fb924..249324b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6775,6 +6775,42 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
+   * Unset an erasure coding policy from the given path.
+   * @param srcArg  The path of the target directory.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   * @throws SafeModeException       if the Namenode is in safe mode.
+   */
+  void unsetErasureCodingPolicy(final String srcArg,
+      final boolean logRetryCache) throws IOException,
+      UnresolvedLinkException, SafeModeException, AccessControlException {
+    final String operationName = "unsetErasureCodingPolicy";
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.WRITE);
+    HdfsFileStatus resultingStat = null;
+    boolean success = false;
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot unset erasure coding policy on " + srcArg);
+      resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this,
+          srcArg, logRetryCache);
+      success = true;
+    } catch (AccessControlException ace) {
+      logAuditEvent(success, operationName, srcArg, null,
+          resultingStat);
+      throw ace;
+    } finally {
+      writeUnlock(operationName);
+      if (success) {
+        getEditLog().logSync();
+      }
+    }
+    logAuditEvent(success, operationName, srcArg, null,
+        resultingStat);
+  }
+
+  /**
    * Get the erasure coding policy information for specified path
    */
   ErasureCodingPolicy getErasureCodingPolicy(String src)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 735b2c0..6a3f966 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -2231,6 +2231,22 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     return namesystem.getErasureCodingPolicy(src);
   }
 
+  @Override // ClientProtocol
+  public void unsetErasureCodingPolicy(String src) throws IOException {
+    checkNNStartup();
+    final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return;
+    }
+    boolean success = false;
+    try {
+      namesystem.unsetErasureCodingPolicy(src, cacheEntry != null);
+      success = true;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+
   @Override // ReconfigurationProtocol
   public void startReconfiguration() throws IOException {
     checkNNStartup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
index 978fe47..fc732e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
@@ -47,6 +47,8 @@ public abstract class ECCommand extends Command {
     factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME);
     factory.addClass(GetECPolicyCommand.class, "-"
         + GetECPolicyCommand.NAME);
+    factory.addClass(UnsetECPolicyCommand.class, "-"
+        + UnsetECPolicyCommand.NAME);
     factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
   }
 
@@ -211,4 +213,36 @@ public abstract class ECCommand extends Command {
       out.println(sb.toString());
     }
   }
+
+  /**
+   * Unset the erasure coding policy from a directory.
+   */
+  static class UnsetECPolicyCommand extends ECCommand {
+    public static final String NAME = "unsetPolicy";
+    public static final String USAGE = "<path>";
+    public static final String DESCRIPTION =
+        "Unset erasure coding policy from a directory\n";
+
+    @Override
+    protected void processOptions(LinkedList<String> args) throws IOException {
+      if (args.isEmpty()) {
+        throw new HadoopIllegalArgumentException("<path> is missing");
+      }
+      if (args.size() > 1) {
+        throw new HadoopIllegalArgumentException("Too many arguments");
+      }
+    }
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      super.processPath(item);
+      DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
+      try {
+        dfs.unsetErasureCodingPolicy(item.path);
+      } catch (IOException e) {
+        throw new IOException("Unable to unset EC policy from directory "
+            + item.path + ". " + e.getMessage());
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 517469d..d5dbd0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -129,6 +129,7 @@ Deployment
        hdfs erasurecode [generic options]
          [-setPolicy [-p <policyName>] <path>]
          [-getPolicy <path>]
+         [-unsetPolicy <path>]
          [-listPolicies]
          [-usage [cmd ...]]
          [-help [cmd ...]]
@@ -147,6 +148,10 @@ Below are the details about each command.
 
      Get details of the ErasureCoding policy of a file or directory at the specified path.
 
+ *  `[-unsetPolicy <path>]`
+
+     Unset an ErasureCoding policy from a directory at the specified path when previously
user sets the ErasureCoding policy on this directory via "setPolicy" command. If the directory
inherits the ErasureCoding policy from its parent group, "unsetPolicy" command on this directory
will not have any effect. Unset ErasureCoding policy on a directory which doesn't have ErasureCoding
policy will not return an error.
+
  *  `[-listPolicies]`
 
      Lists all supported ErasureCoding policies. These names are suitable for use with the
`setPolicy` command.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index b7c3ed8..27fbf18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -155,7 +155,7 @@ public class TestErasureCodingPolicies {
     INode newInode = namesystem.getFSDirectory().getINode(newFile.toString());
     assertTrue(newInode.asFile().isStriped());
 
-    /* Verify that nested EC policies not supported */
+    /* Verify that nested EC policies are supported */
     final Path dir1 = new Path("/dir1");
     final Path dir2 = new Path(dir1, "dir2");
     fs.mkdir(dir1, FsPermission.getDirDefault());
@@ -163,9 +163,8 @@ public class TestErasureCodingPolicies {
     fs.mkdir(dir2, FsPermission.getDirDefault());
     try {
       fs.getClient().setErasureCodingPolicy(dir2.toString(), null);
-      fail("Nested erasure coding policies");
     } catch (IOException e) {
-      assertExceptionContains("already has an erasure coding policy", e);
+      fail("Nested erasure coding policies are supported");
     }
 
     /* Verify that EC policy cannot be set on a file */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
new file mode 100644
index 0000000..1a4086e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -0,0 +1,366 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.Assert;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.fail;
+
+/**
+ * Test unset and change directory's erasure coding policy.
+ */
+public class TestUnsetAndChangeDirectoryEcPolicy {
+
+  public static final Log LOG =
+      LogFactory.getLog(TestUnsetAndChangeDirectoryEcPolicy.class);
+
+  private MiniDFSCluster cluster;
+  private Configuration conf = new Configuration();
+  private DistributedFileSystem fs;
+  private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
+      .getSystemDefaultPolicy();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripsPerBlock = 2;
+  private final int blockSize = stripsPerBlock * cellSize;
+  private final int blockGroupSize =  dataBlocks * blockSize;
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(300000);
+
+  @Before
+  public void setup() throws IOException {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
+    if (ErasureCodeNative.isNativeCodeLoaded()) {
+      conf.set(
+          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          NativeRSRawErasureCoderFactory.class.getCanonicalName());
+    }
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        dataBlocks + parityBlocks).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  /*
+   * Test unset EC policy on directory.
+   */
+  @Test
+  public void testUnsetEcPolicy() throws Exception {
+    final int numBlocks = 1;
+    final int fileLen = blockGroupSize * numBlocks;
+    final Path dirPath = new Path("/striped");
+    final Path ecFilePath = new Path(dirPath, "ec_file");
+    final Path replicateFilePath = new Path(dirPath, "3x_file");
+
+    fs.mkdirs(dirPath);
+    // Test unset a directory which has no EC policy
+    fs.unsetErasureCodingPolicy(dirPath);
+    // Set EC policy on directory
+    fs.setErasureCodingPolicy(dirPath, ecPolicy);
+
+    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
+    fs.unsetErasureCodingPolicy(dirPath);
+    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
+
+    // ec_file should have an EC policy
+    ErasureCodingPolicy tempEcPolicy =
+        fs.getErasureCodingPolicy(ecFilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // 3x_file (replicated file) should not have an EC policy
+    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
+    Assert.assertNull("Replicate file should not have erasure coding policy!",
+        tempEcPolicy);
+
+    // Directory should not return erasure coding policy
+    tempEcPolicy = fs.getErasureCodingPolicy(dirPath);
+    Assert.assertNull("Directory should no have erasure coding policy set!",
+        tempEcPolicy);
+
+    fs.delete(dirPath, true);
+  }
+
+  /*
+  * Test nested directory with different EC policy.
+  */
+  @Test
+  public void testNestedEcPolicy() throws Exception {
+    final int numBlocks = 1;
+    final int fileLen = blockGroupSize * numBlocks;
+    final Path parentDir = new Path("/ec-6-3");
+    final Path childDir = new Path("/ec-6-3/ec-3-2");
+    final Path ec63FilePath = new Path(childDir, "ec_6_3_file");
+    final Path ec32FilePath = new Path(childDir, "ec_3_2_file");
+    final Path ec63FilePath2 = new Path(childDir, "ec_6_3_file_2");
+    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
+        .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
+
+    fs.mkdirs(parentDir);
+    fs.setErasureCodingPolicy(parentDir, ecPolicy);
+    fs.mkdirs(childDir);
+    // Create RS(6,3) EC policy file
+    DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
+    // Set RS(3,2) EC policy on child directory
+    fs.setErasureCodingPolicy(childDir, ec32Policy);
+    // Create RS(3,2) EC policy file
+    DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
+
+    // Start to check
+    // ec_6_3_file should have the RS-6-3 EC policy
+    ErasureCodingPolicy tempEcPolicy =
+        fs.getErasureCodingPolicy(ec63FilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // ec_3_2_file should have RS-3-2 policy
+    tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ec32Policy.getName()));
+
+    // Child directory should have RS-3-2 policy
+    tempEcPolicy = fs.getErasureCodingPolicy(childDir);
+    Assert.assertTrue(
+        "Directory should have erasure coding policy set!",
+        tempEcPolicy.getName().equals(ec32Policy.getName()));
+
+    // Unset EC policy on child directory
+    fs.unsetErasureCodingPolicy(childDir);
+    DFSTestUtil.createFile(fs, ec63FilePath2, fileLen, (short) 1, 0L);
+
+    // ec_6_3_file_2 should have RS-6-3 policy
+    tempEcPolicy = fs.getErasureCodingPolicy(ec63FilePath2);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // Child directory should have RS-6-3 policy now
+    tempEcPolicy = fs.getErasureCodingPolicy(childDir);
+    Assert.assertTrue(
+        "Directory should have erasure coding policy set!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    fs.delete(parentDir, true);
+  }
+
+
+  /*
+   * Test unset EC policy on root directory.
+   */
+  @Test
+  public void testUnsetRootDirEcPolicy() throws Exception {
+    final int numBlocks = 1;
+    final int fileLen = blockGroupSize * numBlocks;
+    final Path rootPath = new Path("/");
+    final Path ecFilePath = new Path(rootPath, "ec_file");
+    final Path replicateFilePath = new Path(rootPath, "rep_file");
+
+    // Test unset root path which has no EC policy
+    fs.unsetErasureCodingPolicy(rootPath);
+    // Set EC policy on root path
+    fs.setErasureCodingPolicy(rootPath, ecPolicy);
+    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
+    fs.unsetErasureCodingPolicy(rootPath);
+    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
+
+    // ec_file should have an EC policy set
+    ErasureCodingPolicy tempEcPolicy =
+        fs.getErasureCodingPolicy(ecFilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // rep_file should not have EC policy set
+    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
+    Assert.assertNull("Replicate file should not have erasure coding policy!",
+        tempEcPolicy);
+
+    // Directory should not return erasure coding policy
+    tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
+    Assert.assertNull("Directory should not have erasure coding policy set!",
+        tempEcPolicy);
+
+    fs.delete(rootPath, true);
+  }
+
+  /*
+  * Test change EC policy on root directory.
+  */
+  @Test
+  public void testChangeRootDirEcPolicy() throws Exception {
+    final int numBlocks = 1;
+    final int fileLen = blockGroupSize * numBlocks;
+    final Path rootPath = new Path("/");
+    final Path ec63FilePath = new Path(rootPath, "ec_6_3_file");
+    final Path ec32FilePath = new Path(rootPath, "ec_3_2_file");
+    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
+        .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
+
+    fs.unsetErasureCodingPolicy(rootPath);
+    fs.setErasureCodingPolicy(rootPath, ecPolicy);
+    // Create RS(6,3) EC policy file
+    DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
+    // Change EC policy from RS(6,3) to RS(3,2)
+    fs.setErasureCodingPolicy(rootPath, ec32Policy);
+    DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
+
+    // start to check
+    // ec_6_3_file should have the RS-6-3 EC policy set
+    ErasureCodingPolicy tempEcPolicy =
+        fs.getErasureCodingPolicy(ec63FilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // ec_3_2_file should have RS-3-2 policy
+    tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ec32Policy.getName()));
+
+    // Root directory should have RS-3-2 policy
+    tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
+    Assert.assertTrue(
+        "Directory should have erasure coding policy!",
+        tempEcPolicy.getName().equals(ec32Policy.getName()));
+
+    fs.delete(rootPath, true);
+  }
+
+  /*
+   * Test different replica factor files.
+   */
+  @Test
+  public void testDifferentReplicaFactor() throws Exception {
+    final int numBlocks = 1;
+    final int fileLen = blockGroupSize * numBlocks;
+    final Path ecDirPath = new Path("/striped");
+    final Path ecFilePath = new Path(ecDirPath, "ec_file");
+    final Path replicateFilePath = new Path(ecDirPath, "rep_file");
+    final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2");
+
+    fs.mkdirs(ecDirPath);
+    fs.setErasureCodingPolicy(ecDirPath, ecPolicy);
+    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
+    fs.unsetErasureCodingPolicy(ecDirPath);
+    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L);
+    DFSTestUtil.createFile(fs, replicateFilePath2, fileLen, (short) 2, 0L);
+
+    // ec_file should have an EC policy set
+    ErasureCodingPolicy tempEcPolicy =
+        fs.getErasureCodingPolicy(ecFilePath);
+    Assert.assertTrue("Erasure coding policy mismatch!",
+        tempEcPolicy.getName().equals(ecPolicy.getName()));
+
+    // rep_file should not have EC policy set
+    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
+    Assert.assertNull("Replicate file should not have erasure coding policy!",
+        tempEcPolicy);
+    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath2);
+    Assert.assertNull("Replicate file should not have erasure coding policy!",
+        tempEcPolicy);
+
+    // Directory should not return erasure coding policy
+    tempEcPolicy = fs.getErasureCodingPolicy(ecDirPath);
+    Assert.assertNull("Directory should not have erasure coding policy set!",
+        tempEcPolicy);
+
+    fs.delete(ecDirPath, true);
+  }
+
+
+  /*
+   * Test set and unset EC policy on directory doesn't exist.
+   */
+  @Test
+  public void testNonExistentDir() throws Exception {
+    final Path dirPath = new Path("/striped");
+
+    // Unset EC policy on non-existent directory
+    try {
+      fs.unsetErasureCodingPolicy(dirPath);
+      fail("FileNotFoundException should be thrown for a non-existent"
+          + " file path");
+    } catch (FileNotFoundException e) {
+      assertExceptionContains("Path not found: " + dirPath, e);
+    }
+
+    // Set EC policy on non-existent directory
+    try {
+      fs.setErasureCodingPolicy(dirPath, ecPolicy);
+      fail("FileNotFoundException should be thrown for a non-existent"
+          + " file path");
+    } catch (FileNotFoundException e) {
+      assertExceptionContains("Path not found: " + dirPath, e);
+    }
+  }
+
+  /*
+   * Test set and unset EC policy on file.
+   */
+  @Test
+  public void testEcPolicyOnFile() throws Exception {
+    final Path ecFilePath = new Path("/striped_file");
+    final int fileLen = blockGroupSize * 2;
+    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
+
+    // Set EC policy on file
+    try {
+      fs.setErasureCodingPolicy(ecFilePath, ecPolicy);
+      fail("IOException should be thrown for setting EC policy on file");
+    } catch (IOException e) {
+      assertExceptionContains("Attempt to set an erasure coding policy " +
+          "for a file " + ecFilePath, e);
+    }
+
+    // Unset EC policy on file
+    try {
+      fs.unsetErasureCodingPolicy(ecFilePath);
+      fail("IOException should be thrown for unsetting EC policy on file");
+    } catch (IOException e) {
+      assertExceptionContains("Cannot unset an erasure coding policy on a file "
+          + ecFilePath, e);
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6923165/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index f8ee973..dd26b48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -151,7 +151,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Directory /ecdir already has an erasure coding policy</expected-output>
+          <expected-output>EC policy set successfully at NAMENODE/ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -175,6 +175,68 @@
     </test>
 
     <test>
+      <description>unsetPolicy : unset policy and get</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -unsetPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>is not erasure coded.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setPolicy : change different policy and get</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -p RS-DEFAULT-3-2-64k
+          /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-3-2-64k</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>unsetPolicy : unset inherited EC policy, has no effect</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <command>-fs NAMENODE -mkdir /ecdir/child</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -unsetPolicy /ecdir/child</ec-admin-command>
+        <command>-fs NAMENODE -touchz /ecdir/child/ecfile</command>
+        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir/child/ecfile</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /ecdir/child/ecfile</command>
+        <command>-fs NAMENODE -rmdir /ecdir/child</command>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-6-3-64k</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>getPolicy : get EC policy information at specified path, which doesn't
have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /noec</command>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message