From: jing9@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Wed, 29 Apr 2015 19:43:32 -0000
Subject: [04/50] hadoop git commit: HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones. Contributed by Zhe Zhang

HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones.
Contributed by Zhe Zhang

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d717dc3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d717dc3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d717dc3e

Branch: refs/heads/HDFS-7285
Commit: d717dc3e9ee5d81c6e4365e4ed1015b392bdd653
Parents: 45db005
Author: Zhe Zhang
Authored: Thu Apr 2 22:38:29 2015 -0700
Committer: Jing Zhao
Committed: Wed Apr 29 11:06:49 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java     |  15 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java       |   8 +
 .../hadoop/hdfs/protocol/HdfsConstants.java        |   2 -
 ...tNamenodeProtocolServerSideTranslatorPB.java    |  14 ++
 .../ClientNamenodeProtocolTranslatorPB.java        |  16 ++
 .../BlockStoragePolicySuite.java                   |   5 -
 .../hdfs/server/common/HdfsServerConstants.java    |   2 +
 .../namenode/ErasureCodingZoneManager.java         | 112 ++++++++++++++
 .../hdfs/server/namenode/FSDirRenameOp.java        |   2 +
 .../hdfs/server/namenode/FSDirectory.java          |  26 +++-
 .../hdfs/server/namenode/FSNamesystem.java         |  40 +++++
 .../hadoop/hdfs/server/namenode/INodeFile.java     |  10 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java    |  16 ++
 .../src/main/proto/ClientNamenodeProtocol.proto    |   9 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java        |  12 +-
 .../hadoop/hdfs/TestErasureCodingZones.java        | 151 +++++++++++++++++++
 .../TestBlockInitialEncoding.java                  |  75 ---------
 .../server/namenode/TestAddStripedBlocks.java      |   2 +-
 .../server/namenode/TestFSEditLogLoader.java       |   6 +-
 .../hdfs/server/namenode/TestFSImage.java          |  23 ++-
 .../namenode/TestRecoverStripedBlocks.java         |   7 +-
 21 files changed, 431 insertions(+), 122 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8fc9e77..da3b0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2954,6 +2954,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
+  public void createErasureCodingZone(String src)
+      throws IOException {
+    checkOpen();
+    TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
+    try {
+      namenode.createErasureCodingZone(src);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          SafeModeException.class,
+          UnresolvedPathException.class);
+    } finally {
+      scope.close();
+    }
+  }
+
   public void setXAttr(String src, String name, byte[] value,
       EnumSet<XAttrSetFlag> flag) throws IOException {
     checkOpen();
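Before wading into the server-side plumbing below, here is a minimal sketch of how the new client entry point is meant to be driven. It mirrors the calls made by the TestErasureCodingZones test added later in this commit (MiniDFSCluster, DistributedFileSystem#getClient()); the paths and datanode count are illustrative, not part of the commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateEcZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      // The zone root must be an empty directory.
      Path zone = new Path("/ec");
      fs.mkdir(zone, FsPermission.getDirDefault());
      // New API from this commit: mark the directory as an EC zone.
      fs.getClient().createErasureCodingZone(zone.toString());
      // Files created under the zone are now striped (erasure coded).
      fs.create(new Path(zone, "striped-file")).close();
    } finally {
      cluster.shutdown();
    }
  }
}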
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b..8efe344 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1363,6 +1363,14 @@ public interface ClientProtocol {
       long prevId) throws IOException;
 
   /**
+   * Create an erasure coding zone (currently with hardcoded schema)
+   * TODO: Configurable and pluggable schemas (HDFS-7337)
+   */
+  @Idempotent
+  public void createErasureCodingZone(String src)
+      throws IOException;
+
+  /**
    * Set xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index ed5f27d..892c5f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -164,7 +164,6 @@ public class HdfsConstants {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
-  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -172,7 +171,6 @@ public class HdfsConstants {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
-  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 
   public static final byte NUM_DATA_BLOCKS = 3;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index a292d8f..2b8215e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -192,6 +192,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
@@ -1391,6 +1393,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public CreateErasureCodingZoneResponseProto createErasureCodingZone(
+      RpcController controller, CreateErasureCodingZoneRequestProto req)
+      throws ServiceException {
+    try {
+      server.createErasureCodingZone(req.getSrc());
+      return CreateErasureCodingZoneResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public SetXAttrResponseProto setXAttr(RpcController controller,
       SetXAttrRequestProto req) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 22da083..43a0322 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -160,6 +160,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Trunca
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
@@ -1407,6 +1409,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public void createErasureCodingZone(String src)
+      throws IOException {
+    final CreateErasureCodingZoneRequestProto.Builder builder =
+        CreateErasureCodingZoneRequestProto.newBuilder();
+    builder.setSrc(src);
+    CreateErasureCodingZoneRequestProto req = builder.build();
+    try {
+      rpcProxy.createErasureCodingZone(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
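Both translator hunks follow the stock HDFS protobuf-bridge pattern: the client side packs the Java argument into a generated request message and unwraps ServiceException back into IOException, while the server side does the reverse before delegating to the NameNode. A small sketch of just the message round trip, using the CreateErasureCodingZoneRequestProto generated from the .proto change later in this diff (the builder/getter surface shown is the standard protobuf-generated API; treat it as illustrative):

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateErasureCodingZoneRequestProto;

public class EcZoneProtoRoundTrip {
  public static void main(String[] args) {
    // Client-side translator: wrap the path into the request message.
    CreateErasureCodingZoneRequestProto req =
        CreateErasureCodingZoneRequestProto.newBuilder()
            .setSrc("/ec")   // 'src' is the message's only (required) field
            .build();

    // Server-side translator: unwrap it again before calling the namesystem.
    String src = req.getSrc();
    System.out.println(src); // prints /ec
  }
}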
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index a1c96b9..6c352f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -77,11 +77,6 @@ public class BlockStoragePolicySuite {
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
-    final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
-    policies[ecId] = new BlockStoragePolicy(ecId,
-        HdfsConstants.EC_STORAGE_POLICY_NAME,
-        new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
-        new StorageType[]{StorageType.ARCHIVE});
     final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
     policies[coldId] = new BlockStoragePolicy(coldId,
         HdfsConstants.COLD_STORAGE_POLICY_NAME,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 2d267ce..8fd0197 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -327,4 +327,6 @@ public final class HdfsServerConstants {
       "raw.hdfs.crypto.file.encryption.info";
   public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
       "security.hdfs.unreadable.by.superuser";
+  public static final String XATTR_ERASURECODING_ZONE =
+      "raw.hdfs.erasurecoding.zone";
 }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
new file mode 100644
index 0000000..d4ff7c5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.XAttrHelper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_ZONE;
+
+/**
+ * Manages the list of erasure coding zones in the filesystem.
+ * <p/>
+ * The ErasureCodingZoneManager has its own lock, but relies on the FSDirectory
+ * lock being held for many operations. The FSDirectory lock should not be
+ * taken if the manager lock is already held.
+ * TODO: consolidate zone logic w/ encrypt. zones {@link EncryptionZoneManager}
+ */
+public class ErasureCodingZoneManager {
+  private final FSDirectory dir;
+
+  /**
+   * Construct a new ErasureCodingZoneManager.
+   *
+   * @param dir Enclosing FSDirectory
+   */
+  public ErasureCodingZoneManager(FSDirectory dir) {
+    this.dir = dir;
+  }
+
+  boolean getECPolicy(INodesInPath iip) {
+    assert dir.hasReadLock();
+    Preconditions.checkNotNull(iip);
+    List<INode> inodes = iip.getReadOnlyINodes();
+    for (int i = inodes.size() - 1; i >= 0; i--) {
+      final INode inode = inodes.get(i);
+      if (inode == null) {
+        continue;
+      }
+      final List<XAttr> xAttrs = inode.getXAttrFeature() == null ?
+          new ArrayList<XAttr>(0)
+          : inode.getXAttrFeature().getXAttrs();
+      for (XAttr xAttr : xAttrs) {
+        if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  XAttr createErasureCodingZone(String src)
+      throws IOException {
+    assert dir.hasWriteLock();
+    final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false);
+    if (dir.isNonEmptyDirectory(srcIIP)) {
+      throw new IOException(
+          "Attempt to create an erasure coding zone for a " +
+              "non-empty directory.");
+    }
+    if (srcIIP != null &&
+        srcIIP.getLastINode() != null &&
+        !srcIIP.getLastINode().isDirectory()) {
+      throw new IOException("Attempt to create an erasure coding zone " +
+          "for a file.");
+    }
+    if (getECPolicy(srcIIP)) {
+      throw new IOException("Directory " + src + " is already in an " +
+          "erasure coding zone.");
+    }
+    final XAttr ecXAttr = XAttrHelper
+        .buildXAttr(XATTR_ERASURECODING_ZONE, null);
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(ecXAttr);
+    FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs,
+        EnumSet.of(XAttrSetFlag.CREATE));
+    return ecXAttr;
+  }
+
+  void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
+      throws IOException {
+    assert dir.hasReadLock();
+    if (getECPolicy(srcIIP)
+        != getECPolicy(dstIIP)) {
+      throw new IOException(
+          src + " can't be moved because the source and destination have " +
+              "different erasure coding policies.");
+    }
+  }
+}
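The manager stores no zone state of its own: a zone is simply a directory carrying the raw.hdfs.erasurecoding.zone xattr, and getECPolicy() derives membership by scanning the resolved path's inodes from leaf to root for that marker, so every descendant inherits the zone. A toy, self-contained model of that walk follows (plain Java collections standing in for INodesInPath; this is an illustration, not NameNode code):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

/** Toy model of the xattr-based membership check in getECPolicy(). */
public class ZoneCheckSketch {
  static final String MARKER = "raw.hdfs.erasurecoding.zone";

  /** Each set models one inode's xattr names, ordered root-first/leaf-last
   *  like INodesInPath; the scan runs from leaf back toward the root. */
  static boolean inErasureCodingZone(List<Set<String>> pathXAttrs) {
    for (int i = pathXAttrs.size() - 1; i >= 0; i--) {
      if (pathXAttrs.get(i) != null && pathXAttrs.get(i).contains(MARKER)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // "/" -> "/ec" (zone root carries the marker) -> "/ec/sub"
    List<Set<String>> path = Arrays.asList(
        Collections.<String>emptySet(),
        Collections.singleton(MARKER),
        Collections.<String>emptySet());
    System.out.println(inErasureCodingZone(path)); // true: inherited from /ec
  }
}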
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c57cae2..c38c5c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -185,6 +185,7 @@ class FSDirRenameOp {
     }
 
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
+    fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
     verifyQuotaForRename(fsd, srcIIP, dstIIP);
@@ -358,6 +359,7 @@ class FSDirRenameOp {
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
 
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
+    fsd.ecZoneManager.checkMoveValidity(srcIIP, dstIIP, src);
     final INode dstInode = dstIIP.getLastINode();
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
     if (dstInode != null) { // Destination exists

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 56a20a3..a07ff23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -212,6 +212,9 @@ public class FSDirectory implements Closeable {
   @VisibleForTesting
   public final EncryptionZoneManager ezManager;
 
+  @VisibleForTesting
+  public final ErasureCodingZoneManager ecZoneManager;
+
   /**
    * Caches frequently used file names used in {@link INode} to reuse
    * byte[] objects and reduce heap usage.
@@ -303,6 +306,7 @@ public class FSDirectory implements Closeable {
     namesystem = ns;
     this.editLog = ns.getEditLog();
     ezManager = new EncryptionZoneManager(this, conf);
+    ecZoneManager = new ErasureCodingZoneManager(this);
   }
 
   FSNamesystem getFSNamesystem() {
@@ -426,8 +430,7 @@ public class FSDirectory implements Closeable {
     writeLock();
     try {
       newiip = addINode(existing, newNode);
-      // TODO: we will no longer use storage policy for "Erasure Coding Zone"
-      if (newiip != null && newNode.isStriped()) {
+      if (newiip != null && getECPolicy(newiip)) {
         newNode.addStripedBlocksFeature();
       }
     } finally {
@@ -1397,6 +1400,25 @@ public class FSDirectory implements Closeable {
     }
   }
 
+  XAttr createErasureCodingZone(String src)
+      throws IOException {
+    writeLock();
+    try {
+      return ecZoneManager.createErasureCodingZone(src);
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  public boolean getECPolicy(INodesInPath iip) {
+    readLock();
+    try {
+      return ecZoneManager.getECPolicy(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
   static INode resolveLastINode(INodesInPath iip) throws FileNotFoundException {
     INode inode = iip.getLastINode();
     if (inode == null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f1b0eb8..1740365 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8111,6 +8111,46 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
+  /**
+   * Create an erasure coding zone on directory src.
+   *
+   * @param src the path of a directory which will be the root of the
+   *            erasure coding zone. The directory must be empty.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   * @throws SafeModeException       if the Namenode is in safe mode.
+   */
+  void createErasureCodingZone(final String srcArg,
+      final boolean logRetryCache)
+      throws IOException, UnresolvedLinkException,
+      SafeModeException, AccessControlException {
+    String src = srcArg;
+    HdfsFileStatus resultingStat = null;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.WRITE);
+    final byte[][] pathComponents =
+        FSDirectory.getPathComponentsForReservedPath(src);
+    FSPermissionChecker pc = getPermissionChecker();
+    writeLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot create erasure coding zone on " + src);
+      src = dir.resolvePath(pc, src, pathComponents);
+
+      final XAttr ecXAttr = dir.createErasureCodingZone(src);
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(ecXAttr);
+      getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      final INodesInPath iip = dir.getINodesInPath4Write(src, false);
+      resultingStat = dir.getAuditFileInfo(iip);
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+    logAuditEvent(true, "createErasureCodingZone", srcArg, null, resultingStat);
+  }
+
   void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
                 boolean logRetryCache)
       throws IOException {
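createErasureCodingZone() follows the usual FSNamesystem write-operation discipline: cheap state checks before the lock, the same checks repeated under the lock, namespace mutation plus a buffered edit-log entry inside the lock, and the expensive logSync() plus the audit event only after the lock is released. A stubbed skeleton of that shape, for orientation only (the real class uses FSNamesystemLock and many more checks; everything below is illustrative):

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Skeleton of the lock/edit-log/sync discipline used above (stubbed). */
public class NamesystemWritePattern {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
  private volatile boolean inSafeMode = false;

  void createZone(String src) throws IOException {
    // Cheap pre-checks happen before taking the lock...
    lock.writeLock().lock();
    try {
      // ...and are repeated under the lock, since state may have changed.
      if (inSafeMode) {
        throw new IOException("Cannot create erasure coding zone on " + src);
      }
      mutateNamespace(src); // 1. update the in-memory tree (FSDirectory)
      logEdit(src);         // 2. append to the edit log (still buffered)
    } finally {
      lock.writeLock().unlock();
    }
    syncEditLog();          // 3. flush the edit log to disk outside the lock
    audit("createErasureCodingZone", src); // 4. audit only after success
  }

  private void mutateNamespace(String src) { /* stub */ }
  private void logEdit(String src) { /* stub */ }
  private void syncEditLog() { /* stub */ }
  private void audit(String op, String src) { /* stub */ }
}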
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 807f9b3..deb89d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -419,7 +419,7 @@ public class INodeFile extends INodeWithAdditionalFields
       }
       max = maxInSnapshot > max ? maxInSnapshot : max;
     }
-    return isStriped()?
+    return isStriped() ?
         HdfsConstants.NUM_DATA_BLOCKS + HdfsConstants.NUM_PARITY_BLOCKS : max;
   }
 
@@ -841,7 +841,10 @@ public class INodeFile extends INodeWithAdditionalFields
    */
   public final long storagespaceConsumedWithStriped() {
     BlockInfo[] blockInfos = getBlocks();
-    long size = 0;
+    if (blockInfos == null || blockInfos.length == 0) {
+      return 0;
+    }
+    long size;
     final int last = blockInfos.length - 1;
     if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) {
       BlockInfoStripedUnderConstruction blockInfoStripedUC
@@ -1100,8 +1103,7 @@ public class INodeFile extends INodeWithAdditionalFields
    */
   @VisibleForTesting
   @Override
-  // TODO: move erasure coding policy to file XAttr
   public boolean isStriped() {
-    return getStoragePolicyID() == HdfsConstants.EC_STORAGE_POLICY_ID;
+    return getStripedBlocksFeature() != null;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 83686e0..09357cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1843,6 +1843,22 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
+  public void createErasureCodingZone(String src)
+      throws IOException {
+    checkNNStartup();
+    final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return;
+    }
+    boolean success = false;
+    try {
+      namesystem.createErasureCodingZone(src, cacheEntry != null);
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+
+  @Override // ClientProtocol
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     checkNNStartup();
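One wrinkle in the hunk above: `success` is initialized to false and never set to true, so RetryCache.setState() always records the operation as failed, and a retried client call will re-execute the operation instead of short-circuiting on the cached result. Sibling methods in this class flip the flag after the namesystem call returns; a corrected sketch of the body under that assumption:

@Override // ClientProtocol
public void createErasureCodingZone(String src) throws IOException {
  checkNNStartup();
  final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // this retry already succeeded; don't redo the mutation
  }
  boolean success = false;
  try {
    namesystem.createErasureCodingZone(src, cacheEntry != null);
    success = true; // record success so later retries short-circuit
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}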
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index b44c556..183aff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -714,6 +714,13 @@ message GetEditsFromTxidResponseProto {
   required EventsListProto eventsList = 1;
 }
 
+message CreateErasureCodingZoneRequestProto {
+  required string src = 1;
+}
+
+message CreateErasureCodingZoneResponseProto {
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -856,6 +863,8 @@ service ClientNamenodeProtocol {
       returns(ListEncryptionZonesResponseProto);
   rpc getEZForPath(GetEZForPathRequestProto)
       returns(GetEZForPathResponseProto);
+  rpc createErasureCodingZone(CreateErasureCodingZoneRequestProto)
+      returns(CreateErasureCodingZoneResponseProto);
   rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
       returns(GetCurrentEditLogTxidResponseProto);
   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index f3cdd7d..89c8e11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -68,7 +68,6 @@ public class TestBlockStoragePolicy {
   static final short REPLICATION = 3;
 
   static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
-  static final byte EC = HdfsConstants.EC_STORAGE_POLICY_ID;
   static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
   static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
   static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
@@ -116,9 +115,6 @@ public class TestBlockStoragePolicy {
     expectedPolicyStrings.put(COLD,
         "BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " +
             "creationFallbacks=[], replicationFallbacks=[]}");
-    expectedPolicyStrings.put(EC,
-        "BlockStoragePolicy{EC:" + EC + ", storageTypes=[DISK], " +
-            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
     expectedPolicyStrings.put(WARM,
         "BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " +
             "creationFallbacks=[DISK, ARCHIVE], " +
@@ -1163,15 +1159,13 @@ public class TestBlockStoragePolicy {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(7, policies.length);
+      Assert.assertEquals(6, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
-      Assert.assertEquals(POLICY_SUITE.getPolicy(EC).toString(),
-          policies[1].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
-          policies[2].toString());
+          policies[1].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
-          policies[3].toString());
+          policies[2].toString());
     } finally {
       IOUtils.cleanup(null, fs);
       cluster.shutdown();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
new file mode 100644
index 0000000..49f08eef
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestErasureCodingZones {
+  private final int NUM_OF_DATANODES = 3;
+  private Configuration conf;
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private static final int BLOCK_SIZE = 1024;
+  private FSNamesystem namesystem;
+
+  @Before
+  public void setupCluster() throws IOException {
+    conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(NUM_OF_DATANODES).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    namesystem = cluster.getNamesystem();
+  }
+
+  @After
+  public void shutdownCluster() throws IOException {
+    cluster.shutdown();
+  }
+
+  @Test
+  public void testCreateECZone()
+      throws IOException, InterruptedException {
+    final Path testDir = new Path("/ec");
+    fs.mkdir(testDir, FsPermission.getDirDefault());
+
+    /* Normal creation of an erasure coding zone */
+    fs.getClient().createErasureCodingZone(testDir.toString());
+
+    /* Verify files under the zone are striped */
+    final Path ECFilePath = new Path(testDir, "foo");
+    fs.create(ECFilePath);
+    INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString());
+    assertTrue(inode.asFile().isStriped());
+
+    /* Verify that EC zone cannot be created on non-empty dir */
+    final Path notEmpty = new Path("/nonEmpty");
+    fs.mkdir(notEmpty, FsPermission.getDirDefault());
+    fs.create(new Path(notEmpty, "foo"));
+    try {
+      fs.getClient().createErasureCodingZone(notEmpty.toString());
+      fail("Erasure coding zone on non-empty dir");
+    } catch (IOException e) {
+      assertExceptionContains("erasure coding zone for a non-empty directory", e);
+    }
+
+    /* Verify that nested EC zones cannot be created */
+    final Path zone1 = new Path("/zone1");
+    final Path zone2 = new Path(zone1, "zone2");
+    fs.mkdir(zone1, FsPermission.getDirDefault());
+    fs.getClient().createErasureCodingZone(zone1.toString());
+    fs.mkdir(zone2, FsPermission.getDirDefault());
+    try {
+      fs.getClient().createErasureCodingZone(zone2.toString());
+      fail("Nested erasure coding zones");
+    } catch (IOException e) {
+      assertExceptionContains("already in an erasure coding zone", e);
+    }
+
+    /* Verify that EC zone cannot be created on a file */
+    final Path fPath = new Path("/file");
+    fs.create(fPath);
+    try {
+      fs.getClient().createErasureCodingZone(fPath.toString());
+      fail("Erasure coding zone on file");
+    } catch (IOException e) {
+      assertExceptionContains("erasure coding zone for a file", e);
+    }
+  }
+
+  @Test
+  public void testMoveValidity() throws IOException, InterruptedException {
+    final Path srcECDir = new Path("/srcEC");
+    final Path dstECDir = new Path("/dstEC");
+    fs.mkdir(srcECDir, FsPermission.getDirDefault());
+    fs.mkdir(dstECDir, FsPermission.getDirDefault());
+    fs.getClient().createErasureCodingZone(srcECDir.toString());
+    fs.getClient().createErasureCodingZone(dstECDir.toString());
+    final Path srcFile = new Path(srcECDir, "foo");
+    fs.create(srcFile);
+
+    /* Verify that a file can be moved between 2 EC zones */
+    try {
+      fs.rename(srcFile, dstECDir);
+    } catch (IOException e) {
+      fail("A file should be able to move between 2 EC zones " + e);
+    }
+
+    // Move the file back
+    fs.rename(new Path(dstECDir, "foo"), srcECDir);
+
+    /* Verify that a file cannot be moved from a non-EC dir to an EC zone */
+    final Path nonECDir = new Path("/nonEC");
+    fs.mkdir(nonECDir, FsPermission.getDirDefault());
+    try {
+      fs.rename(srcFile, nonECDir);
+      fail("A file shouldn't be able to move from a non-EC dir to an EC zone");
+    } catch (IOException e) {
+      assertExceptionContains("can't be moved because the source and " +
+          "destination have different erasure coding policies", e);
+    }
+
+    /* Verify that a file cannot be moved from an EC zone to a non-EC dir */
+    final Path nonECFile = new Path(nonECDir, "nonECFile");
+    fs.create(nonECFile);
+    try {
+      fs.rename(nonECFile, dstECDir);
+    } catch (IOException e) {
+      assertExceptionContains("can't be moved because the source and " +
+          "destination have different erasure coding policies", e);
+    }
+  }
+}
\ No newline at end of file
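A small gap in the last block of testMoveValidity() above: unlike the earlier cases, it has no fail() call after the rename, so the test silently passes even if the cross-zone rename is wrongly allowed. A corrected sketch of that block (same assertion, plus the missing fail(); everything else reuses the fields declared in the test above):

    /* Verify that a file cannot be moved between an EC zone and a non-EC dir */
    final Path nonECFile = new Path(nonECDir, "nonECFile");
    fs.create(nonECFile);
    try {
      fs.rename(nonECFile, dstECDir);
      fail("A file shouldn't be able to move between a non-EC dir and an EC zone");
    } catch (IOException e) {
      assertExceptionContains("can't be moved because the source and " +
          "destination have different erasure coding policies", e);
    }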
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java
deleted file mode 100644
index a84f67b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInitialEncoding.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.*;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.EC_STORAGE_POLICY_NAME;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.EC_STORAGE_POLICY_ID;
-import static org.junit.Assert.assertEquals;
-
-public class TestBlockInitialEncoding {
-  private final int NUM_OF_DATANODES = 3;
-  private Configuration conf;
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem fs;
-  private static final int BLOCK_SIZE = 1024;
-  private HdfsAdmin dfsAdmin;
-  private FSNamesystem namesystem;
-
-  @Before
-  public void setupCluster() throws IOException {
-    conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(NUM_OF_DATANODES).build();
-    cluster.waitActive();
-    fs = cluster.getFileSystem();
-    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
-    namesystem = cluster.getNamesystem();
-  }
-
-  @After
-  public void shutdownCluster() throws IOException {
-    cluster.shutdown();
-  }
-
-  @Test
-  public void testBlockInitialEncoding()
-      throws IOException, InterruptedException {
-    final Path testDir = new Path("/test");
-    fs.mkdir(testDir, FsPermission.getDirDefault());
-    dfsAdmin.setStoragePolicy(testDir, EC_STORAGE_POLICY_NAME);
-    final Path ECFilePath = new Path("/test/foo.ec");
-    DFSTestUtil.createFile(fs, ECFilePath, 4 * BLOCK_SIZE, (short) 3, 0);
-    INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString());
-    assertEquals(EC_STORAGE_POLICY_ID, inode.getStoragePolicyID());
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 215a4e4..c3c8239 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -68,7 +68,7 @@ public class TestAddStripedBlocks {
         .numDataNodes(GROUP_SIZE).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
-    dfs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
+    dfs.getClient().createErasureCodingZone("/");
   }
 
   @After
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index d3cb749..407d07e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -444,8 +444,7 @@ public class TestFSEditLogLoader {
 
     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
-    fs.setStoragePolicy(new Path(testDir),
-        HdfsConstants.EC_STORAGE_POLICY_NAME);
+    fs.getClient().getNamenode().createErasureCodingZone(testDir);
 
     // Create a file with striped block
     Path p = new Path(testFilePath);
@@ -517,8 +516,7 @@ public class TestFSEditLogLoader {
 
     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));
-    fs.setStoragePolicy(new Path(testDir),
-        HdfsConstants.EC_STORAGE_POLICY_NAME);
+    fs.getClient().getNamenode().createErasureCodingZone(testDir);
 
     //create a file with striped blocks
     Path p = new Path(testFilePath);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 440f5cd..83f01c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -33,18 +33,14 @@ import java.io.IOException;
 import java.util.EnumSet;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -59,7 +55,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
@@ -137,9 +132,10 @@ public class TestFSImage {
     }
   }
 
-  private void testSaveAndLoadINodeFile(FSNamesystem fsn, Configuration conf,
-      boolean isUC) throws IOException{
+  private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
+      boolean isUC) throws IOException{
     // contruct a INode with StripedBlock for saving and loading
+    fsn.createErasureCodingZone("/", false);
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
@@ -149,9 +145,8 @@ public class TestFSImage {
     BlockInfoContiguous[] blks = new BlockInfoContiguous[0];
     short replication = 3;
     long preferredBlockSize = 128*1024*1024;
-    byte storagePolicyID = HdfsConstants.EC_STORAGE_POLICY_ID;
     INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
-        blks, replication, preferredBlockSize, storagePolicyID);
+        blks, replication, preferredBlockSize);
     ByteArrayOutputStream bs = new ByteArrayOutputStream();
 
     file.addStripedBlocksFeature();
@@ -237,13 +232,13 @@ public class TestFSImage {
    * FSImageSerialization and loaded by FSImageFormat#Loader.
    */
  @Test
-  public void testSaveAndLoadInodeFile() throws IOException{
+  public void testSaveAndLoadStripedINodeFile() throws IOException{
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
-      testSaveAndLoadINodeFile(cluster.getNamesystem(), conf, false);
+      testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, false);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -256,14 +251,14 @@ public class TestFSImage {
    * saved and loaded by FSImageSerialization
    */
   @Test
-  public void testSaveAndLoadInodeFileUC() throws IOException{
+  public void testSaveAndLoadStripedINodeFileUC() throws IOException{
     // construct a INode with StripedBlock for saving and loading
     Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
-      testSaveAndLoadINodeFile(cluster.getNamesystem(), conf, true);
+      testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, true);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -402,7 +397,7 @@ public class TestFSImage {
         .build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
-    fs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
+    fs.getClient().getNamenode().createErasureCodingZone("/");
     Path file = new Path("/striped");
     FSDataOutputStream out = fs.create(file);
     byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d717dc3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
index b9fd4fe..d965ae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRecoverStripedBlocks.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -37,23 +36,19 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.Iterator;
 import java.util.List;
 import java.util.UUID;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.EC_STORAGE_POLICY_NAME;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_DATA_BLOCKS;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -93,7 +88,7 @@ public class TestRecoverStripedBlocks {
       int numBlocks) throws Exception {
     DistributedFileSystem dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
-    dfs.setStoragePolicy(dir, EC_STORAGE_POLICY_NAME);
+    dfs.getClient().getNamenode().createErasureCodingZone(dir.toString());
 
     FSDataOutputStream out = null;
     try {