From: rakeshr@apache.org
To: common-commits@hadoop.apache.org
Date: Thu, 05 Jul 2018 04:17:16 -0000
Subject: [23/50] [abbrv] hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6016d283
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6016d283
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6016d283

Branch: refs/heads/HDFS-10285
Commit: 6016d283f0f083fd3a72fac0f492bd8262cf56d3
Parents: ce8e901
Author: Rakesh Radhakrishnan
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Rakesh Radhakrishnan
Committed: Thu Jul 5 08:44:03 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java   |  22 ++++
 .../hadoop/hdfs/protocol/ClientProtocol.java     |  21 ++++
 .../hadoop/hdfs/protocol/HdfsConstants.java      |  27 +++++
 .../ClientNamenodeProtocolTranslatorPB.java      |  20 ++++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java   |  33 ++++++
 .../src/main/proto/ClientNamenodeProtocol.proto  |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java  |  23 +++-
 .../server/blockmanagement/BlockManager.java     |  12 ++
 .../namenode/BlockStorageMovementNeeded.java     | 109 +++++++++++++++++++
 .../hdfs/server/namenode/NameNodeRpcServer.java  |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java  |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java    |  35 +++++-
 .../src/site/markdown/ArchivalStorage.md         |   3 +-
 .../TestPersistentStoragePolicySatisfier.java    |   2 +-
 .../namenode/TestStoragePolicySatisfier.java     |  67 ++++++++++++
 .../hdfs/tools/TestStoragePolicyCommands.java    |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
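Before the per-file diff, a minimal sketch of how the new API is meant to be consumed end to end: a client schedules SPS work on a path and then polls the NameNode for its progress. This fragment is illustrative only and not part of the commit; the class and method names (SpsStatusPoller, satisfyAndWait) are made up, while the two HDFS calls it uses (DistributedFileSystem#satisfyStoragePolicy and DFSClient#checkStoragePolicySatisfyPathStatus) are exactly the ones this patch wires up below.

// Illustrative sketch (not part of this commit). Class and method names are
// hypothetical; the two HDFS calls are the ones added/used by this patch.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;

public class SpsStatusPoller {
  /** Schedule SPS work on the given path, then poll until it completes. */
  public static void satisfyAndWait(DistributedFileSystem dfs, String path)
      throws Exception {
    dfs.satisfyStoragePolicy(new Path(path));
    while (true) {
      StoragePolicySatisfyPathStatus status =
          dfs.getClient().checkStoragePolicySatisfyPathStatus(path);
      System.out.println(path + ": " + status);
      if (StoragePolicySatisfyPathStatus.SUCCESS.equals(status)) {
        return; // blocks now match the storage policy
      }
      // NOT_AVAILABLE can also mean the work finished and its cached status
      // was already cleared (after statusClearanceElapsedTimeMs, 5 minutes
      // by default), so treat it as terminal rather than spinning forever.
      if (StoragePolicySatisfyPathStatus.NOT_AVAILABLE.equals(status)) {
        return;
      }
      Thread.sleep(10000); // StoragePolicyAdmin's -w option also polls at 10s
    }
  }
}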
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7337aa2..471ab2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3169,4 +3170,25 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     checkOpen();
     return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   *         <ul>
+   *         <li>PENDING if path is in queue and not processed for satisfying
+   *         the policy.</li>
+   *         <li>IN_PROGRESS if satisfying the storage policy for path.</li>
+   *         <li>SUCCESS if storage policy satisfied for the path.</li>
+   *         <li>NOT_AVAILABLE if
+   *         {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   *         path or SPS work is already finished.</li>
+   *         </ul>
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+      String path) throws IOException {
+    return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 81d7c91..360fd63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -1764,4 +1765,24 @@ public interface ClientProtocol {
    */
   @Idempotent
   boolean isStoragePolicySatisfierRunning() throws IOException;
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link ClientProtocol#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   *         <ul>
+   *         <li>PENDING if path is in queue and not processed for satisfying
+   *         the policy.</li>
+   *         <li>IN_PROGRESS if satisfying the storage policy for path.</li>
+   *         <li>SUCCESS if storage policy satisfied for the path.</li>
+   *         <li>NOT_AVAILABLE if
+   *         {@link ClientProtocol#satisfyStoragePolicy(String)} not called for
+   *         path or SPS work is already finished.</li>
+   *         </ul>
+ * @throws IOException + */ + @Idempotent + StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus( + String path) throws IOException; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 74efcd2..190a1c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -128,6 +128,33 @@ public final class HdfsConstants { SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET, SAFEMODE_FORCE_EXIT } + /** + * Storage policy satisfy path status. + */ + public enum StoragePolicySatisfyPathStatus { + /** + * Scheduled but not yet processed. This will come only in case of + * directory. Directory will be added first in "pendingWorkForDirectory" + * queue and then later it is processed recursively. + */ + PENDING, + + /** + * Satisfying the storage policy for path. + */ + IN_PROGRESS, + + /** + * Storage policy satisfied for the path. + */ + SUCCESS, + + /** + * Status not available. + */ + NOT_AVAILABLE + } + public enum RollingUpgradeAction { QUERY, PREPARE, FINALIZE; http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index d7c32bc..cdc8eac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; @@ -100,6 +101,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto; @@ -241,6 +244,7 @@ import org.apache.hadoop.security.token.Token; import com.google.protobuf.ByteString; import com.google.protobuf.Message; import com.google.protobuf.ServiceException; + import org.apache.hadoop.util.concurrent.AsyncGet; /** @@ -1973,4 +1977,20 @@ public class ClientNamenodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + @Override + public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus( + String path) throws IOException { + try { + CheckStoragePolicySatisfyPathStatusRequestProto request = + CheckStoragePolicySatisfyPathStatusRequestProto.newBuilder() + .setSrc(path) + .build(); + CheckStoragePolicySatisfyPathStatusResponseProto response = rpcProxy + .checkStoragePolicySatisfyPathStatus(null, request); + return PBHelperClient.convert(response.getStatus()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 490ccb4..582693f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -130,6 +130,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheF import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto; @@ -3377,4 +3378,36 @@ public class PBHelperClient { } return typeProtos; } + + public static StoragePolicySatisfyPathStatus convert( + HdfsConstants.StoragePolicySatisfyPathStatus status) { + switch (status) { + case PENDING: + return StoragePolicySatisfyPathStatus.PENDING; + case IN_PROGRESS: + return StoragePolicySatisfyPathStatus.IN_PROGRESS; + case SUCCESS: + return StoragePolicySatisfyPathStatus.SUCCESS; + case NOT_AVAILABLE: + return StoragePolicySatisfyPathStatus.NOT_AVAILABLE; + default: + throw new IllegalArgumentException("Unexpected SPSStatus :" + status); + } + } + + public static HdfsConstants.StoragePolicySatisfyPathStatus convert( + 
StoragePolicySatisfyPathStatus status) { + switch (status) { + case PENDING: + return HdfsConstants.StoragePolicySatisfyPathStatus.PENDING; + case IN_PROGRESS: + return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS; + case SUCCESS: + return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS; + case NOT_AVAILABLE: + return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE; + default: + throw new IllegalArgumentException("Unexpected SPSStatus :" + status); + } + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index c84640f..2b666c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -477,7 +477,6 @@ message RollingUpgradeInfoProto { message RollingUpgradeResponseProto { optional RollingUpgradeInfoProto rollingUpgradeInfo= 1; } - message ListCorruptFileBlocksRequestProto { required string path = 1; optional string cookie = 2; @@ -842,6 +841,20 @@ message IsStoragePolicySatisfierRunningResponseProto { required bool running = 1; } +message CheckStoragePolicySatisfyPathStatusRequestProto { // no parameters + required string src = 1; +} + +message CheckStoragePolicySatisfyPathStatusResponseProto { + enum StoragePolicySatisfyPathStatus { + PENDING = 0; + IN_PROGRESS = 1; + SUCCESS = 2; + NOT_AVAILABLE = 3; + } + required StoragePolicySatisfyPathStatus status = 1; +} + service ClientNamenodeProtocol { rpc getBlockLocations(GetBlockLocationsRequestProto) returns(GetBlockLocationsResponseProto); @@ -1032,4 +1045,6 @@ service ClientNamenodeProtocol { returns(SatisfyStoragePolicyResponseProto); rpc isStoragePolicySatisfierRunning(IsStoragePolicySatisfierRunningRequestProto) returns(IsStoragePolicySatisfierRunningResponseProto); + rpc checkStoragePolicySatisfyPathStatus(CheckStoragePolicySatisfyPathStatusRequestProto) + returns(CheckStoragePolicySatisfyPathStatusResponseProto); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index f338d4e..09f7ce2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import 
org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -85,6 +86,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Append import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; @@ -257,7 +260,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCod import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; +import org.apache.hadoop.hdfs.protocol.proto.*; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; @@ -1922,4 +1925,22 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } return VOID_SATISFYSTORAGEPOLICY_RESPONSE; } + + @Override + public CheckStoragePolicySatisfyPathStatusResponseProto + checkStoragePolicySatisfyPathStatus(RpcController controller, + CheckStoragePolicySatisfyPathStatusRequestProto request) + throws ServiceException { + try { + StoragePolicySatisfyPathStatus status = server + .checkStoragePolicySatisfyPathStatus(request.getSrc()); + CheckStoragePolicySatisfyPathStatusResponseProto.Builder builder = + CheckStoragePolicySatisfyPathStatusResponseProto + .newBuilder(); + builder.setStatus(PBHelperClient.convert(status)); + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 0ee558a..c81ed6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -47,6 +47,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.FutureTask; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; + import javax.management.ObjectName; import 
org.apache.hadoop.HadoopIllegalArgumentException; @@ -68,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; @@ -5095,4 +5097,14 @@ public class BlockManager implements BlockStatsMXBean { public boolean isStoragePolicySatisfierRunning() { return sps.isRunning(); } + + /** + * @return status + * Storage policy satisfy status of the path. + * @throws IOException + */ + public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus( + String path) throws IOException { + return sps.checkStoragePolicySatisfyPathStatus(path); + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java index 788a98b..8f7487c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementNeeded.java @@ -26,13 +26,17 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo; import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier.ItemInfo; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,6 +66,9 @@ public class BlockStorageMovementNeeded { private final Map pendingWorkForDirectory = new HashMap(); + private final Map spsStatus = + new ConcurrentHashMap<>(); + private final Namesystem namesystem; // List of pending dir to satisfy the policy @@ -73,6 +80,10 @@ public class BlockStorageMovementNeeded { private final int maxQueuedItem; + // Amount of time to cache the SUCCESS status of path before turning it to + // NOT_AVAILABLE. 
+ private static long statusClearanceElapsedTimeMs = 300000; + public BlockStorageMovementNeeded(Namesystem namesystem, StoragePolicySatisfier sps, int queueLimit) { this.namesystem = namesystem; @@ -88,6 +99,9 @@ public class BlockStorageMovementNeeded { * - track info for satisfy the policy */ public synchronized void add(ItemInfo trackInfo) { + spsStatus.put(trackInfo.getStartId(), + new StoragePolicySatisfyPathStatusInfo( + StoragePolicySatisfyPathStatus.IN_PROGRESS)); storageMovementNeeded.add(trackInfo); } @@ -125,6 +139,8 @@ public class BlockStorageMovementNeeded { } public synchronized void addToPendingDirQueue(long id) { + spsStatus.put(id, new StoragePolicySatisfyPathStatusInfo( + StoragePolicySatisfyPathStatus.PENDING)); spsDirsToBeTraveresed.add(id); // Notify waiting FileInodeIdCollector thread about the newly // added SPS path. @@ -172,6 +188,7 @@ public class BlockStorageMovementNeeded { if (inode == null) { // directory deleted just remove it. this.pendingWorkForDirectory.remove(startId); + markSuccess(startId); } else { DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId); if (pendingWork != null) { @@ -179,6 +196,7 @@ public class BlockStorageMovementNeeded { if (pendingWork.isDirWorkDone()) { namesystem.removeXattr(startId, XATTR_SATISFY_STORAGE_POLICY); pendingWorkForDirectory.remove(startId); + markSuccess(startId); } } } @@ -187,6 +205,7 @@ public class BlockStorageMovementNeeded { // storageMovementAttemptedItems or file policy satisfied. namesystem.removeXattr(trackInfo.getTrackId(), XATTR_SATISFY_STORAGE_POLICY); + markSuccess(trackInfo.getStartId()); } } @@ -203,6 +222,19 @@ public class BlockStorageMovementNeeded { } /** + * Mark inode status as SUCCESS in map. + */ + private void markSuccess(long startId){ + StoragePolicySatisfyPathStatusInfo spsStatusInfo = + spsStatus.get(startId); + if (spsStatusInfo == null) { + spsStatusInfo = new StoragePolicySatisfyPathStatusInfo(); + spsStatus.put(startId, spsStatusInfo); + } + spsStatusInfo.setSuccess(); + } + + /** * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded * and notify to clean up required resources. * @throws IOException @@ -256,6 +288,7 @@ public class BlockStorageMovementNeeded { @Override public void run() { LOG.info("Starting FileInodeIdCollector!."); + long lastStatusCleanTime = 0; while (namesystem.isRunning() && sps.isRunning()) { try { if (!namesystem.isInSafeMode()) { @@ -271,6 +304,9 @@ public class BlockStorageMovementNeeded { if (startInode != null) { try { remainingCapacity = remainingCapacity(); + spsStatus.put(startINodeId, + new StoragePolicySatisfyPathStatusInfo( + StoragePolicySatisfyPathStatus.IN_PROGRESS)); readLock(); traverseDir(startInode.asDirectory(), startINodeId, HdfsFileStatus.EMPTY_NAME, @@ -289,9 +325,16 @@ public class BlockStorageMovementNeeded { namesystem.removeXattr(startInode.getId(), XATTR_SATISFY_STORAGE_POLICY); pendingWorkForDirectory.remove(startInode.getId()); + markSuccess(startInode.getId()); } } } + //Clear the SPS status if status is in SUCCESS more than 5 min. 
+ if (Time.monotonicNow() + - lastStatusCleanTime > statusClearanceElapsedTimeMs) { + lastStatusCleanTime = Time.monotonicNow(); + cleanSpsStatus(); + } } } catch (Throwable t) { LOG.warn("Exception while loading inodes to satisfy the policy", t); @@ -299,6 +342,16 @@ public class BlockStorageMovementNeeded { } } + private synchronized void cleanSpsStatus() { + for (Iterator> it = + spsStatus.entrySet().iterator(); it.hasNext();) { + Entry entry = it.next(); + if (entry.getValue().canRemove()) { + it.remove(); + } + } + } + @Override protected void checkPauseForTesting() throws InterruptedException { // TODO implement if needed @@ -434,4 +487,60 @@ public class BlockStorageMovementNeeded { return startId; } } + + /** + * Represent the file/directory block movement status. + */ + static class StoragePolicySatisfyPathStatusInfo { + private StoragePolicySatisfyPathStatus status = + StoragePolicySatisfyPathStatus.NOT_AVAILABLE; + private long lastStatusUpdateTime; + + StoragePolicySatisfyPathStatusInfo() { + this.lastStatusUpdateTime = 0; + } + + StoragePolicySatisfyPathStatusInfo(StoragePolicySatisfyPathStatus status) { + this.status = status; + this.lastStatusUpdateTime = 0; + } + + private void setSuccess() { + this.status = StoragePolicySatisfyPathStatus.SUCCESS; + this.lastStatusUpdateTime = Time.monotonicNow(); + } + + private StoragePolicySatisfyPathStatus getStatus() { + return status; + } + + /** + * Return true if SUCCESS status cached more then 5 min. + */ + private boolean canRemove() { + return StoragePolicySatisfyPathStatus.SUCCESS == status + && (Time.monotonicNow() + - lastStatusUpdateTime) > statusClearanceElapsedTimeMs; + } + } + + public StoragePolicySatisfyPathStatus getStatus(long id) { + StoragePolicySatisfyPathStatusInfo spsStatusInfo = spsStatus.get(id); + if(spsStatusInfo == null){ + return StoragePolicySatisfyPathStatus.NOT_AVAILABLE; + } + return spsStatusInfo.getStatus(); + } + + @VisibleForTesting + public static void setStatusClearanceElapsedTimeMs( + long statusClearanceElapsedTimeMs) { + BlockStorageMovementNeeded.statusClearanceElapsedTimeMs = + statusClearanceElapsedTimeMs; + } + + @VisibleForTesting + public static long getStatusClearanceElapsedTimeMs() { + return statusClearanceElapsedTimeMs; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 2f621e6..4738bf5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -28,7 +28,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH; - import static org.apache.hadoop.util.Time.now; import java.io.FileNotFoundException; @@ -111,6 +110,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -2542,4 +2542,15 @@ public class NameNodeRpcServer implements NamenodeProtocols { } return namesystem.getBlockManager().isStoragePolicySatisfierRunning(); } + + @Override + public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus( + String path) throws IOException { + checkNNStartup(); + if (nn.isStandbyState()) { + throw new StandbyException("Not supported by Standby Namenode."); + } + return namesystem.getBlockManager().checkStoragePolicySatisfyPathStatus( + path); + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java index cbfba44..2382d36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.util.Time.monotonicNow; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -36,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.server.balancer.Matcher; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; @@ -934,4 +936,10 @@ public class StoragePolicySatisfier implements Runnable { } } + + public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus( + String path) throws IOException { + INode inode = namesystem.getFSDirectory().getINode(path); + return storageMovementNeeded.getStatus(inode.getId()); + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java index c351410..05498d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.tools.TableListing; import org.apache.hadoop.util.StringUtils; @@ -258,7 +259,7 @@ public class StoragePolicyAdmin extends Configured implements Tool { @Override public String getShortUsage() { - return "[" + getName() + " -path ]\n"; + return "[" + getName() + " [-w] -path ]\n"; } @Override @@ -266,6 +267,14 @@ public class StoragePolicyAdmin extends Configured implements Tool { TableListing listing = AdminHelper.getOptionDescriptionListing(); listing.addRow("", "The path of the file/directory to satisfy" + " storage policy"); + listing.addRow("-w", + "It requests that the command wait till all the files satisfy" + + " the policy in given path. This will print the current" + + "status of the path in each 10 sec and status are:\n" + + "PENDING : Path is in queue and not processed for satisfying" + + " the policy.\nIN_PROGRESS : Satisfying the storage policy for" + + " path.\nSUCCESS : Storage policy satisfied for the path.\n" + + "NOT_AVAILABLE : Status not available."); return getShortUsage() + "\n" + "Schedule blocks to move based on file/directory policy.\n\n" + listing.toString(); @@ -285,12 +294,36 @@ public class StoragePolicyAdmin extends Configured implements Tool { dfs.satisfyStoragePolicy(new Path(path)); System.out.println("Scheduled blocks to move based on the current" + " storage policy on " + path); + boolean waitOpt = StringUtils.popOption("-w", args); + if (waitOpt) { + waitForSatisfyPolicy(dfs, path); + } } catch (Exception e) { System.err.println(AdminHelper.prettifyException(e)); return 2; } return 0; } + + + private void waitForSatisfyPolicy(DistributedFileSystem dfs, String path) + throws IOException { + System.out.println("Waiting for satisfy the policy ..."); + while (true) { + StoragePolicySatisfyPathStatus status = dfs.getClient() + .checkStoragePolicySatisfyPathStatus(path); + if (StoragePolicySatisfyPathStatus.SUCCESS.equals(status)) { + System.out.println(status); + break; + } + System.out.println(status); + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + } + } + System.out.println(" done"); + } } /** Command to check storage policy satisfier status. */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md index 5defbd0..cf17e99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md @@ -217,13 +217,14 @@ Schedule blocks to move based on file's/directory's current storage policy. * Command: - hdfs storagepolicies -satisfyStoragePolicy -path + hdfs storagepolicies -satisfyStoragePolicy [-w] -path * Arguments: | | | |:---- |:---- | | `-path ` | The path referring to either a directory or a file. | +| `-w` | It requests that the command wait till all the files satisfy the policy in given path. This will print the current status of the path in each 10 sec and status are:
PENDING - Path is in queue and not processed for satisfying the policy.<br/>IN_PROGRESS - Satisfying the storage policy for path.<br/>SUCCESS - Storage policy satisfied for the path.<br/>
NOT_AVAILABLE - Status not available. | ### SPS Running Status http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java index 7165d06..c301b8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java @@ -479,9 +479,9 @@ public class TestPersistentStoragePolicySatisfier { clusterSetUp(); fs.setStoragePolicy(parentDir, "COLD"); fs.satisfyStoragePolicy(childDir); - fs.satisfyStoragePolicy(parentDir); DFSTestUtil.waitExpectedStorageType(childFileName, StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem()); + fs.satisfyStoragePolicy(parentDir); DFSTestUtil.waitExpectedStorageType(parentFileName, StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem()); } finally { http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java index 70219f6..f42d911 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -1463,6 +1464,72 @@ public class TestStoragePolicySatisfier { } } + @Test(timeout = 300000) + public void testStoragePolicySatisfyPathStatus() throws Exception { + try { + config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, + true); + config.set(DFSConfigKeys + .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY, + "3000"); + config.setBoolean(DFSConfigKeys + .DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY, + true); + + StorageType[][] storagetypes = new StorageType[][] { + {StorageType.ARCHIVE, StorageType.DISK}, + {StorageType.ARCHIVE, StorageType.DISK}}; + hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(2) + .storageTypes(storagetypes).build(); + hdfsCluster.waitActive(); + BlockStorageMovementNeeded.setStatusClearanceElapsedTimeMs(20000); + dfs = hdfsCluster.getFileSystem(); + Path filePath = new Path("/file"); + DFSTestUtil.createFile(dfs, filePath, 1024, 
(short) 2,
+          0);
+      dfs.setStoragePolicy(filePath, "COLD");
+      dfs.satisfyStoragePolicy(filePath);
+      StoragePolicySatisfyPathStatus status = dfs.getClient()
+          .checkStoragePolicySatisfyPathStatus(filePath.toString());
+      Assert.assertTrue("Status should be IN_PROGRESS",
+          StoragePolicySatisfyPathStatus.IN_PROGRESS.equals(status));
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, dfs);
+
+      // wait till status is SUCCESS
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            StoragePolicySatisfyPathStatus status = dfs.getClient()
+                .checkStoragePolicySatisfyPathStatus(filePath.toString());
+            return StoragePolicySatisfyPathStatus.SUCCESS.equals(status);
+          } catch (IOException e) {
+            Assert.fail("Fail to get path status for sps");
+          }
+          return false;
+        }
+      }, 100, 60000);
+
+      // wait till status is NOT_AVAILABLE
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            StoragePolicySatisfyPathStatus status = dfs.getClient()
+                .checkStoragePolicySatisfyPathStatus(filePath.toString());
+            return StoragePolicySatisfyPathStatus.NOT_AVAILABLE.equals(status);
+          } catch (IOException e) {
+            Assert.fail("Fail to get path status for sps");
+          }
+          return false;
+        }
+      }, 100, 60000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private static void createDirectoryTree(DistributedFileSystem dfs)
       throws Exception {
     // tree structure

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6016d283/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 1a38105..0644a83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -204,4 +204,22 @@ public class TestStoragePolicyCommands {
     DFSTestUtil.toolRun(admin, "-isSatisfierRunning status", 1,
         "Can't understand arguments: ");
   }
+
+  @Test(timeout = 90000)
+  public void testSatisfyStoragePolicyCommandWithWaitOption()
+      throws Exception {
+    final String file = "/testSatisfyStoragePolicyCommandWithWaitOption";
+    DFSTestUtil.createFile(fs, new Path(file), SIZE, REPL, 0);
+
+    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
+
+    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file
+        + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
+
+    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -w -path " + file, 0,
+        "Waiting for satisfy the policy");
+
+    DFSTestUtil
+        .waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, fs);
+  }
 }
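To round out the wait-option flow above, a hypothetical driver equivalent to running "hdfs storagepolicies -satisfyStoragePolicy -w -path /file" from the shell. The class name SatisfyStoragePolicyDriver and the /file path are invented for illustration; StoragePolicyAdmin and ToolRunner are the real entry points that the test above exercises.

// Hypothetical driver (not part of this commit), equivalent to:
//   hdfs storagepolicies -satisfyStoragePolicy -w -path /file
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.StoragePolicyAdmin;
import org.apache.hadoop.util.ToolRunner;

public class SatisfyStoragePolicyDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // StoragePolicyAdmin implements Tool, so ToolRunner parses the generic
    // options and dispatches to the satisfyStoragePolicy command handler.
    int rc = ToolRunner.run(conf, new StoragePolicyAdmin(conf), new String[] {
        "-satisfyStoragePolicy", "-w", "-path", "/file"});
    // With -w the command polls every 10 seconds, printing PENDING,
    // IN_PROGRESS and finally SUCCESS before it returns 0.
    System.exit(rc);
  }
}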