From: arp@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Thu, 25 Sep 2014 03:09:36 -0000
Message-Id: <96e4a4a74cbd493994aa39af7d06fd79@git.apache.org>
Subject: [17/18] git commit: HDFS-7140. Add a tool to list all the existing block storage policies. Contributed by Jing Zhao.

HDFS-7140. Add a tool to list all the existing block storage policies. Contributed by Jing Zhao.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/428a7666
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/428a7666
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/428a7666

Branch: refs/heads/HDFS-6581
Commit: 428a76663a0de5d0d74cc9525273ddc470760e44
Parents: 72b0881
Author: Jing Zhao
Authored: Wed Sep 24 19:11:16 2014 -0700
Committer: Jing Zhao
Committed: Wed Sep 24 19:11:16 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hadoop-hdfs/src/main/bin/hdfs               |  4 ++
 .../hadoop-hdfs/src/main/bin/hdfs.cmd           |  7 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  4 +-
 .../hdfs/protocol/BlockStoragePolicy.java       |  2 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  4 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 ++---
 .../ClientNamenodeProtocolTranslatorPB.java     | 16 ++---
 .../server/blockmanagement/BlockManager.java    |  2 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  6 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  2 +-
 .../hadoop/hdfs/tools/GetStoragePolicies.java   | 65 ++++++++++++++++++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  8 +--
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 11 ++--
 17 files changed, 119 insertions(+), 41 deletions(-)
----------------------------------------------------------------------
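(For readers who want the same listing programmatically rather than through the new command, the renamed client API can be called directly. The snippet below is an illustrative sketch only, not part of this commit; it assumes the loaded Configuration's fs.defaultFS points at an HDFS cluster, and the class name ListPoliciesExample is made up for the example.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

    public class ListPoliciesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          // After this change the client-side API is getStoragePolicies()
          // (formerly getStoragePolicySuite()).
          BlockStoragePolicy[] policies = ((DistributedFileSystem) fs).getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            System.out.println(policy);
          }
        }
      }
    }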
http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f87e5c6..13beb8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -345,6 +345,9 @@ Trunk (Unreleased)
     HDFS-7081. Add new DistributedFileSystem API for getting all the existing
     storage policies. (jing9)
 
+    HDFS-7140. Add a tool to list all the existing block storage policies.
+    (jing9)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 087c674..38726a8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -48,6 +48,7 @@ function hadoop_usage
   echo "  secondarynamenode    run the DFS secondary namenode"
   echo "  snapshotDiff         diff two snapshots of a directory or diff the"
   echo "                       current directory contents with a snapshot"
+  echo "  storagepolicies      get all the existing block storage policies"
   echo "  zkfc                 run the ZK Failover Controller daemon"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -216,6 +217,9 @@ case ${COMMAND} in
   snapshotDiff)
     CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
   ;;
+  storagepolicies)
+    CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  ;;
   zkfc)
     daemon="true"
     CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 9fb8426..69424ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -155,6 +155,10 @@ goto :eof
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_MOVER_OPTS%
   goto :eof
 
+:storagepolicies
+  set CLASS=org.apache.hadoop.hdfs.tools.GetStoragePolicies
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -204,6 +208,7 @@ goto :eof
   @echo   Use -help to see options
   @echo   cacheadmin           configure the HDFS cache
   @echo   mover                run a utility to move block replicas across storage types
+  @echo   storagepolicies      get all the existing block storage policies
   @echo.
   @echo Most commands print help when invoked w/o parameters.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 03f5670..e9fe06f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1783,8 +1783,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * @return All the existing storage policies
    */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namenode.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namenode.getStoragePolicies();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index a9507f5..cd000e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -506,9 +506,9 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /** Get all the existing storage policies */
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     statistics.incrementReadOps(1);
-    return dfs.getStoragePolicySuite();
+    return dfs.getStoragePolicies();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 35bef51..8ca83a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -209,7 +209,7 @@ public class BlockStoragePolicy {
     return getClass().getSimpleName() + "{" + name + ":" + id
         + ", storageTypes=" + Arrays.asList(storageTypes)
         + ", creationFallbacks=" + Arrays.asList(creationFallbacks)
-        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks);
+        + ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
   }
 
   public byte getId() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 7e16feb..df67db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -43,13 +43,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventsList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -264,7 +262,7 @@ public interface ClientProtocol {
    * @return All the in-use block storage policies currently.
    */
   @Idempotent
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException;
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException;
 
   /**
    * Set the storage policy for a file/directory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 26a9762..adad3b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
@@ -1433,13 +1433,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetStoragePolicySuiteResponseProto getStoragePolicySuite(
-      RpcController controller, GetStoragePolicySuiteRequestProto request)
+  public GetStoragePoliciesResponseProto getStoragePolicies(
+      RpcController controller, GetStoragePoliciesRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicy[] policies = server.getStoragePolicySuite();
-      GetStoragePolicySuiteResponseProto.Builder builder =
-          GetStoragePolicySuiteResponseProto.newBuilder();
+      BlockStoragePolicy[] policies = server.getStoragePolicies();
+      GetStoragePoliciesResponseProto.Builder builder =
+          GetStoragePoliciesResponseProto.newBuilder();
       if (policies == null) {
         return builder.build();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 22238b4..90b52e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -118,8 +118,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicySuiteResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
@@ -226,9 +226,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
       GetDataEncryptionKeyRequestProto.newBuilder().build();
 
-  private final static GetStoragePolicySuiteRequestProto
-      VOID_GET_STORAGE_POLICY_SUITE_REQUEST =
-      GetStoragePolicySuiteRequestProto.newBuilder().build();
+  private final static GetStoragePoliciesRequestProto
+      VOID_GET_STORAGE_POLICIES_REQUEST =
+      GetStoragePoliciesRequestProto.newBuilder().build();
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
@@ -1456,10 +1456,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
     try {
-      GetStoragePolicySuiteResponseProto response = rpcProxy
-          .getStoragePolicySuite(null, VOID_GET_STORAGE_POLICY_SUITE_REQUEST);
+      GetStoragePoliciesResponseProto response = rpcProxy
+          .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
       return PBHelper.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4cdec30..ad170d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -402,7 +402,7 @@ public class BlockManager {
     return storagePolicySuite.getPolicy(policyName);
   }
 
-  public BlockStoragePolicy[] getStoragePolicySuite() {
+  public BlockStoragePolicy[] getStoragePolicies() {
     return storagePolicySuite.getAllPolicies();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index c222181..4db0df6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -142,7 +142,7 @@ public class Mover {
 
   private void initStoragePolicies() throws IOException {
     BlockStoragePolicy[] policies = dispatcher.getDistributedFileSystem()
-        .getStoragePolicySuite();
+        .getStoragePolicies();
     for (BlockStoragePolicy policy : policies) {
       this.blockStoragePolicies[policy.getId()] = policy;
     }
@@ -387,8 +387,8 @@ public class Mover {
     boolean scheduleMoveReplica(DBlock db, MLocation ml,
        List targetTypes) {
      final Source source = storages.getSource(ml);
-      return source == null ? false : scheduleMoveReplica(db,
-          storages.getSource(ml), targetTypes);
+      return source == null ? false : scheduleMoveReplica(db, source,
+          targetTypes);
     }
 
     boolean scheduleMoveReplica(DBlock db, Source source,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9ee6448..3b7f050 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2304,13 +2304,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   /**
    * @return All the existing block storage policies
    */
-  BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
+  BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);
     waitForLoadingFSImage();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return blockManager.getStoragePolicySuite();
+      return blockManager.getStoragePolicies();
     } finally {
       readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b05550d..bf1e055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -594,8 +594,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public BlockStoragePolicy[] getStoragePolicySuite() throws IOException {
-    return namesystem.getStoragePolicySuite();
+  public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    return namesystem.getStoragePolicies();
   }
 
   @Override // ClientProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index db0b0d3..525f6d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -614,7 +614,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("The storage policy of " + argv[1] + " is unspecified");
       return 0;
     }
-    BlockStoragePolicy[] policies = dfs.getStoragePolicySuite();
+    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
     for (BlockStoragePolicy p : policies) {
       if (p.getId() == storagePolicyId) {
         System.out.println("The storage policy of " + argv[1] + ":\n" + p);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java
new file mode 100644
index 0000000..d2793eb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetStoragePolicies.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+
+/**
+ * A tool listing all the existing block storage policies. No argument is
+ * required when using this tool.
+ */
+public class GetStoragePolicies extends Configured implements Tool {
+
+  @Override
+  public int run(String[] args) throws Exception {
+    FileSystem fs = FileSystem.get(getConf());
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("GetStoragePolicies can only be used against HDFS. "
+          + "Please check the default FileSystem setting in your configuration.");
+      return 1;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+    try {
+      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
+      System.out.println("Block Storage Policies:");
+      for (BlockStoragePolicy policy : policies) {
+        if (policy != null) {
+          System.out.println("\t" + policy);
+        }
+      }
+    } catch (IOException e) {
+      String[] content = e.getLocalizedMessage().split("\n");
+      System.err.println("GetStoragePolicies: " + content[0]);
+      return 1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) throws Exception {
+    int rc = ToolRunner.run(new GetStoragePolicies(), args);
+    System.exit(rc);
+  }
+}
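(The new tool can also be driven from Java rather than through the hdfs script. The snippet below is an illustrative sketch only; it assumes a client-side HdfsConfiguration pointing at a running cluster, and the driver class name RunStoragePoliciesTool is made up for the example.)

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.GetStoragePolicies;
    import org.apache.hadoop.util.ToolRunner;

    public class RunStoragePoliciesTool {
      public static void main(String[] args) throws Exception {
        // Equivalent to running "hdfs storagepolicies" from the command line.
        int rc = ToolRunner.run(new HdfsConfiguration(), new GetStoragePolicies(), new String[0]);
        // On success the tool prints "Block Storage Policies:" followed by one line per
        // policy in the BlockStoragePolicy#toString() format fixed above, e.g.
        // BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
        System.exit(rc);
      }
    }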
" + + "Please check the default FileSystem setting in your configuration."); + return 1; + } + DistributedFileSystem dfs = (DistributedFileSystem) fs; + + try { + BlockStoragePolicy[] policies = dfs.getStoragePolicies(); + System.out.println("Block Storage Policies:"); + for (BlockStoragePolicy policy : policies) { + if (policy != null) { + System.out.println("\t" + policy); + } + } + } catch (IOException e) { + String[] content = e.getLocalizedMessage().split("\n"); + System.err.println("GetStoragePolicies: " + content[0]); + return 1; + } + return 0; + } + + public static void main(String[] args) throws Exception { + int rc = ToolRunner.run(new GetStoragePolicies(), args); + System.exit(rc); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index ce7bf1c..e09f142 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -108,10 +108,10 @@ message SetStoragePolicyRequestProto { message SetStoragePolicyResponseProto { // void response } -message GetStoragePolicySuiteRequestProto { // void request +message GetStoragePoliciesRequestProto { // void request } -message GetStoragePolicySuiteResponseProto { +message GetStoragePoliciesResponseProto { repeated BlockStoragePolicyProto policies = 1; } @@ -706,8 +706,8 @@ service ClientNamenodeProtocol { returns(SetReplicationResponseProto); rpc setStoragePolicy(SetStoragePolicyRequestProto) returns(SetStoragePolicyResponseProto); - rpc getStoragePolicySuite(GetStoragePolicySuiteRequestProto) - returns(GetStoragePolicySuiteResponseProto); + rpc getStoragePolicies(GetStoragePoliciesRequestProto) + returns(GetStoragePoliciesResponseProto); rpc setPermission(SetPermissionRequestProto) returns(SetPermissionResponseProto); rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto); http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 38ffcee..39d1439 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -73,11 +73,14 @@ public class TestBlockStoragePolicy { public void testDefaultPolicies() { final Map expectedPolicyStrings = new HashMap(); expectedPolicyStrings.put(COLD, - "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]"); + "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " + + "creationFallbacks=[], replicationFallbacks=[]}"); expectedPolicyStrings.put(WARM, - "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]"); + "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " + + "creationFallbacks=[DISK, ARCHIVE], 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/428a7666/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 38ffcee..39d1439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -73,11 +73,14 @@ public class TestBlockStoragePolicy {
   public void testDefaultPolicies() {
     final Map expectedPolicyStrings = new HashMap();
     expectedPolicyStrings.put(COLD,
-        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], creationFallbacks=[], replicationFallbacks=[]");
+        "BlockStoragePolicy{COLD:4, storageTypes=[ARCHIVE], " +
+            "creationFallbacks=[], replicationFallbacks=[]}");
     expectedPolicyStrings.put(WARM,
-        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]");
+        "BlockStoragePolicy{WARM:8, storageTypes=[DISK, ARCHIVE], " +
+            "creationFallbacks=[DISK, ARCHIVE], replicationFallbacks=[DISK, ARCHIVE]}");
     expectedPolicyStrings.put(HOT,
-        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]");
+        "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
+            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
 
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1102,7 +1105,7 @@ public class TestBlockStoragePolicy {
     cluster.waitActive();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      BlockStoragePolicy[] policies = fs.getStoragePolicySuite();
+      BlockStoragePolicy[] policies = fs.getStoragePolicies();
       Assert.assertEquals(3, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());