From: vinayakumarb@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Wed, 03 Jun 2015 08:55:36 -0000
Message-Id: <9b2f4b0576b1494c9d7d2d5a429482b5@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [1/2] hadoop git commit: HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (Contributed by surendra singh lilhore)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 066e45bcb -> 4bb725b4d
  refs/heads/trunk 54f83d9bd -> e965dcec3


HDFS-8470. fsimage loading progress should update inode, delegation token and cache pool count. (Contributed by surendra singh lilhore)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e965dcec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e965dcec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e965dcec

Branch: refs/heads/trunk
Commit: e965dcec378cb807856372425307598792977604
Parents: 54f83d9
Author: Vinayakumar B
Authored: Wed Jun 3 14:24:55 2015 +0530
Committer: Vinayakumar B
Committed: Wed Jun 3 14:24:55 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/namenode/FSImageFormatPBINode.java   | 15 ++++++++--
 .../server/namenode/FSImageFormatProtobuf.java  | 30 ++++++++++++++------
 3 files changed, 36 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e965dcec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 402a547..8cbe0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -840,6 +840,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8256. "-storagepolicies , -blockId ,-replicaDetails " options are
     missed out in usage and from documentation (J.Andreina via vinayakumarb)
 
+    HDFS-8470. fsimage loading progress should update inode, delegation token and
+    cache pool count. (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e965dcec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 1c14220..e8378e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -58,6 +58,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFea
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
@@ -251,11 +255,15 @@ public final class FSImageFormatPBINode {
       }
     }
 
-    void loadINodeSection(InputStream in) throws IOException {
+    void loadINodeSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       INodeSection s = INodeSection.parseDelimitedFrom(in);
       fsn.dir.resetLastInodeId(s.getLastInodeId());
-      LOG.info("Loading " + s.getNumInodes() + " INodes.");
-      for (int i = 0; i < s.getNumInodes(); ++i) {
+      long numInodes = s.getNumInodes();
+      LOG.info("Loading " + numInodes + " INodes.");
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numInodes; ++i) {
         INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
         if (p.getId() == INodeId.ROOT_INODE_ID) {
           loadRootINode(p);
@@ -263,6 +271,7 @@ public final class FSImageFormatPBINode {
           INode n = loadINode(p);
           dir.addToInodeMap(n);
         }
+        counter.increment();
       }
     }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e965dcec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 24afcae..69e9bb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
@@ -250,7 +251,7 @@ public final class FSImageFormatProtobuf {
         case INODE: {
           currentStep = new Step(StepType.INODES);
           prog.beginStep(Phase.LOADING_FSIMAGE, currentStep);
-          inodeLoader.loadINodeSection(in);
+          inodeLoader.loadINodeSection(in, prog, currentStep);
         }
           break;
         case INODE_REFERENCE:
@@ -272,14 +273,14 @@ public final class FSImageFormatProtobuf {
           prog.endStep(Phase.LOADING_FSIMAGE, currentStep);
           Step step = new Step(StepType.DELEGATION_TOKENS);
           prog.beginStep(Phase.LOADING_FSIMAGE, step);
-          loadSecretManagerSection(in);
+          loadSecretManagerSection(in, prog, step);
           prog.endStep(Phase.LOADING_FSIMAGE, step);
         }
           break;
         case CACHE_MANAGER: {
           Step step = new Step(StepType.CACHE_POOLS);
           prog.beginStep(Phase.LOADING_FSIMAGE, step);
-          loadCacheManagerSection(in);
+          loadCacheManagerSection(in, prog, step);
           prog.endStep(Phase.LOADING_FSIMAGE, step);
         }
           break;
@@ -316,7 +317,8 @@ public final class FSImageFormatProtobuf {
       }
     }
 
-    private void loadSecretManagerSection(InputStream in) throws IOException {
+    private void loadSecretManagerSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
       int numKeys = s.getNumKeys(), numTokens = s.getNumTokens();
       ArrayList<SecretManagerSection.DelegationKey> keys = Lists
@@ -327,20 +329,30 @@ public final class FSImageFormatProtobuf {
 
       for (int i = 0; i < numKeys; ++i)
         keys.add(SecretManagerSection.DelegationKey.parseDelimitedFrom(in));
-      for (int i = 0; i < numTokens; ++i)
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numTokens);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numTokens; ++i) {
         tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in));
+        counter.increment();
+      }
 
       fsn.loadSecretManagerState(s, keys, tokens);
     }
 
-    private void loadCacheManagerSection(InputStream in) throws IOException {
+    private void loadCacheManagerSection(InputStream in, StartupProgress prog,
+        Step currentStep) throws IOException {
       CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(in);
-      ArrayList<CachePoolInfoProto> pools = Lists.newArrayListWithCapacity(s
-          .getNumPools());
+      int numPools = s.getNumPools();
+      ArrayList<CachePoolInfoProto> pools = Lists
+          .newArrayListWithCapacity(numPools);
       ArrayList<CacheDirectiveInfoProto> directives = Lists
           .newArrayListWithCapacity(s.getNumDirectives());
-      for (int i = 0; i < s.getNumPools(); ++i)
+      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numPools);
+      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
+      for (int i = 0; i < numPools; ++i) {
        pools.add(CachePoolInfoProto.parseDelimitedFrom(in));
+        counter.increment();
+      }
       for (int i = 0; i < s.getNumDirectives(); ++i)
         directives.add(CacheDirectiveInfoProto.parseDelimitedFrom(in));
       fsn.getCacheManager().loadState(
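
Each of the section loaders in this patch applies the same StartupProgress pattern: publish the item count for the current step with setTotal(), fetch that step's Counter, and increment it once per record parsed, so the LOADING_FSIMAGE phase advances as inodes, delegation tokens and cache pools are read. The standalone sketch below restates that pattern in isolation; the SectionProgressSketch class, its loadSectionWithProgress helper and the numItems parameter are illustrative only and are not part of this commit.

----------------------------------------------------------------------
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;

public class SectionProgressSketch {

  /**
   * Illustrative helper (not part of the patch): processes numItems records
   * from the stream while reporting progress for the given step of the
   * LOADING_FSIMAGE phase, mirroring the pattern used in the diff above.
   */
  static void loadSectionWithProgress(InputStream in, StartupProgress prog,
      Step currentStep, long numItems) throws IOException {
    // Publish the expected total for this step up front.
    prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numItems);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
    for (long i = 0; i < numItems; ++i) {
      // ... parse one delimited record from 'in' here ...
      counter.increment();  // advance the step's counter per record
    }
  }
}
----------------------------------------------------------------------

Publishing the total before the loop is what lets the startup-progress view report a percentage for each step instead of only a raw running count.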