Subject: svn commit: r1536885 - in /hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
Date: Tue, 29 Oct 2013 21:00:35 -0000
From: cmccabe@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20131029210035.7387123889EC@eris.apache.org>

Author: cmccabe
Date: Tue Oct 29 21:00:34 2013
New Revision: 1536885

URL: http://svn.apache.org/r1536885
Log:
HDFS-4657. Limit the number of blocks logged by the NN after a block report
to a configurable value. (atm via cmccabe)

Modified:
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1536885&r1=1536884&r2=1536885&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 29 21:00:34 2013
@@ -15,6 +15,10 @@ Release 2.2.1 - UNRELEASED
     HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface
     implementation. (Vinayakumar B via umamahesh)
 
+    HDFS-4657. Limit the number of blocks logged by the NN after a block
+    report to a configurable value. (Aaron T. Myers via Colin Patrick
+    McCabe)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1536885&r1=1536884&r2=1536885&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Oct 29 21:00:34 2013
@@ -490,6 +490,9 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT = 120000;
   public static final int     DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT = 20000;
 
+  public static final String  DFS_MAX_NUM_BLOCKS_TO_LOG_KEY = "dfs.namenode.max-num-blocks-to-log";
+  public static final long    DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT = 1000l;
+
   public static final String  DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY = "dfs.namenode.enable.retrycache";
   public static final boolean DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT = true;
   public static final String  DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.namenode.retrycache.expirytime.millis";

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1536885&r1=1536884&r2=1536885&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Oct 29 21:00:34 2013
@@ -216,6 +216,9 @@ public class BlockManager {
 
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;
+
+  // Max number of blocks to log info about during a block report.
+  private final long maxNumBlocksToLog;
 
   /**
    * When running inside a Standby node, the node may receive block reports
@@ -297,6 +300,10 @@ public class BlockManager {
         conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
             DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
 
+    this.maxNumBlocksToLog =
+        conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
+            DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
+
     LOG.info("defaultReplication = " + defaultReplication);
     LOG.info("maxReplication = " + maxReplication);
     LOG.info("minReplication = " + minReplication);
@@ -304,6 +311,7 @@ public class BlockManager {
     LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
     LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
     LOG.info("encryptDataTransfer = " + encryptDataTransfer);
+    LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);
   }
 
   private static BlockTokenSecretManager createBlockTokenSecretManager(
@@ -1700,8 +1708,14 @@ public class BlockManager {
     for (Block b : toRemove) {
       removeStoredBlock(b, node);
     }
+    int numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, null, true);
+      addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog);
+      numBlocksLogged++;
+    }
+    if (numBlocksLogged > maxNumBlocksToLog) {
+      blockLog.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog
+          + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
       blockLog.info("BLOCK* processReport: "
@@ -2641,8 +2655,14 @@ assert storedBlock.findDatanode(dn) < 0
     for (StatefulBlockInfo b : toUC) {
       addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState);
     }
+    long numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, delHintNode, true);
+      addStoredBlock(b, node, delHintNode, numBlocksLogged < maxNumBlocksToLog);
+      numBlocksLogged++;
+    }
+    if (numBlocksLogged > maxNumBlocksToLog) {
+      blockLog.info("BLOCK* addBlock: logged info for " + maxNumBlocksToLog
+          + " of " + numBlocksLogged + " reported.");
    }
     for (Block b : toInvalidate) {
       blockLog.info("BLOCK* addBlock: block "
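
Both report-processing paths in the BlockManager hunks above follow the same pattern: count every reported block, pass addStoredBlock a boolean that is true only while the count is below maxNumBlocksToLog, then emit a single summary line once the limit has been exceeded. A self-contained sketch of that pattern outside Hadoop, for illustration only (the class, method, and logger names here are not part of r1536885):

import java.util.Arrays;
import java.util.List;
import java.util.logging.Logger;

// Standalone sketch of the log-throttling pattern used above; not Hadoop code.
public class ThrottledBlockLogSketch {
  private static final Logger LOG = Logger.getLogger("blockLog");

  static void processReport(List<String> toAdd, long maxNumBlocksToLog) {
    int numBlocksLogged = 0;
    for (String block : toAdd) {
      // Only the first maxNumBlocksToLog blocks get a detailed log line.
      if (numBlocksLogged < maxNumBlocksToLog) {
        LOG.info("BLOCK* processReport: added " + block);
      }
      numBlocksLogged++;
    }
    // One summary line stands in for the per-block messages that were suppressed.
    if (numBlocksLogged > maxNumBlocksToLog) {
      LOG.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog
          + " of " + numBlocksLogged + " reported.");
    }
  }

  public static void main(String[] args) {
    // With a limit of 2, only blk_1 and blk_2 are logged in detail,
    // followed by "logged info for 2 of 3 reported."
    processReport(Arrays.asList("blk_1", "blk_2", "blk_3"), 2);
  }
}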
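
The limit itself is read from the new dfs.namenode.max-num-blocks-to-log key added to DFSConfigKeys, with a default of 1000. A minimal sketch of reading and overriding it through the standard Configuration API (the example class below is hypothetical; on a real cluster the property would normally be set in hdfs-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only; not part of this commit.
public class MaxBlocksToLogExample {
  public static void main(String[] args) {
    // HdfsConfiguration loads hdfs-default.xml / hdfs-site.xml; when the key
    // is unset, DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT (1000) is used.
    Configuration conf = new HdfsConfiguration();
    long limit = conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
        DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
    System.out.println("dfs.namenode.max-num-blocks-to-log = " + limit);

    // Lowering the limit programmatically, e.g. in a test; a deployment would
    // set dfs.namenode.max-num-blocks-to-log in hdfs-site.xml instead.
    conf.setLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, 200L);
  }
}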