Subject: svn commit: r1298820 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Date: Fri, 09 Mar 2012 13:17:05 -0000
To: hdfs-commits@hadoop.apache.org
From: stevel@apache.org

Author: stevel
Date: Fri Mar 9 13:17:04 2012
New Revision: 1298820

URL: http://svn.apache.org/viewvc?rev=1298820&view=rev
Log:
HDFS-2966

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1298820&r1=1298819&r2=1298820&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Mar 9 13:17:04 2012
@@ -92,6 +92,8 @@ Trunk (unreleased changes)
     HDFS-3037. TestMulitipleNNDataBlockScanner#testBlockScannerAfterRestart is
     racy. (atm)
 
+    HDFS-2966 TestNameNodeMetrics tests can fail under load. (stevel)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA.
    (todd)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1298820&r1=1298819&r2=1298820&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Mar 9 13:17:04 2012
@@ -62,6 +62,8 @@ public class TestNameNodeMetrics {
   // Number of datanodes in the cluster
   private static final int DATANODE_COUNT = 3;
 
+  private static final int WAIT_GAUGE_VALUE_RETRIES = 20;
+
   static {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
@@ -140,10 +142,8 @@ public class TestNameNodeMetrics {
     assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file
-
-    waitForDeletion();
-    rb = getMetrics(NS_METRICS);
-    assertGauge("FilesTotal", filesTotal, rb);
+
+    rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
     assertGauge("BlocksTotal", 0L, rb);
     assertGauge("PendingDeletionBlocks", 0L, rb);
@@ -176,9 +176,7 @@ public class TestNameNodeMetrics {
     assertGauge("PendingReplicationBlocks", 1L, rb);
     assertGauge("ScheduledReplicationBlocks", 1L, rb);
     fs.delete(file, true);
-    waitForDeletion();
-    rb = getMetrics(NS_METRICS);
-    assertGauge("CorruptBlocks", 0L, rb);
+    rb = waitForDnMetricValue(NS_METRICS, "CorruptBlocks", 0L);
     assertGauge("PendingReplicationBlocks", 0L, rb);
     assertGauge("ScheduledReplicationBlocks", 0L, rb);
   }
@@ -219,8 +217,7 @@ public class TestNameNodeMetrics {
     assertGauge("UnderReplicatedBlocks", 1L, rb);
     assertGauge("MissingBlocks", 1L, rb);
     fs.delete(file, true);
-    waitForDeletion();
-    assertGauge("UnderReplicatedBlocks", 0L, getMetrics(NS_METRICS));
+    waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
   }
 
   private void waitForDeletion() throws InterruptedException {
@@ -228,7 +225,44 @@ public class TestNameNodeMetrics {
     // the blocks pending deletion are sent for deletion to the datanodes.
     Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
   }
-
+
+  /**
+   * Wait for the named gauge value from the metrics source to reach the
+   * desired value.
+   *
+   * There's an initial delay then a spin cycle of sleep and poll. Because
+   * all the tests use a shared FS instance, these tests are not independent;
+   * that's why the initial sleep is in there.
+   *
+   * @param source metrics source
+   * @param name gauge name
+   * @param expected expected value
+   * @return the last metrics record polled
+   * @throws Exception if something went wrong.
+   */
+  private MetricsRecordBuilder waitForDnMetricValue(String source,
+                                                    String name,
+                                                    long expected)
+      throws Exception {
+    MetricsRecordBuilder rb;
+    long gauge;
+    //initial wait.
+    waitForDeletion();
+    //lots of retries are allowed for slow systems; fast ones will still
+    //exit early
+    int retries = (DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES;
+    rb = getMetrics(source);
+    gauge = MetricsAsserts.getLongGauge(name, rb);
+    while (gauge != expected && (--retries > 0)) {
+      Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
+      rb = getMetrics(source);
+      gauge = MetricsAsserts.getLongGauge(name, rb);
+    }
+    //at this point the assertion is valid or the retry count ran out
+    assertGauge(name, expected, rb);
+    return rb;
+  }
+
   @Test
   public void testRenameMetrics() throws Exception {
     Path src = getTestPath("src");
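
For context, a minimal usage sketch of the new helper (illustrative only, not part of r1298820). It is written as an extra test method inside TestNameNodeMetrics: fs, NS_METRICS, DATANODE_COUNT, getTestPath(), assertGauge() and waitForDnMetricValue() are the members shown in the hunks above, while the test name and createFile(Path, long, short) are assumed to be an existing file-creation utility of the class.

  // Illustrative sketch only; not part of this commit.
  @Test
  public void testDeletionGaugesSettle() throws Exception {
    Path file = getTestPath("testDeletionGaugesSettle");
    createFile(file, 100, (short) DATANODE_COUNT);  // assumed existing helper

    fs.delete(file, true);

    // Poll the NameNode gauge instead of sleeping once and asserting:
    // on a loaded machine the metrics can lag the deletion by several
    // replication intervals, which is what HDFS-2966 addresses.
    MetricsRecordBuilder rb =
        waitForDnMetricValue(NS_METRICS, "PendingDeletionBlocks", 0L);

    // The returned record can back further assertions that need no extra wait.
    assertGauge("UnderReplicatedBlocks", 0L, rb);
  }

The retry budget scales with DATANODE_COUNT ((DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES), so the same helper serves the file-deletion, corrupt-block and missing-block tests converted above: fast machines exit the loop early, loaded ones simply retry longer.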