Subject: svn commit: r1077526 - /hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
Date: Fri, 04 Mar 2011 04:24:21 -0000
To: common-commits@hadoop.apache.org
From: omalley@apache.org
Message-Id: <20110304042421.B53F923888E4@eris.apache.org>

Author: omalley
Date: Fri Mar 4 04:24:21 2011
New Revision: 1077526

URL: http://svn.apache.org/viewvc?rev=1077526&view=rev
Log:
commit b8bdc99139116e60bcb459297174bdca166fb95f
Author: Suresh Srinivas
Date:   Fri Jul 2 16:30:01 2010 -0700

    HDFS-1250 from https://issues.apache.org/jira/secure/attachment/12448525/HDFS-1250.y20.patch

Modified:
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1077526&r1=1077525&r2=1077526&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Mar 4 04:24:21 2011
@@ -26,9 +26,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -54,10 +54,10 @@ public class TestDeadDatanode {
    * wait for datanode to reach alive or dead state for waitTime given in
    * milliseconds.
    */
-  private void waitForDatanodeState(String nodeID, boolean alive, int waitTime)
-      throws TimeoutException, InterruptedException {
+  private void waitForDatanodeState(DatanodeID nodeID, boolean alive, int waitTime)
+      throws TimeoutException, InterruptedException, IOException {
     long stopTime = System.currentTimeMillis() + waitTime;
-    FSNamesystem namesystem = cluster.getNamesystem();
+    FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
     String state = alive ? "alive" : "dead";
     while (System.currentTimeMillis() < stopTime) {
       if (namesystem.getDatanode(nodeID).isAlive == alive) {
@@ -80,8 +80,8 @@ public class TestDeadDatanode {
    */
   @Test
   public void testDeadDatanode() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 500);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     cluster = new MiniDFSCluster(conf, 1, true, null);
     cluster.waitActive();
@@ -89,12 +89,12 @@ public class TestDeadDatanode {
     // wait for datanode to be marked live
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeRegistration reg = cluster.getDataNodes().get(0)
-        .getDatanodeRegistration();
-    waitForDatanodeState(reg.getStorageID(), true, 20000);
+        .dnRegistration;
+    waitForDatanodeState(reg, true, 20000);

     // Shutdown and wait for datanode to be marked dead
     dn.shutdown();
-    waitForDatanodeState(reg.getStorageID(), false, 20000);
+    waitForDatanodeState(reg, false, 20000);

     DatanodeProtocol dnp = cluster.getNameNode();
     Block block = new Block(0);
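Read together, the hunks above give the post-patch shape of the test on branch-0.20-security-patches. The sketch below assembles them for reference only: the tail of waitForDatanodeState (the poll sleep and the timeout) and everything after the Block is created are not visible in this diff, so those lines are assumptions marked in comments, and the class name used here is hypothetical rather than the committed file.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.junit.Test;

public class TestDeadDatanodeSketch {          // hypothetical name; the real file is TestDeadDatanode
  private MiniDFSCluster cluster;

  // Post-patch signature: takes the DatanodeID itself rather than a storage-ID
  // string, and reaches the FSNamesystem through the NameNode (0.20 API), which
  // is why IOException is added to the throws clause.
  private void waitForDatanodeState(DatanodeID nodeID, boolean alive, int waitTime)
      throws TimeoutException, InterruptedException, IOException {
    long stopTime = System.currentTimeMillis() + waitTime;
    FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    String state = alive ? "alive" : "dead";
    while (System.currentTimeMillis() < stopTime) {
      if (namesystem.getDatanode(nodeID).isAlive == alive) {
        return;                                // reached the desired state
      }
      Thread.sleep(1000);                      // assumed poll interval; not shown in the hunk
    }
    throw new TimeoutException(                // assumed timeout handling; not shown in the hunk
        "Timed out waiting for datanode to become " + state);
  }

  @Test
  public void testDeadDatanode() throws Exception {
    // Plain Configuration plus the raw "heartbeat.recheck.interval" key, as in the
    // patch, in place of trunk's HdfsConfiguration and the DFSConfigKeys constant.
    Configuration conf = new Configuration();
    conf.setInt("heartbeat.recheck.interval", 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();

    // wait for the datanode to be marked live; the registration comes from the
    // public dnRegistration field rather than getDatanodeRegistration()
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = cluster.getDataNodes().get(0).dnRegistration;
    waitForDatanodeState(reg, true, 20000);

    // shut down and wait for the datanode to be marked dead
    dn.shutdown();
    waitForDatanodeState(reg, false, 20000);

    DatanodeProtocol dnp = cluster.getNameNode();
    Block block = new Block(0);
    // ... the remainder of the test (RPCs issued on behalf of the dead datanode)
    // is outside the hunks shown above and is omitted here.
  }
}

The substance of the backport is in the two swaps the sketch highlights: trunk's HdfsConfiguration and DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY give way to a plain Configuration with the raw key string, and the test identifies the datanode by passing its DatanodeRegistration (a DatanodeID) taken from the dnRegistration field instead of a storage-ID string, matching the FSNamesystem.getDatanode signature available on this branch.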