Subject: svn commit: r741704 - in /hadoop/core/branches/branch-0.20: ./ src/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/test/org/apache/hadoop/hdfs/ src/test/org/apache/hadoop/hdfs/server/datanode/ src/test/org/apache/hadoop/hdfs/server/namenode/
Date: Fri, 06 Feb 2009 20:22:23 -0000
To: core-commits@hadoop.apache.org
From: rangadi@apache.org

Author: rangadi
Date: Fri Feb 6 20:22:22 2009
New Revision: 741704

URL: http://svn.apache.org/viewvc?rev=741704&view=rev
Log:
HADOOP-5114. Remove timeout for accept() in DataNode. The timeout makes
accept() fail in the JDK on Windows and causes many tests to fail.
(Raghu Angadi)
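For context, the setSoTimeout() call this commit removes is what made every accept() in the DataXceiverServer time out after 30 seconds, and on the Windows JDK a timed accept() fails outright. Below is a minimal standalone sketch of that JDK behavior, not DataNode code; the class name and the 1-second timeout are invented for illustration (DataNode used 30000 ms via dfs.dataXceiver.timeoutInMS):

import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class AcceptTimeoutSketch {
  public static void main(String[] args) throws Exception {
    ServerSocket ss = new ServerSocket();
    ss.bind(new InetSocketAddress(0));
    // With SO_TIMEOUT set, accept() throws SocketTimeoutException when no
    // connection arrives in time; this is the kind of call HADOOP-5114 removes.
    ss.setSoTimeout(1000);
    try {
      Socket s = ss.accept();
      s.close();
    } catch (SocketTimeoutException e) {
      // Expected here, since nobody connects to this sketch's socket. Without
      // the timeout, accept() would instead block until the socket is closed.
      System.out.println("accept() timed out as configured");
    } finally {
      ss.close();
    }
  }
}

Without the timeout, accept() blocks until a connection arrives or the server socket is closed, which is why the shutdown path in the DataNode.java diff below changes as well.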
Modified:
    hadoop/core/branches/branch-0.20/   (props changed)
    hadoop/core/branches/branch-0.20/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java

Propchange: hadoop/core/branches/branch-0.20/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 6 20:22:22 2009
@@ -1,2 +1,2 @@
 /hadoop/core/branches/branch-0.19:713112
-/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077,740157
+/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077,740157,741703

Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=741704&r1=741703&r2=741704&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Fri Feb 6 20:22:22 2009
@@ -1646,6 +1646,13 @@
     HADOOP-4500. Fix MultiFileSplit to get the FileSystem from the relevant
     path rather than the JobClient. (Joydeep Sen Sarma via cdouglas)
 
+Release 0.18.4 - Unreleased
+
+  BUG FIXES
+
+    HADOOP-5114. Remove timeout for accept() in DataNode. This makes accept()
+    fail in JDK on Windows and causes many tests to fail. (Raghu Angadi)
+
 Release 0.18.3 - 2009-01-27
 
   IMPROVEMENTS

Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 6 20:22:22 2009
@@ -1,3 +1,3 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
 /hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703

Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=741704&r1=741703&r2=741704&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Feb 6 20:22:22 2009
@@ -307,7 +307,6 @@
         ServerSocketChannel.open().socket() : new ServerSocket();
     Server.bind(ss, socAddr, 0);
     ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
-    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); //30s
     // adjust machine name with the actual port
     tmpPort = ss.getLocalPort();
     selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
@@ -593,6 +592,11 @@
         } catch (InterruptedException e) {}
       }
     }
+    // wait for dataXceiveServer to terminate
+    try {
+      this.dataXceiverServer.join();
+    } catch (InterruptedException ie) {
+    }
   }
 
   RPC.stopProxy(namenode); // stop the RPC threads
@@ -1176,12 +1180,6 @@
       }
     }
 
-    // wait for dataXceiveServer to terminate
-    try {
-      this.dataXceiverServer.join();
-    } catch (InterruptedException ie) {
-    }
-
     LOG.info(dnRegistration + ":Finishing DataNode in: "+data);
     shutdown();
   }
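The second and third hunks are the flip side of the removal: since accept() now blocks indefinitely, waiting for the DataXceiverServer thread moves from the tail of the run loop into shutdown(), after the point where the server socket has been closed. A hedged sketch of that shutdown ordering with plain JDK classes follows; the thread and variable names are invented, not the DataNode's:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;

public class AcceptorShutdownSketch {
  public static void main(String[] args) throws Exception {
    final ServerSocket ss = new ServerSocket();
    ss.bind(new InetSocketAddress(0));

    Thread acceptor = new Thread(new Runnable() {
      public void run() {
        try {
          while (true) {
            Socket s = ss.accept(); // blocks indefinitely: no SO_TIMEOUT
            s.close();
          }
        } catch (IOException e) {
          // Closing ss from the shutdown thread lands here, ending the loop.
        }
      }
    });
    acceptor.start();

    // Shutdown ordering mirroring the patch: close the socket to unblock
    // accept(), then join() the acceptor so shutdown does not return while
    // the thread is still running.
    ss.close();
    acceptor.join();
    System.out.println("acceptor terminated cleanly");
  }
}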
Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=741704&r1=741703&r2=741704&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Feb 6 20:22:22 2009
@@ -602,7 +602,7 @@
   /*
    * Shutdown a particular datanode
    */
-  DataNodeProperties stopDataNode(int i) {
+  public DataNodeProperties stopDataNode(int i) {
     if (i < 0 || i >= dataNodes.size()) {
       return null;
     }

Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=741704&r1=741703&r2=741704&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Feb 6 20:22:22 2009
@@ -37,10 +37,18 @@
 /** Test if a datanode can correctly handle errors during block read/write*/
 public class TestDiskError extends TestCase {
   public void testShutdown() throws Exception {
+    if (System.getProperty("os.name").startsWith("Windows")) {
+      /**
+       * This test depends on OS not allowing file creations on a directory
+       * that does not have write permissions for the user. Apparently it is
+       * not the case on Windows (at least under Cygwin), and possibly AIX.
+       * This is disabled on Windows.
+       */
+      return;
+    }
     // bring up a cluster of 3
     Configuration conf = new Configuration();
     conf.setLong("dfs.block.size", 512L);
-    conf.setInt("dfs.dataXceiver.timeoutInMS", 1000);
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
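The new guard encodes an OS assumption worth making explicit: testShutdown() relies on file creation failing inside a directory the user cannot write to, which holds on POSIX systems but, per the comment, not on Windows (at least under Cygwin) and possibly AIX. A small standalone probe of that behavior; the directory and file names are made up for this sketch:

import java.io.File;
import java.io.IOException;

public class ReadOnlyDirProbe {
  public static void main(String[] args) {
    File dir = new File("readonly-probe-dir");
    dir.mkdirs();
    dir.setWritable(false); // may silently have no effect on Windows
    try {
      // On POSIX this throws IOException (permission denied); on Windows
      // under Cygwin the create can still succeed, so the test above bails
      // out early rather than fail spuriously.
      boolean created = new File(dir, "child.tmp").createNewFile();
      System.out.println("created: " + created);
    } catch (IOException e) {
      System.out.println("create failed as POSIX expects: " + e.getMessage());
    } finally {
      dir.setWritable(true); // restore so cleanup can remove the directory
      new File(dir, "child.tmp").delete();
      dir.delete();
    }
  }
}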
Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=741704&r1=741703&r2=741704&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Fri Feb 6 20:22:22 2009
@@ -9,6 +9,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 
@@ -35,12 +36,20 @@
       // corrupt the block on datanode 0
       Block block = DFSTestUtil.getFirstBlock(fs, fileName);
       TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
+      DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
       File scanLog = new File(System.getProperty("test.build.data"),
          "dfs/data/data1/current/dncp_block_verification.log.curr");
-      assertTrue(scanLog.delete());
+      //wait for one minute for deletion to succeed;
+      for(int i=0; !scanLog.delete(); i++) {
+        assertTrue("Could not delete log file in one minute", i < 60);
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException ignored) {}
+      }
+
       // restart the datanode so the corrupt replica will be detected
-      cluster.restartDataNode(0);
+      cluster.restartDataNode(dnProps);
       DFSTestUtil.waitReplication(fs, fileName, (short)2);
 
       final DatanodeID corruptDataNode =
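The delete-retry loop added above tolerates the scan log still being held open for a short time after the DataNode is stopped; on Windows an open file cannot be deleted, so a single delete() call could fail spuriously. Extracted from the test, the pattern is a plain poll-with-deadline; a standalone sketch in which the helper name is invented:

import java.io.File;

public class DeleteWithRetries {
  // Retry delete() once per second for up to maxSeconds before giving up,
  // mirroring the loop the test uses in place of assertTrue(scanLog.delete()).
  static void deleteWithRetries(File f, int maxSeconds) throws InterruptedException {
    for (int i = 0; !f.delete(); i++) {
      if (i >= maxSeconds) {
        throw new AssertionError("Could not delete " + f + " within " + maxSeconds + "s");
      }
      Thread.sleep(1000);
    }
  }

  public static void main(String[] args) throws Exception {
    File f = File.createTempFile("delete-probe", ".tmp");
    deleteWithRetries(f, 60);
    System.out.println("deleted " + f);
  }
}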