Subject: svn commit: r752593 - in /hadoop/core/branches/branch-0.19: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Date: Wed, 11 Mar 2009 19:43:49 -0000
To: core-commits@hadoop.apache.org
From: dhruba@apache.org
Reply-To: core-dev@hadoop.apache.org
X-Mailer: svnmailer-1.0.8
Message-Id: <20090311194349.80FE12388995@eris.apache.org>

Author: dhruba
Date: Wed Mar 11 19:43:48 2009
New Revision: 752593

URL: http://svn.apache.org/viewvc?rev=752593&view=rev
Log:
HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)

Modified:
    hadoop/core/branches/branch-0.19/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreation.java

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=752593&r1=752592&r2=752593&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Wed Mar 11 19:43:48 2009
@@ -60,6 +60,8 @@
 
     HADOOP-5333. libhdfs supports appending to files. (dhruba)
 
+    HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
+
 Release 0.19.1 - 2009-02-23
 
 INCOMPATIBLE CHANGES

Propchange: hadoop/core/branches/branch-0.19/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 11 19:43:48 2009
@@ -1,2 +1,3 @@
 /hadoop/core/branches/branch-0.18/CHANGES.txt:727226
-/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697,740077,741703,741762,743296,743745,743892,745180,746902-746903,752073
+/hadoop/core/branches/branch-0.20/CHANGES.txt:752591
+/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888,720602,723013,723460,723831,723918,724883,727117,727212,727217,727228,727869,732572,732777,733887,734870,735082,736426,738697,740077,741703,741762,743296,743745,743892,745180,746902-746903,752073,752590
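The first DFSClient.java hunk below reverses the order of leasechecker.close()
and clientRunning = false in DFSClient.close(): the lease-checker thread is now
stopped and joined while the client still reports itself as running, so a
checker woken during JVM shutdown never observes a half-closed client. A
minimal, self-contained sketch of that ordering follows; all names here are
hypothetical stand-ins for DFSClient and its lease checker, not the Hadoop API.

    import java.io.IOException;

    // Illustrative only; not the actual DFSClient code.
    public class ClientSketch {
      private volatile boolean clientRunning = true;

      // Background thread that, like the lease checker, loops while the
      // client is running.
      private final Thread checker = new Thread(() -> {
        while (isRunning()) {
          try {
            Thread.sleep(100);          // stand-in for periodic lease renewal
          } catch (InterruptedException ie) {
            return;                     // close() interrupts us; exit quietly
          }
        }
      });

      private boolean isRunning() { return clientRunning; }

      public ClientSketch() { checker.start(); }

      public synchronized void close() throws IOException {
        if (clientRunning) {
          // Stop and join the worker *before* clearing the flag it polls,
          // mirroring the reordered hunk at @@ -205,8 +205,8 @@ below: the
          // thread shuts down while the client still looks alive, instead of
          // racing against a client already marked dead.
          checker.interrupt();
          try {
            checker.join();
          } catch (InterruptedException ie) {
            throw new IOException(ie.toString());
          }
          clientRunning = false;
          // ... then tear down RPC proxies, as the real close() does ...
        }
      }
    }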
Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=752593&r1=752592&r2=752593&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Wed Mar 11 19:43:48 2009
@@ -205,8 +205,8 @@
    */
   public synchronized void close() throws IOException {
     if(clientRunning) {
-      clientRunning = false;
       leasechecker.close();
+      clientRunning = false;
 
       // close connections to the namenode
       RPC.stopProxy(rpcNamenode);
@@ -2140,7 +2140,6 @@
     private volatile boolean closed = false;
 
     public void run() {
-
       while (!closed && clientRunning) {
 
         // if the Responder encountered an error, shutdown Responder
@@ -2475,21 +2474,30 @@
           // The original bad datanode is left in the list because it is
           // conservative to remove only one datanode in one iteration.
           for (int j = 0; j < nodes.length; j++) {
-            if (nodes[j] == primaryNode) {
+            if (nodes[j].equals(primaryNode)) {
               errorIndex = j; // forget original bad node.
             }
           }
+          // remove primary node from list
+          newnodes = new DatanodeInfo[nodes.length-1];
+          System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
+          System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex,
+                           newnodes.length-errorIndex);
+          nodes = newnodes;
           LOG.warn("Error Recovery for block " + block + " failed " +
                    " because recovery from primary datanode " +
                    primaryNode + " failed " + recoveryErrorCount +
-                   " times. Marking primary datanode as bad.");
+                   " times. " + " Pipeline was " + pipelineMsg +
+                   ". Marking primary datanode as bad.");
           recoveryErrorCount = 0;
+          errorIndex = -1;
           return true;          // sleep when we return from here
         }
         String emsg = "Error Recovery for block " + block + " failed " +
                       " because recovery from primary datanode " +
                       primaryNode + " failed " + recoveryErrorCount +
-                      " times. Aborting...";
+                      " times. " + " Pipeline was " + pipelineMsg +
+                      ". Aborting...";
         LOG.warn(emsg);
         lastException = new IOException(emsg);
         closed = true;
@@ -2499,7 +2507,8 @@
         LOG.warn("Error Recovery for block " + block + " failed " +
                  " because recovery from primary datanode " +
                  primaryNode + " failed " + recoveryErrorCount +
-                 " times. Will retry...");
+                 " times. " + " Pipeline was " + pipelineMsg +
+                 ". Will retry...");
         return true;          // sleep when we return from here
       } finally {
         RPC.stopProxy(primary);
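Two details of the recovery hunks above are worth noting. The primary-node
lookup now uses equals() rather than ==, so the match is by datanode value
instead of object identity, and the bad primary is then physically dropped
from the pipeline with a pair of System.arraycopy calls (the added
errorIndex = -1 clears the stale index before the retry). A standalone sketch
of that removal idiom, using generic String nodes rather than the Hadoop
DatanodeInfo type:

    import java.util.Arrays;

    public class RemoveAt {
      // Copy the slices before and after errorIndex into a shorter array --
      // the same two-arraycopy pattern as the hunk above.
      static String[] removeAt(String[] nodes, int errorIndex) {
        String[] newnodes = new String[nodes.length - 1];
        System.arraycopy(nodes, 0, newnodes, 0, errorIndex);
        System.arraycopy(nodes, errorIndex + 1, newnodes, errorIndex,
                         newnodes.length - errorIndex);
        return newnodes;
      }

      public static void main(String[] args) {
        String[] pipeline = {"dn1", "dn2", "dn3"};
        // Dropping the bad primary at index 1 leaves [dn1, dn3].
        System.out.println(Arrays.toString(removeAt(pipeline, 1)));
      }
    }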
" + " Pipeline was " + pipelineMsg + + ". Aborting..."; LOG.warn(emsg); lastException = new IOException(emsg); closed = true; @@ -2499,7 +2507,8 @@ LOG.warn("Error Recovery for block " + block + " failed " + " because recovery from primary datanode " + primaryNode + " failed " + recoveryErrorCount + - " times. Will retry..."); + " times. " + " Pipeline was " + pipelineMsg + + ". Will retry..."); return true; // sleep when we return from here } finally { RPC.stopProxy(primary); Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreation.java URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=752593&r1=752592&r2=752593&view=diff ============================================================================== --- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original) +++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Wed Mar 11 19:43:48 2009 @@ -56,6 +56,7 @@ //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL); } static final long seed = 0xDEADBEEFL; @@ -703,4 +704,31 @@ System.out.println("testLeaseExpireHardLimit successful"); } + + // test closing file system before all file handles are closed. + public void testFsClose() throws Exception { + System.out.println("test file system close start"); + final int DATANODE_NUM = 3; + + Configuration conf = new Configuration(); + + // create cluster + MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null); + DistributedFileSystem dfs = null; + try { + cluster.waitActive(); + dfs = (DistributedFileSystem)cluster.getFileSystem(); + + // create a new file. + final String f = DIR + "foofs"; + final Path fpath = new Path(f); + FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM); + out.write("something".getBytes()); + + // close file system without closing file + dfs.close(); + } finally { + System.out.println("testFsClose successful"); + } + } }