To: common-commits@hadoop.apache.org
From: suresh@apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: svn commit: r1164722 - in /hadoop/common/branches/branch-0.20-security: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
Date: Fri, 02 Sep 2011 21:45:41 -0000
Message-Id: <20110902214541.5D543238889B@eris.apache.org>
X-Mailer: svnmailer-1.0.8

Author: suresh
Date: Fri Sep 2 21:45:40 2011
New Revision: 1164722

URL: http://svn.apache.org/viewvc?rev=1164722&view=rev
Log:
Adding a patch that was missed in previous svn port of HDFS-630 change.

Added:
    hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
Modified:
    hadoop/common/branches/branch-0.20-security/CHANGES.txt
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1164722&r1=1164721&r2=1164722&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Fri Sep 2 21:45:40 2011
@@ -63,6 +63,9 @@ Release 0.20.205.0 - unreleased
     HDFS-988. Fix bug where savenameSpace can corrupt edits log.
     (Nicolas Spiegelberg via dhruba)
 
+    HDFS-1054. remove sleep before retry for allocating a block.
+    (Todd Lipcon via dhruba)
+
   IMPROVEMENTS
 
     MAPREDUCE-2187. Reporter sends progress during sort/merge.
    (Anupam Seth via
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1164722&r1=1164721&r2=1164722&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Fri Sep 2 21:45:40 2011
@@ -3054,19 +3054,12 @@ public class DFSClient implements FSCons
           namenode.abandonBlock(block, src, clientName);
 
           if (errorIndex < nodes.length) {
-            LOG.debug("Excluding datanode " + nodes[errorIndex]);
+            LOG.info("Excluding datanode " + nodes[errorIndex]);
             excludedNodes.add(nodes[errorIndex]);
           }
 
           // Connection failed. Let's wait a little bit and retry
           retry = true;
-          try {
-            if (System.currentTimeMillis() - startTime > 5000) {
-              LOG.info("Waiting to find target node: " + nodes[0].getName());
-            }
-            Thread.sleep(6000);
-          } catch (InterruptedException iex) {
-          }
         }
       } while (retry && --count >= 0);
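
For context on the hunk above: when a write to the chosen datanode fails, the client abandons the block, records the bad node in excludedNodes, and now retries immediately instead of sleeping for six seconds; the exclusion list is what keeps the next allocation away from the dead node (the HDFS-630 behavior this commit back-ports). Below is a minimal, standalone sketch of that exclude-and-retry shape. It is not HDFS code: Node, BlockAllocator, BlockWriter and writeWithRetry are hypothetical stand-ins for the namenode/datanode calls DFSClient actually makes.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Minimal sketch of the retry pattern the hunk above leaves in place:
 * remember the failed node so the next allocation avoids it, and retry
 * right away rather than sleeping. Node, BlockAllocator and BlockWriter
 * are hypothetical stand-ins, not HDFS types.
 */
public class ExcludeAndRetrySketch {

  static class Node {
    final String name;
    Node(String name) { this.name = name; }
  }

  interface BlockAllocator {
    /** Picks a target node for the next block, skipping excluded nodes. */
    Node allocateBlock(List<Node> excludedNodes) throws IOException;
  }

  interface BlockWriter {
    /** Writes to the chosen node; throws IOException on connection failure. */
    void writeBlock(Node target) throws IOException;
  }

  static void writeWithRetry(BlockAllocator allocator, BlockWriter writer,
                             int retries) throws IOException {
    List<Node> excludedNodes = new ArrayList<Node>();
    int count = retries;
    boolean retry;
    do {
      retry = false;
      Node target = allocator.allocateBlock(excludedNodes);
      try {
        writer.writeBlock(target);
        return;                            // success, stop retrying
      } catch (IOException e) {
        // Exclude the failed node and retry immediately -- no Thread.sleep(),
        // mirroring the behavior this patch gives DFSClient.
        excludedNodes.add(target);
        retry = true;
      }
    } while (retry && --count >= 0);
    throw new IOException("Could not allocate a block after " + retries
        + " retries");
  }
}
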
Added: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java?rev=1164722&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java (added)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java Fri Sep 2 21:45:40 2011
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import junit.*;
+import static junit.framework.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+
+/**
+ * These tests make sure that DFSClient retries fetching data from DFS
+ * properly in case of errors.
+ */
+public class TestDFSClientExcludedNodes extends junit.framework.TestCase {
+
+  public void testExcludedNodes() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    Path filePath = new Path("/testExcludedNodes");
+
+    // kill a datanode
+    cluster.stopDataNode(AppendTestUtil.nextInt(3));
+    OutputStream out = fs.create(filePath, true, 4096);
+    out.write(20);
+
+    try {
+      out.close();
+    } catch (Exception e) {
+      fail("DataNode failure should not result in a block abort: \n"
+          + e.getMessage());
+    }
+  }
+
+}
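
The new test starts a three-datanode MiniDFSCluster, kills one datanode, and then expects the write to close cleanly, since the client should exclude the dead node and retry against the remaining ones. It is a JUnit 3 TestCase and is normally run through the branch's ant test targets; as a hedged aside, a standalone driver like the hypothetical one below (not part of the commit) can also run it with the text-mode runner, assuming the Hadoop jars, JUnit 3.8.x and the compiled test classes are on the classpath.

// Hypothetical driver class, not part of the commit: runs the new JUnit 3
// test with the text-mode runner, assuming the 0.20-security test classpath
// (hadoop jars, junit 3.8.x, and the compiled test classes) is available.
public class RunExcludedNodesTest {
  public static void main(String[] args) {
    // junit.textui.TestRunner.run(Class) executes every test* method
    // in the TestCase subclass and prints the result to stdout.
    junit.textui.TestRunner.run(
        org.apache.hadoop.hdfs.TestDFSClientExcludedNodes.class);
  }
}
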