hadoop-hdfs-commits mailing list archives

From jgho...@apache.org
Subject svn commit: r937904 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
Date Sun, 25 Apr 2010 22:56:28 GMT
Author: jghoman
Date: Sun Apr 25 22:56:28 2010
New Revision: 937904

URL: http://svn.apache.org/viewvc?rev=937904&view=rev
Log:
HDFS-1054. Remove unnecessary sleep after failure in nextBlockOutputStream. Todd Lipcon via jghoman.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=937904&r1=937903&r2=937904&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sun Apr 25 22:56:28 2010
@@ -150,6 +150,9 @@ Trunk (unreleased changes)
 
     HDFS-666. Unit test for FsShell -text. (cdouglas via jghoman)
 
+    HDFS-1054. Remove unnecessary sleep after failure in nextBlockOutputStream.
+    (Todd Lipcon via jghoman)
+
   OPTIMIZATIONS
 
     HDFS-946. NameNode should not return full path name when lisitng a

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=937904&r1=937903&r2=937904&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Sun Apr 25 22:56:28 2010
@@ -796,7 +796,6 @@ class DFSOutputStream extends FSOutputSu
      */
     private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
       LocatedBlock lb = null;
-      boolean retry = false;
       DatanodeInfo[] nodes = null;
       int count = conf.getInt("dfs.client.block.write.retries", 3);
       boolean success = false;
@@ -804,7 +803,6 @@ class DFSOutputStream extends FSOutputSu
         hasError = false;
         lastException = null;
         errorIndex = -1;
-        retry = false;
         success = false;
 
         long startTime = System.currentTimeMillis();
@@ -825,22 +823,10 @@ class DFSOutputStream extends FSOutputSu
           DFSClient.LOG.info("Abandoning block " + block);
           dfsClient.namenode.abandonBlock(block, src, dfsClient.clientName);
           block = null;
-
-          DFSClient.LOG.debug("Excluding datanode " + nodes[errorIndex]);
+          DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
           excludedNodes.add(nodes[errorIndex]);
-
-          // Connection failed.  Let's wait a little bit and retry
-          retry = true;
-          try {
-            if (System.currentTimeMillis() - startTime > 5000) {
-              DFSClient.LOG.info("Waiting to find target node: " + nodes[0].getName());
-            }
-            //TODO fix this timout. Extract it o a constant, maybe make it available from conf
-            Thread.sleep(6000);
-          } catch (InterruptedException iex) {
-          }
         }
-      } while (retry && --count >= 0);
+      } while (!success && --count >= 0);
 
       if (!success) {
         throw new IOException("Unable to create new block.");
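
For illustration only, a small standalone sketch of the retry shape this change leaves behind: try up to a bounded number of times, remember targets that failed so they are not picked again, and retry immediately rather than sleeping between attempts. This is not HDFS code; pickTarget and tryConnect are made-up stand-ins for the namenode call and createBlockOutputStream.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class RetryLoopSketch {
  static final int RETRIES = 3;  // mirrors "dfs.client.block.write.retries"

  public static void main(String[] args) throws IOException {
    List<String> excluded = new ArrayList<>();
    boolean success = false;
    int count = RETRIES;
    String target = null;
    do {
      target = pickTarget(excluded);   // stand-in for asking the namenode for a block target
      success = tryConnect(target);    // stand-in for createBlockOutputStream
      if (!success) {
        excluded.add(target);          // exclude the bad node and retry at once, no sleep
        System.out.println("Excluding target " + target);
      }
    } while (!success && --count >= 0);

    if (!success) {
      throw new IOException("Unable to create new block.");
    }
    System.out.println("Connected to " + target);
  }

  // Pick the first candidate not already excluded.
  private static String pickTarget(List<String> excluded) {
    String[] candidates = {"dn1:50010", "dn2:50010", "dn3:50010"};
    for (String c : candidates) {
      if (!excluded.contains(c)) {
        return c;
      }
    }
    return candidates[candidates.length - 1];
  }

  // Pretend the first two candidates are down, to exercise the retry path.
  private static boolean tryConnect(String target) {
    return target.equals("dn3:50010");
  }
}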


