hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r388225 - in /lucene/hadoop/trunk: bin/start-all.sh src/java/org/apache/hadoop/dfs/DFSClient.java
Date Thu, 23 Mar 2006 18:12:32 GMT
Author: cutting
Date: Thu Mar 23 10:12:28 2006
New Revision: 388225

URL: http://svn.apache.org/viewcvs?rev=388225&view=rev
Log:
Fix for HADOOP-83.

Modified:
    lucene/hadoop/trunk/bin/start-all.sh
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java

Modified: lucene/hadoop/trunk/bin/start-all.sh
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/bin/start-all.sh?rev=388225&r1=388224&r2=388225&view=diff
==============================================================================
--- lucene/hadoop/trunk/bin/start-all.sh (original)
+++ lucene/hadoop/trunk/bin/start-all.sh Thu Mar 23 10:12:28 2006
@@ -5,7 +5,13 @@
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 
-"$bin"/hadoop-daemon.sh start namenode
+# start dfs daemons
+# start namenode after datanodes, to minimize time namenode is up w/o data
+# note: datanodes will log connection errors until namenode starts
 "$bin"/hadoop-daemons.sh start datanode
+"$bin"/hadoop-daemon.sh start namenode
+
+# start mapred daemons
+# start jobtracker first to minimize connection errors at startup
 "$bin"/hadoop-daemon.sh start jobtracker
 "$bin"/hadoop-daemons.sh start tasktracker

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=388225&r1=388224&r2=388225&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Thu Mar 23 10:12:28 2006
@@ -40,7 +40,7 @@
  ********************************************************/
 class DFSClient implements FSConstants {
     public static final Logger LOG = LogFormatter.getLogger("org.apache.hadoop.fs.DFSClient");
-    static int MAX_BLOCK_ACQUIRE_FAILURES = 10;
+    static int MAX_BLOCK_ACQUIRE_FAILURES = 3;
     ClientProtocol namenode;
     String localName;
     boolean running = true;
@@ -358,17 +358,15 @@
                     chosenNode = bestNode(nodes[targetBlock], deadNodes);
                     targetAddr = DataNode.createSocketAddr(chosenNode.getName().toString());
                 } catch (IOException ie) {
-                    /**
                     if (failures >= MAX_BLOCK_ACQUIRE_FAILURES) {
                         throw new IOException("Could not obtain block " + blocks[targetBlock]);
                     }
-                    **/
                     if (nodes[targetBlock] == null || nodes[targetBlock].length == 0) {
                         LOG.info("No node available for block " + blocks[targetBlock]);
                     }
                     LOG.info("Could not obtain block from any node:  " + ie);
                     try {
-                        Thread.sleep(10000);
+                        Thread.sleep(3000);
                     } catch (InterruptedException iex) {
                     }
                     deadNodes.clear();



Mime
View raw message