hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From hair...@apache.org
Subject svn commit: r828926 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
Date Fri, 23 Oct 2009 03:57:16 GMT
Author: hairong
Date: Fri Oct 23 03:57:15 2009
New Revision: 828926

URL: http://svn.apache.org/viewvc?rev=828926&view=rev
Log:
HDFS-690. TestAppend2#testComplexAppend failed on "Too many open files". Contributed by Hairong Kuang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=828926&r1=828925&r2=828926&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Oct 23 03:57:15 2009
@@ -437,6 +437,9 @@
     HDFS-722. Fix callCreateBlockWriteStream pointcut in FSDatasetAspects.
     (szetszwo)
 
+    HDFS-690. TestAppend2#testComplexAppend failed on "Too many open files".
+    (hairong)
+
 Release 0.20.2 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=828926&r1=828925&r2=828926&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Fri Oct 23 03:57:15 2009
@@ -816,7 +816,6 @@
             }
             Packet pkt = ackQueue.getFirst();
             long expected = pkt.seqno;
-            notifyAll();
             LOG.debug("PacketResponder " + numTargets +
                       " for block " + block + 
                       " acking for packet " + expected);
@@ -847,7 +846,9 @@
             replyOut.writeLong(expected);
             SUCCESS.write(replyOut);
             replyOut.flush();
-            ackQueue.removeFirst();
+            // remove the packet from the ack queue
+            removeAckHead();
+            // update the bytes acked
             if (pkt.lastByteInBlock>replicaInfo.getBytesAcked()) {
               replicaInfo.setBytesAcked(pkt.lastByteInBlock);
             }
@@ -923,7 +924,6 @@
                   }
                   pkt = ackQueue.getFirst();
                   expected = pkt.seqno;
-                  notifyAll();
                   LOG.debug("PacketResponder " + numTargets + " seqno = " + seqno);
                   if (seqno != expected) {
                     throw new IOException("PacketResponder " + numTargets +
@@ -1017,8 +1017,8 @@
                       " responded other status " + " for seqno " + expected);
 
             if (pkt != null) {
-              // remove the packet from the queue
-              ackQueue.removeFirst();
+              // remove the packet from the ack queue
+              removeAckHead();
               // update bytes acked
               if (success && pkt.lastByteInBlock>replicaInfo.getBytesAcked())
{
                 replicaInfo.setBytesAcked(pkt.lastByteInBlock);
@@ -1057,6 +1057,16 @@
       LOG.info("PacketResponder " + numTargets + 
                " for block " + block + " terminating");
     }
+    
+    /**
+     * Remove a packet from the head of the ack queue
+     * 
+     * This should be called only when the ack queue is not empty
+     */
+    private synchronized void removeAckHead() {
+      ackQueue.removeFirst();
+      notifyAll();
+    }
   }
   
   /**



Mime
View raw message