hadoop-hdfs-commits mailing list archives

From e..@apache.org
Subject svn commit: r1157232 - in /hadoop/common/trunk/hdfs: CHANGES.txt src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
Date Fri, 12 Aug 2011 19:57:16 GMT
Author: eli
Date: Fri Aug 12 19:57:15 2011
New Revision: 1157232

URL: http://svn.apache.org/viewvc?rev=1157232&view=rev
Log:
HDFS-73. DFSOutputStream does not close all the sockets. Contributed by Uma Maheswara Rao G

Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1157232&r1=1157231&r2=1157232&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Fri Aug 12 19:57:15 2011
@@ -964,6 +964,9 @@ Trunk (unreleased changes)
     HDFS-2240. Fix a deadlock in LeaseRenewer by enforcing lock acquisition
     ordering.  (szetszwo)
 
+    HDFS-73. DFSOutputStream does not close all the sockets.
+    (Uma Maheswara Rao G via eli)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1157232&r1=1157231&r2=1157232&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Aug 12 19:57:15 2011
@@ -36,7 +36,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -63,7 +62,6 @@ import org.apache.hadoop.hdfs.protocol.p
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -606,6 +604,7 @@ class DFSOutputStream extends FSOutputSu
         try {
           blockStream.close();
         } catch (IOException e) {
+          setLastException(e);
         } finally {
           blockStream = null;
         }
@@ -614,10 +613,20 @@ class DFSOutputStream extends FSOutputSu
         try {
           blockReplyStream.close();
         } catch (IOException e) {
+          setLastException(e);
         } finally {
           blockReplyStream = null;
         }
       }
+      if (null != s) {
+        try {
+          s.close();
+        } catch (IOException e) {
+          setLastException(e);
+        } finally {
+          s = null;
+        }
+      }
     }
 
     //
@@ -1003,16 +1012,20 @@ class DFSOutputStream extends FSOutputSu
       persistBlocks.set(true);
 
       boolean result = false;
+      DataOutputStream out = null;
       try {
+        assert null == s : "Previous socket unclosed";
         s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
         long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
 
         //
         // Xmit header info to datanode
         //
-        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
+        out = new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(s, writeTimeout),
             FSConstants.SMALL_BUFFER_SIZE));
+        
+        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
@@ -1038,7 +1051,7 @@ class DFSOutputStream extends FSOutputSu
                 + firstBadLink);
           }
         }
-
+        assert null == blockStream : "Previous blockStream unclosed";
         blockStream = out;
         result =  true; // success
 
@@ -1059,12 +1072,15 @@ class DFSOutputStream extends FSOutputSu
         }
         hasError = true;
         setLastException(ie);
-        blockReplyStream = null;
         result =  false;  // error
       } finally {
         if (!result) {
           IOUtils.closeSocket(s);
           s = null;
+          IOUtils.closeStream(out);
+          out = null;
+          IOUtils.closeStream(blockReplyStream);
+          blockReplyStream = null;
         }
       }
       return result;

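For reference, the pattern the patch applies is: on cleanup, close the block output stream, the reply stream, and the underlying socket, recording any close() failure instead of swallowing it, and nulling each field so a later pipeline setup can assert it starts from a clean state. The following is a minimal standalone sketch of that pattern, not the actual DFSOutputStream code; the class and method names (PipelineCleanupSketch, closeQuietly, closeResources) are hypothetical stand-ins for the patched closeStream() logic.

```java
import java.io.Closeable;
import java.io.IOException;
import java.net.Socket;

// Illustrative sketch only: mirrors the cleanup pattern introduced by HDFS-73,
// where the socket is closed alongside the two streams and close() failures
// are recorded rather than silently dropped.
public class PipelineCleanupSketch {
  // Hypothetical stand-ins for DFSOutputStream's blockStream,
  // blockReplyStream, and the pipeline socket s.
  private Closeable blockStream;
  private Closeable blockReplyStream;
  private Socket socket;

  private IOException lastException;

  // Remember the first failure seen so callers can surface it later.
  private void setLastException(IOException e) {
    if (lastException == null) {
      lastException = e;
    }
  }

  // Close one resource, recording (not rethrowing) any IOException,
  // so a failed close() never prevents the remaining resources from closing.
  private void closeQuietly(Closeable c) {
    if (c == null) {
      return;
    }
    try {
      c.close();
    } catch (IOException e) {
      setLastException(e);
    }
  }

  /** Release every per-pipeline resource; safe to call repeatedly. */
  void closeResources() {
    closeQuietly(blockStream);
    blockStream = null;
    closeQuietly(blockReplyStream);
    blockReplyStream = null;
    closeQuietly(socket); // the socket itself is closed too, not just its streams
    socket = null;
  }
}
```

The same idea shows up in the error path of the pipeline-setup hunk above: if building the pipeline fails part-way, the partially opened socket, output stream, and reply stream are each closed and nulled before returning, which is what lets the `assert null == ...` checks hold on the next attempt.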

