From: eli@apache.org
To: hdfs-commits@hadoop.apache.org
Subject: svn commit: r1143106 - in /hadoop/common/trunk/hdfs: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
Date: Tue, 05 Jul 2011 15:31:58 -0000
Message-Id: <20110705153158.826F723888FE@eris.apache.org>
X-Mailer: svnmailer-1.0.8

Author: eli
Date: Tue Jul 5 15:31:58 2011
New Revision: 1143106

URL: http://svn.apache.org/viewvc?rev=1143106&view=rev
Log:
HDFS-1753. Resource Leak in StreamFile. Contributed by Uma Maheswara Rao G

Modified:
    hadoop/common/trunk/hdfs/CHANGES.txt
    hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
    hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java

Modified: hadoop/common/trunk/hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/CHANGES.txt?rev=1143106&r1=1143105&r2=1143106&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hdfs/CHANGES.txt Tue Jul 5 15:31:58 2011
@@ -862,6 +862,8 @@ Release 0.22.0 - Unreleased
     HDFS-528. Add ability for safemode to wait for a minimum number of live
     datanodes (Todd Lipcon via eli)
 
+    HDFS-1753. Resource Leak in StreamFile. (Uma Maheswara Rao G via eli)
+
   IMPROVEMENTS
 
     HDFS-1304. Add a new unit test for HftpFileSystem.open(..).
     (szetszwo)
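For context on the leak being fixed below: in the pre-patch doGet(), the DFSInputStream and the servlet OutputStream were both acquired before the try block, so an exception from response.getOutputStream() (or from the open itself) escaped before any finally clause ran, and the open stream was never closed. A minimal sketch of the pre-patch shape, condensed from the removed lines in the diff below (not the verbatim method):

    // Pre-patch shape: resources acquired outside try/finally.
    final DFSInputStream in = dfs.open(filename);  // needs cleanup
    final long fileLen = in.getFileLength();
    OutputStream os = response.getOutputStream();  // may throw -> "in" leaks
    try {
      StreamFile.copyFromOffset(in, os, 0L, fileLen, true);
    } finally {
      dfs.close();  // only the client, and only if we got this far
    }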
Modified: hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=1143106&r1=1143105&r2=1143106&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/common/trunk/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Tue Jul 5 15:31:58 2011
@@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.util.Enumeration;
@@ -85,31 +84,41 @@ public class StreamFile extends DfsServl
       return;
     }
 
-    final DFSInputStream in = dfs.open(filename);
-    final long fileLen = in.getFileLength();
-    OutputStream os = response.getOutputStream();
+    DFSInputStream in = null;
+    OutputStream out = null;
     try {
+      in = dfs.open(filename);
+      out = response.getOutputStream();
+      final long fileLen = in.getFileLength();
       if (reqRanges != null) {
         List ranges = InclusiveByteRange.satisfiableRanges(reqRanges, fileLen);
-        StreamFile.sendPartialData(in, os, response, fileLen, ranges, true);
+        StreamFile.sendPartialData(in, out, response, fileLen, ranges);
       } else {
         // No ranges, so send entire file
         response.setHeader("Content-Disposition", "attachment; filename=\"" +
                            filename + "\"");
         response.setContentType("application/octet-stream");
         response.setHeader(CONTENT_LENGTH, "" + fileLen);
-        StreamFile.copyFromOffset(in, os, 0L, fileLen, true);
+        StreamFile.copyFromOffset(in, out, 0L, fileLen);
       }
-    } catch (IOException e) {
+      in.close();
+      in = null;
+      out.close();
+      out = null;
+      dfs.close();
+      dfs = null;
+    } catch (IOException ioe) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("response.isCommitted()=" + response.isCommitted(), e);
+        LOG.debug("response.isCommitted()=" + response.isCommitted(), ioe);
       }
-      throw e;
+      throw ioe;
     } finally {
-      dfs.close();
-    }
+      IOUtils.cleanup(LOG, in);
+      IOUtils.cleanup(LOG, out);
+      IOUtils.cleanup(LOG, dfs);
+    }
   }
@@ -122,15 +131,13 @@ public class StreamFile extends DfsServl
    * @param response http response to use
    * @param contentLength for the response header
    * @param ranges to write to respond with
-   * @param close whether to close the streams
    * @throws IOException on error sending the response
    */
   static void sendPartialData(FSInputStream in,
                               OutputStream out,
                               HttpServletResponse response,
                               long contentLength,
-                              List ranges,
-                              boolean close)
+                              List ranges)
       throws IOException {
     if (ranges == null || ranges.size() != 1) {
       response.setContentLength(0);
@@ -145,14 +152,14 @@ public class StreamFile extends DfsServl
         singleSatisfiableRange.toHeaderRangeString(contentLength));
       copyFromOffset(in, out,
                      singleSatisfiableRange.getFirst(contentLength),
-                     singleLength, close);
+                     singleLength);
     }
   }
 
   /* Copy count bytes at the given offset from one stream to another */
   static void copyFromOffset(FSInputStream in, OutputStream out, long offset,
-                             long count, boolean close) throws IOException {
+                             long count) throws IOException {
     in.seek(offset);
-    IOUtils.copyBytes(in, out, count, close);
+    IOUtils.copyBytes(in, out, count, false);
   }
 }
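The replacement follows the usual Hadoop cleanup idiom: acquire inside try, close and null out each resource on the success path, and let the finally block release whatever is still non-null. org.apache.hadoop.io.IOUtils.cleanup(Log, Closeable...) closes quietly and logs failures, so a close() error on the failure path cannot mask the original exception. A condensed sketch of the idiom, simplified from the patch above:

    DFSInputStream in = null;
    OutputStream out = null;
    try {
      in = dfs.open(filename);
      out = response.getOutputStream();
      // ... stream the file ...
      in.close();  in  = null;   // eager close: failures here still propagate
      out.close(); out = null;
      dfs.close(); dfs = null;
    } finally {
      // No-ops on success (all refs are null); on failure, releases
      // whatever is still open, logging instead of throwing.
      IOUtils.cleanup(LOG, in);
      IOUtils.cleanup(LOG, out);
      IOUtils.cleanup(LOG, dfs);
    }

Note also that copyFromOffset() now always passes close=false to IOUtils.copyBytes(), leaving stream lifetime entirely to the caller.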
Modified: hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java?rev=1143106&r1=1143105&r2=1143106&view=diff
==============================================================================
--- hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java (original)
+++ hadoop/common/trunk/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java Tue Jul 5 15:31:58 2011
@@ -20,18 +20,30 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertArrayEquals;
 
 import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.List;
 import java.util.Vector;
 
+import javax.servlet.ServletContext;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.junit.Test;
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.mockito.Mockito;
 import org.mortbay.jetty.InclusiveByteRange;
 
 /*
@@ -188,7 +200,29 @@ class MockHttpServletResponse implements
 
 public class TestStreamFile {
+  private HdfsConfiguration CONF = new HdfsConfiguration();
+  private DFSClient clientMock = Mockito.mock(DFSClient.class);
+  private HttpServletRequest mockHttpServletRequest =
+      Mockito.mock(HttpServletRequest.class);
+  private HttpServletResponse mockHttpServletResponse =
+      Mockito.mock(HttpServletResponse.class);
+  private final ServletContext mockServletContext =
+      Mockito.mock(ServletContext.class);
+
+  StreamFile sfile = new StreamFile() {
+    private static final long serialVersionUID = -5513776238875189473L;
+
+    public ServletContext getServletContext() {
+      return mockServletContext;
+    }
+
+    @Override
+    protected DFSClient getDFSClient(HttpServletRequest request)
+        throws IOException, InterruptedException {
+      return clientMock;
+    }
+  };
+
   // return an array matching the output of mockfsinputstream
   private static byte[] getOutputArray(int start, int count) {
     byte[] a = new byte[count];
@@ -220,7 +254,7 @@ public class TestStreamFile {
     assertTrue("Pairs array must be even", pairs.length % 2 == 0);
 
     for (int i = 0; i < pairs.length; i+=2) {
-      StreamFile.copyFromOffset(fsin, os, pairs[i], pairs[i+1], false);
+      StreamFile.copyFromOffset(fsin, os, pairs[i], pairs[i+1]);
       assertArrayEquals("Reading " + pairs[i+1]
                         + " bytes from offset " + pairs[i],
                         getOutputArray(pairs[i], pairs[i+1]),
@@ -246,7 +280,7 @@ public class TestStreamFile {
     {
       List ranges = strToRanges("0-,10-300", 500);
       MockHttpServletResponse response = new MockHttpServletResponse();
-      StreamFile.sendPartialData(in, os, response, 500, ranges, false);
+      StreamFile.sendPartialData(in, os, response, 500, ranges);
       assertEquals("Multiple ranges should result in a 416 error",
                    416, response.getStatus());
     }
 
@@ -255,7 +289,7 @@
     {
       os.reset();
       MockHttpServletResponse response = new MockHttpServletResponse();
-      StreamFile.sendPartialData(in, os, response, 500, null, false);
+      StreamFile.sendPartialData(in, os, response, 500, null);
       assertEquals("No ranges should result in a 416 error",
error", 416, response.getStatus()); } @@ -264,7 +298,7 @@ public class TestStreamFile { { List ranges = strToRanges("600-800", 500); MockHttpServletResponse response = new MockHttpServletResponse(); - StreamFile.sendPartialData(in, os, response, 500, ranges, false); + StreamFile.sendPartialData(in, os, response, 500, ranges); assertEquals("Single (but invalid) range should result in a 416", 416, response.getStatus()); } @@ -274,7 +308,7 @@ public class TestStreamFile { { List ranges = strToRanges("100-300", 500); MockHttpServletResponse response = new MockHttpServletResponse(); - StreamFile.sendPartialData(in, os, response, 500, ranges, false); + StreamFile.sendPartialData(in, os, response, 500, ranges); assertEquals("Single (valid) range should result in a 206", 206, response.getStatus()); assertArrayEquals("Byte range from 100-300", @@ -283,4 +317,108 @@ public class TestStreamFile { } } + + + // Test for positive scenario + @Test + public void testDoGetShouldWriteTheFileContentIntoServletOutputStream() + throws Exception { + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1) + .build(); + try { + Path testFile = createFile(); + setUpForDoGetTest(cluster, testFile); + ServletOutputStreamExtn outStream = new ServletOutputStreamExtn(); + Mockito.doReturn(outStream).when(mockHttpServletResponse) + .getOutputStream(); + StreamFile sfile = new StreamFile() { + + private static final long serialVersionUID = 7715590481809562722L; + + public ServletContext getServletContext() { + return mockServletContext; + } + }; + sfile.doGet(mockHttpServletRequest, mockHttpServletResponse); + assertEquals("Not writing the file data into ServletOutputStream", + outStream.getResult(), "test"); + } finally { + cluster.shutdown(); + } + } + + // Test for cleaning the streams in exception cases also + @Test + public void testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException() + throws Exception { + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1) + .build(); + try { + Path testFile = createFile(); + + setUpForDoGetTest(cluster, testFile); + + Mockito.doThrow(new IOException()).when(mockHttpServletResponse) + .getOutputStream(); + DFSInputStream fsMock = Mockito.mock(DFSInputStream.class); + + Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString()); + + Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength(); + + try { + sfile.doGet(mockHttpServletRequest, mockHttpServletResponse); + fail("Not throwing the IOException"); + } catch (IOException e) { + Mockito.verify(clientMock, Mockito.atLeastOnce()).close(); + } + + } finally { + cluster.shutdown(); + } + } + + private void setUpForDoGetTest(MiniDFSCluster cluster, Path testFile) + throws IOException { + + Mockito.doReturn(CONF).when(mockServletContext).getAttribute( + JspHelper.CURRENT_CONF); + Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF))) + .when(mockHttpServletRequest).getParameter("nnaddr"); + Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest) + .getPathInfo(); + } + + static Path writeFile(FileSystem fs, Path f) throws IOException { + DataOutputStream out = fs.create(f); + try { + out.writeBytes("test"); + } finally { + out.close(); + } + assertTrue(fs.exists(f)); + return f; + } + + private Path createFile() throws IOException { + FileSystem fs = FileSystem.get(CONF); + Path testFile = new Path("/test/mkdirs/doGet"); + writeFile(fs, testFile); + return testFile; + } + + public static class ServletOutputStreamExtn extends 
+    private StringBuffer buffer = new StringBuffer(3);
+
+    public String getResult() {
+      return buffer.toString();
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      buffer.append((char) b);
+    }
+  }
 }
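The second new test pins the fix down by stubbing exactly the failure that originally triggered the leak: response.getOutputStream() is made to throw before any bytes move, and the test then verifies that doGet() still released the DFSClient. The essential Mockito shape, condensed from the test above:

    Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
        .getOutputStream();
    try {
      sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
      fail("expected an IOException");
    } catch (IOException e) {
      // The servlet must release its client even though streaming never began.
      Mockito.verify(clientMock, Mockito.atLeastOnce()).close();
    }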