Subject: svn commit: r1331572 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/
Date: Fri, 27 Apr 2012 20:14:43 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Message-Id: <20120427201444.3291923889E1@eris.apache.org>

Author: szetszwo
Date: Fri Apr 27 20:14:43 2012
New Revision: 1331572

URL: http://svn.apache.org/viewvc?rev=1331572&view=rev
Log:
svn merge -c 1331570 from trunk for HDFS-3334. Fix ByteRangeInputStream stream leakage.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1331570

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1331572&r1=1331571&r2=1331572&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Apr 27 20:14:43 2012
@@ -798,6 +798,8 @@ Release 0.23.3 - UNRELEASED
     HDFS-3321. Fix safe mode turn off tip message.  (Ravi Prakash via szetszwo)
 
+    HDFS-3334. Fix ByteRangeInputStream stream leakage.  (Daryn Sharp via szetszwo)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
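The leak being fixed: ByteRangeInputStream opens its HTTP connection lazily, but before this patch it never overrode close(), so it inherited the no-op InputStream.close() and closing the stream left the underlying HTTP stream open. A minimal, self-contained sketch of that failure mode, using a plain wrapper and a ByteArrayInputStream instead of the real HDFS classes (an illustration only, not code from this patch):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class CloseLeakSketch {
      // Like the pre-patch ByteRangeInputStream: no close() override, so the
      // inherited InputStream.close() is a no-op and the wrapped stream leaks.
      static class LeakyWrapper extends InputStream {
        final InputStream wrapped;
        LeakyWrapper(InputStream wrapped) { this.wrapped = wrapped; }
        @Override public int read() throws IOException { return wrapped.read(); }
      }

      public static void main(String[] args) throws IOException {
        InputStream inner = new ByteArrayInputStream(new byte[] {1, 2, 3}) {
          @Override public void close() throws IOException {
            System.out.println("inner stream closed");  // never printed
            super.close();
          }
        };
        try (InputStream leaky = new LeakyWrapper(inner)) {
          leaky.read();
        }
        // Nothing was printed: the inner stream is still open. With HTTP
        // streams, that is one leaked connection per closed file handle.
      }
    }

The patch below adds an explicit CLOSED state and a close() override that propagates to the wrapped stream.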
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1331570

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java?rev=1331572&r1=1331571&r2=1331572&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Fri Apr 27 20:14:43 2012
@@ -27,6 +27,8 @@ import org.apache.commons.io.input.Bound
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * To support HTTP byte streams, a new connection to an HTTP server needs to be
  * created each time. This class hides the complexity of those multiple
@@ -61,7 +63,7 @@ public abstract class ByteRangeInputStre
   }
 
   enum StreamStatus {
-    NORMAL, SEEK
+    NORMAL, SEEK, CLOSED
   }
   protected InputStream in;
   protected URLOpener originalURL;
@@ -89,40 +91,51 @@ public abstract class ByteRangeInputStre
   protected abstract URL getResolvedUrl(final HttpURLConnection connection
       ) throws IOException;
 
-  private InputStream getInputStream() throws IOException {
-    if (status != StreamStatus.NORMAL) {
-
-      if (in != null) {
-        in.close();
-        in = null;
-      }
-
-      // Use the original url if no resolved url exists, eg. if
-      // it's the first time a request is made.
-      final URLOpener opener =
-          (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
-
-      final HttpURLConnection connection = opener.openConnection(startPos);
-      connection.connect();
-      checkResponseCode(connection);
-
-      final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
-      if (cl == null) {
-        throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
-      }
-      final long streamlength = Long.parseLong(cl);
-      filelength = startPos + streamlength;
-      // Java has a bug with >2GB request streams.  It won't bounds check
-      // the reads so the transfer blocks until the server times out
-      in = new BoundedInputStream(connection.getInputStream(), streamlength);
-
-      resolvedURL.setURL(getResolvedUrl(connection));
-      status = StreamStatus.NORMAL;
+  @VisibleForTesting
+  protected InputStream getInputStream() throws IOException {
+    switch (status) {
+      case NORMAL:
+        break;
+      case SEEK:
+        if (in != null) {
+          in.close();
+        }
+        in = openInputStream();
+        status = StreamStatus.NORMAL;
+        break;
+      case CLOSED:
+        throw new IOException("Stream closed");
     }
-
     return in;
   }
 
+  @VisibleForTesting
+  protected InputStream openInputStream() throws IOException {
+    // Use the original url if no resolved url exists, eg. if
+    // it's the first time a request is made.
+    final URLOpener opener =
+        (resolvedURL.getURL() == null) ? originalURL : resolvedURL;
+
+    final HttpURLConnection connection = opener.openConnection(startPos);
+    connection.connect();
+    checkResponseCode(connection);
+
+    final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH);
+    if (cl == null) {
+      throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing");
+    }
+    final long streamlength = Long.parseLong(cl);
+    filelength = startPos + streamlength;
+    // Java has a bug with >2GB request streams.  It won't bounds check
+    // the reads so the transfer blocks until the server times out
+    InputStream is =
+        new BoundedInputStream(connection.getInputStream(), streamlength);
+
+    resolvedURL.setURL(getResolvedUrl(connection));
+
+    return is;
+  }
+
   private int update(final int n) throws IOException {
     if (n != -1) {
       currentPos += n;
@@ -150,17 +163,21 @@ public abstract class ByteRangeInputStre
    * The next read() will be from that location.  Can't
    * seek past the end of the file.
    */
+  @Override
   public void seek(long pos) throws IOException {
     if (pos != currentPos) {
       startPos = pos;
       currentPos = pos;
-      status = StreamStatus.SEEK;
+      if (status != StreamStatus.CLOSED) {
+        status = StreamStatus.SEEK;
+      }
     }
   }
 
   /**
    * Return the current offset from the start of the file
    */
+  @Override
   public long getPos() throws IOException {
     return currentPos;
   }
@@ -169,7 +186,17 @@ public abstract class ByteRangeInputStre
    * Seeks a different copy of the data.  Returns true if
    * found a new source, false otherwise.
    */
+  @Override
   public boolean seekToNewSource(long targetPos) throws IOException {
     return false;
   }
-}
\ No newline at end of file
+
+  @Override
+  public void close() throws IOException {
+    if (in != null) {
+      in.close();
+      in = null;
+    }
+    status = StreamStatus.CLOSED;
+  }
+}
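The patched class now runs a small three-state lifecycle: NORMAL (stream open and positioned), SEEK (position changed; close and reopen on the next read), and CLOSED (terminal; reads fail instead of silently reopening). A condensed, self-contained sketch of that state machine, with simplified names and a ByteArrayInputStream standing in for the HTTP connection (an illustration of the pattern, not the HDFS class itself):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class LazyReopenSketch {
      enum StreamStatus { NORMAL, SEEK, CLOSED }

      private InputStream in;
      private long startPos;
      private StreamStatus status = StreamStatus.SEEK;  // nothing opened yet

      // Stands in for openInputStream(): the real code opens an HTTP
      // connection with a Range header starting at startPos.
      private InputStream openInputStream() {
        ByteArrayInputStream is = new ByteArrayInputStream("0123456789".getBytes());
        is.skip(startPos);
        return is;
      }

      private InputStream getInputStream() throws IOException {
        switch (status) {
          case NORMAL:  // stream already open and positioned; reuse it
            break;
          case SEEK:    // position moved; close the old stream, open a new one
            if (in != null) {
              in.close();
            }
            in = openInputStream();
            status = StreamStatus.NORMAL;
            break;
          case CLOSED:  // never silently reopen a closed stream
            throw new IOException("Stream closed");
        }
        return in;
      }

      public void seek(long pos) throws IOException {
        startPos = pos;
        if (status != StreamStatus.CLOSED) {  // a closed stream stays closed
          status = StreamStatus.SEEK;
        }
      }

      public int read() throws IOException {
        return getInputStream().read();
      }

      public void close() throws IOException {
        if (in != null) {
          in.close();   // propagate close to the wrapped stream
          in = null;
        }
        status = StreamStatus.CLOSED;
      }

      public static void main(String[] args) throws IOException {
        LazyReopenSketch s = new LazyReopenSketch();
        s.seek(4);
        System.out.println((char) s.read());   // prints '4'
        s.close();
        try {
          s.read();
        } catch (IOException e) {
          System.out.println(e.getMessage());  // prints "Stream closed"
        }
      }
    }

The key design point is that CLOSED is terminal: seek() refuses to move a closed stream back to SEEK, so a later read() can never undo a close().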
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java?rev=1331572&r1=1331571&r2=1331572&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java Fri Apr 27 20:14:43 2012
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -169,4 +171,74 @@ public static class MockHttpURLConnectio
         "HTTP_OK expected, received 206", e.getMessage());
     }
   }
+
+  @Test
+  public void testPropagatedClose() throws IOException {
+    ByteRangeInputStream brs = spy(
+        new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
+
+    InputStream mockStream = mock(InputStream.class);
+    doReturn(mockStream).when(brs).openInputStream();
+
+    int brisOpens = 0;
+    int brisCloses = 0;
+    int isCloses = 0;
+
+    // first open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // stream is open, shouldn't close underlying stream
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // seek forces a reopen, should close underlying stream
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(++brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+
+    // verify that the underlying stream isn't closed after a seek
+    // ie. the state was correctly updated
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // seeking to same location should be a no-op
+    brs.seek(1);
+    brs.getInputStream();
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // close should of course close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(++isCloses)).close();
+
+    // it's already closed, underlying stream should not close
+    brs.close();
+    verify(brs, times(++brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+
+    // it's closed, don't reopen it
+    boolean errored = false;
+    try {
+      brs.getInputStream();
+    } catch (IOException e) {
+      errored = true;
+      assertEquals("Stream closed", e.getMessage());
+    } finally {
+      assertTrue("Read a closed stream", errored);
+    }
+    verify(brs, times(brisOpens)).openInputStream();
+    verify(brs, times(brisCloses)).close();
+    verify(mockStream, times(isCloses)).close();
+  }
 }
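testPropagatedClose() above can count opens and closes without touching the network only because getInputStream() and openInputStream() were widened to protected and marked @VisibleForTesting: a Mockito spy can then stub the opener while still exercising the real state machine. A minimal, self-contained illustration of that spy-and-stub idiom (the Widget class here is hypothetical, not HDFS code):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    import java.io.IOException;
    import java.io.InputStream;

    public class SpyIdiomSketch {
      // Hypothetical stand-in for ByteRangeInputStream: an overridable
      // hook (open) plus the logic under test (get).
      static class Widget {
        protected InputStream open() throws IOException {
          throw new IOException("would hit the network");
        }
        InputStream get() throws IOException {
          return open();
        }
      }

      public static void main(String[] args) throws IOException {
        Widget w = spy(new Widget());
        InputStream fake = mock(InputStream.class);
        doReturn(fake).when(w).open();  // stub the hook; real open() never runs

        w.get();                        // exercises the real get() logic
        verify(w, times(1)).open();     // the spy counted the real invocation
      }
    }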
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1331572&r1=1331571&r2=1331572&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Fri Apr 27 20:14:43 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URI;
 import java.net.URL;
@@ -234,6 +235,45 @@ public class TestHftpFileSystem {
     assertEquals('7', in.read());
   }
 
+  @Test
+  public void testReadClosedStream() throws IOException {
+    final Path testFile = new Path("/testfile+2");
+    FSDataOutputStream os = hdfs.create(testFile, true);
+    os.writeBytes("0123456789");
+    os.close();
+
+    // ByteRangeInputStream delays opens until reads.  Make sure it doesn't
+    // open a closed stream that has never been opened
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+
+    // force the stream to connect and then close it
+    in = hftpFs.open(testFile);
+    int ch = in.read();
+    assertEquals('0', ch);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+
+    // make sure seeking doesn't automagically reopen the stream
+    in.seek(4);
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+  }
+
+  private void checkClosedStream(InputStream is) {
+    IOException ioe = null;
+    try {
+      is.read();
+    } catch (IOException e) {
+      ioe = e;
+    }
+    assertNotNull("No exception on closed read", ioe);
+    assertEquals("Stream closed", ioe.getMessage());
+  }
+
   public void resetFileSystem() throws IOException {
     // filesystem caching has a quirk/bug that it caches based on the user's
     // given uri. the result is if a filesystem is instantiated with no port,