Return-Path: X-Original-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id C7C48D042 for ; Fri, 17 Aug 2012 01:43:20 +0000 (UTC) Received: (qmail 68380 invoked by uid 500); 17 Aug 2012 01:43:20 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 68349 invoked by uid 500); 17 Aug 2012 01:43:20 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 68338 invoked by uid 99); 17 Aug 2012 01:43:20 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 17 Aug 2012 01:43:20 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 17 Aug 2012 01:43:19 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id EA3EE23888EA; Fri, 17 Aug 2012 01:42:35 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1374122 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Date: Fri, 17 Aug 2012 01:42:35 -0000 To: hdfs-commits@hadoop.apache.org From: szetszwo@apache.org X-Mailer: svnmailer-1.0.8-patched Message-Id: <20120817014235.EA3EE23888EA@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: szetszwo Date: Fri Aug 17 01:42:35 2012 New Revision: 1374122 URL: http://svn.apache.org/viewvc?rev=1374122&view=rev Log: 
HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header when chunked transfer-encoding is used. Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1374122&r1=1374121&r2=1374122&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Aug 17 01:42:35 2012 @@ -625,6 +625,9 @@ Branch-2 ( Unreleased changes ) HDFS-3808. fuse_dfs: postpone libhdfs intialization until after fork. (Colin Patrick McCabe via atm) + HDFS-3788. ByteRangeInputStream should not expect HTTP Content-Length header + when chunked transfer-encoding is used. (szetszwo) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. 
HDFS portion of ZK-based FailoverController (todd) Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java?rev=1374122&r1=1374121&r2=1374122&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ByteRangeInputStream.java Fri Aug 17 01:42:35 2012 @@ -22,12 +22,15 @@ import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; +import java.util.List; +import java.util.Map; +import java.util.StringTokenizer; import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.fs.FSInputStream; -import org.apache.hadoop.hdfs.server.namenode.StreamFile; import com.google.common.annotations.VisibleForTesting; +import com.google.common.net.HttpHeaders; /** * To support HTTP byte streams, a new connection to an HTTP server needs to be @@ -70,7 +73,7 @@ public abstract class ByteRangeInputStre protected URLOpener resolvedURL; protected long startPos = 0; protected long currentPos = 0; - protected long filelength; + protected Long fileLength = null; StreamStatus status = StreamStatus.SEEK; @@ -114,28 +117,60 @@ public abstract class ByteRangeInputStre final URLOpener opener = resolved? 
resolvedURL: originalURL; final HttpURLConnection connection = opener.connect(startPos, resolved); - final String cl = connection.getHeaderField(StreamFile.CONTENT_LENGTH); - if (cl == null) { - throw new IOException(StreamFile.CONTENT_LENGTH+" header is missing"); - } - final long streamlength = Long.parseLong(cl); - filelength = startPos + streamlength; - // Java has a bug with >2GB request streams. It won't bounds check - // the reads so the transfer blocks until the server times out - InputStream is = - new BoundedInputStream(connection.getInputStream(), streamlength); - resolvedURL.setURL(getResolvedUrl(connection)); - - return is; + + InputStream in = connection.getInputStream(); + final Map<String, List<String>> headers = connection.getHeaderFields(); + if (isChunkedTransferEncoding(headers)) { + // file length is not known + fileLength = null; + } else { + // for non-chunked transfer-encoding, get content-length + final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH); + if (cl == null) { + throw new IOException(HttpHeaders.CONTENT_LENGTH + " is missing: " + + headers); + } + final long streamlength = Long.parseLong(cl); + fileLength = startPos + streamlength; + + // Java has a bug with >2GB request streams. It won't bounds check + // the reads so the transfer blocks until the server times out + in = new BoundedInputStream(in, streamlength); + } + + return in; } + private static boolean isChunkedTransferEncoding( + final Map<String, List<String>> headers) { + return contains(headers, HttpHeaders.TRANSFER_ENCODING, "chunked") + || contains(headers, HttpHeaders.TE, "chunked"); + } + + /** Does the HTTP header map contain the given key, value pair? 
*/ + private static boolean contains(final Map<String, List<String>> headers, + final String key, final String value) { + final List<String> values = headers.get(key); + if (values != null) { + for(String v : values) { + for(final StringTokenizer t = new StringTokenizer(v, ","); + t.hasMoreTokens(); ) { + if (value.equalsIgnoreCase(t.nextToken())) { + return true; + } + } + } + } + return false; + } + private int update(final int n) throws IOException { if (n != -1) { currentPos += n; - } else if (currentPos < filelength) { + } else if (fileLength != null && currentPos < fileLength) { throw new IOException("Got EOF but currentPos = " + currentPos - + " < filelength = " + filelength); + + " < filelength = " + fileLength); } return n; }