Subject: svn commit: r1575797 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java src/test/java/org/apache/hadoop/hdfs/TestRead.java
Date: Mon, 10 Mar 2014 02:41:45 -0000
To: hdfs-commits@hadoop.apache.org
From: cmccabe@apache.org
Message-Id: <20140310024146.2ADB023889CB@eris.apache.org>

Author: cmccabe
Date: Mon Mar 10 02:41:45 2014
New Revision: 1575797

URL: http://svn.apache.org/r1575797
Log:
HDFS-6071. BlockReaderLocal does not return -1 on EOF when doing a zero-length read on a short file. (cmccabe)

Added:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1575797&r1=1575796&r2=1575797&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Mar 10 02:41:45 2014
@@ -729,6 +729,9 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-6078. TestIncrementalBlockReports is flaky. (Arpit Agarwal)
 
+    HDFS-6071. BlockReaderLocal doesn't return -1 on EOF when doing a
+    zero-length read on a short file (cmccabe)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
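
For context, the read contract this change enforces (and which the new TestRead below asserts) can be sketched against the public FSDataInputStream API. This fragment is illustrative only, not part of the commit; the class and method names are invented, and it assumes a non-empty file already reachable through a FileSystem instance:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ZeroLengthReadSketch {
  // Expected EOF semantics: a zero-length ByteBuffer read returns 0 while the
  // stream position is before end-of-file, and -1 once it is at end-of-file.
  static void checkZeroLengthReads(FileSystem fs, Path path) throws IOException {
    long len = fs.getFileStatus(path).getLen();   // assumes a non-empty file
    FSDataInputStream in = fs.open(path);
    try {
      ByteBuffer empty = ByteBuffer.allocate(0);
      int atStart = in.read(empty);   // expected: 0 (not at EOF yet)
      in.seek(len);                   // position the stream at end-of-file
      int atEof = in.read(empty);     // expected: -1; this is the case HDFS-6071
                                      // fixes for the BlockReaderLocal path
      assert atStart == 0 && atEof == -1;
    } finally {
      in.close();
    }
  }
}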
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1575797&r1=1575796&r2=1575797&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Mon Mar 10 02:41:45 2014
@@ -478,7 +478,7 @@ class BlockReaderLocal implements BlockR
       total += bb;
       if (buf.remaining() == 0) return total;
     }
-    boolean eof = false;
+    boolean eof = true, done = false;
     do {
       if (buf.isDirect() && (buf.remaining() >= maxReadaheadLength) &&
           ((dataPos % bytesPerChecksum) == 0)) {
@@ -493,20 +493,24 @@ class BlockReaderLocal implements BlockR
           buf.limit(oldLimit);
         }
         if (nRead < maxReadaheadLength) {
-          eof = true;
+          done = true;
+        }
+        if (nRead > 0) {
+          eof = false;
         }
         total += nRead;
       } else {
         // Slow lane: refill bounce buffer.
         if (fillDataBuf(canSkipChecksum)) {
-          eof = true;
+          done = true;
         }
         bb = drainDataBuf(buf); // drain bounce buffer if possible
         if (bb >= 0) {
+          eof = false;
           total += bb;
         }
       }
-    } while ((!eof) && (buf.remaining() > 0));
+    } while ((!done) && (buf.remaining() > 0));
     return (eof && total == 0) ? -1 : total;
   }
 
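For readers following the flag change above: the old code used a single eof flag both to stop the loop and to decide whether to return -1, which mis-reported EOF on zero-length and short reads. The split between the two booleans can be paraphrased as a standalone loop. The sketch below is an illustrative analogue against java.nio, not the committed code; the class and method names are invented for this example:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

class EofLoopSketch {
  // 'done' decides when to stop filling (no more data available right now),
  // while 'eof' records whether any data was available to this call at all,
  // so a call that transfers nothing at end-of-file reports -1 instead of 0.
  static int readSketch(FileChannel ch, ByteBuffer buf) throws IOException {
    boolean eof = true, done = false;
    int total = 0;
    do {
      if (ch.size() - ch.position() <= 0) {
        done = true;                  // nothing left in the file: stop looping
      } else {
        eof = false;                  // some data was available to this call
        int want = buf.remaining();
        int n = ch.read(buf);         // reads 0 bytes when buf has no room
        if (n > 0) total += n;
        if (n < want) done = true;    // short read: stop for now
      }
    } while (!done && buf.remaining() > 0);
    return (eof && total == 0) ? -1 : total;
  }
}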
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1575797&r1=1575796&r2=1575797&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Mon Mar 10 02:41:45 2014
@@ -58,11 +58,14 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
+import org.junit.Assume;
 
 import java.io.*;
 import java.net.*;
@@ -1138,4 +1141,43 @@ public class DFSTestUtil {
     long c = (val + factor - 1) / factor;
     return c * factor;
   }
+
+  /**
+   * A short-circuit test context which makes it easier to get a short-circuit
+   * configuration and set everything up.
+   */
+  public static class ShortCircuitTestContext implements Closeable {
+    private final String testName;
+    private final TemporarySocketDirectory sockDir;
+    private boolean closed = false;
+    private boolean formerTcpReadsDisabled;
+
+    public ShortCircuitTestContext(String testName) {
+      this.testName = testName;
+      this.sockDir = new TemporarySocketDirectory();
+      DomainSocket.disableBindPathValidation();
+      formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
+      Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+    }
+
+    public Configuration newConfiguration() {
+      Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+      conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
+          new File(sockDir.getDir(),
+            testName + "._PORT.sock").getAbsolutePath());
+      return conf;
+    }
+
+    public String getTestName() {
+      return testName;
+    }
+
+    public void close() throws IOException {
+      if (closed) return;
+      closed = true;
+      DFSInputStream.tcpReadsDisabledForTesting = formerTcpReadsDisabled;
+      sockDir.close();
+    }
+  }
 }
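For reference, the intended lifecycle of this helper is the try/finally pattern that the new TestRead below follows: construct the context, derive a Configuration from it, and close it when the test ends. A minimal usage sketch is shown here; the class name, method name, and test name are invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil.ShortCircuitTestContext;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class ShortCircuitContextUsageSketch {
  // Construct the context (its constructor skips the test via Assume when
  // domain sockets are unavailable), derive a Configuration with
  // short-circuit reads enabled, and close the context in a finally block so
  // the temporary socket directory is cleaned up and the
  // tcpReadsDisabledForTesting flag is restored.
  @Test
  public void sketch() throws Exception {
    ShortCircuitTestContext ctx = new ShortCircuitTestContext("sketch");
    try {
      Configuration conf = ctx.newConfiguration();
      MiniDFSCluster cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      try {
        // ... exercise short-circuit reads against cluster.getFileSystem() ...
      } finally {
        cluster.shutdown();
      }
    } finally {
      ctx.close();
    }
  }
}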
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java?rev=1575797&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java (added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java Mon Mar 10 02:41:45 2014
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil.ShortCircuitTestContext;
+import org.junit.Test;
+
+public class TestRead {
+  final private int BLOCK_SIZE = 512;
+
+  private void testEOF(MiniDFSCluster cluster, int fileLength) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    Path path = new Path("testEOF." + fileLength);
+    DFSTestUtil.createFile(fs, path, fileLength, (short)1, 0xBEEFBEEF);
+    FSDataInputStream fis = fs.open(path);
+    ByteBuffer empty = ByteBuffer.allocate(0);
+    // A read into an empty bytebuffer at the beginning of the file gives 0.
+    Assert.assertEquals(0, fis.read(empty));
+    fis.seek(fileLength);
+    // A read into an empty bytebuffer at the end of the file gives -1.
+    Assert.assertEquals(-1, fis.read(empty));
+    if (fileLength > BLOCK_SIZE) {
+      fis.seek(fileLength - BLOCK_SIZE + 1);
+      ByteBuffer dbb = ByteBuffer.allocateDirect(BLOCK_SIZE);
+      Assert.assertEquals(BLOCK_SIZE - 1, fis.read(dbb));
+    }
+    fis.close();
+  }
+
+  @Test(timeout=60000)
+  public void testEOFWithBlockReaderLocal() throws Exception {
+    ShortCircuitTestContext testContext =
+        new ShortCircuitTestContext("testEOFWithBlockReaderLocal");
+    try {
+      final Configuration conf = testContext.newConfiguration();
+      conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+          .format(true).build();
+      testEOF(cluster, 1);
+      testEOF(cluster, 14);
+      testEOF(cluster, 10000);
+      cluster.shutdown();
+    } finally {
+      testContext.close();
+    }
+  }
+
+  @Test(timeout=60000)
+  public void testEOFWithRemoteBlockReader() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    testEOF(cluster, 1);
+    testEOF(cluster, 14);
+    testEOF(cluster, 10000);
+    cluster.shutdown();
+  }
+}