From: kihwal@apache.org
To: common-commits@hadoop.apache.org
X-Mailer: ASF-Git Admin Mailer
Subject: hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.
Date: Fri, 10 Feb 2017 18:53:00 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c88ec5458 -> 33c62d2d1


HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp.
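For context, the change below makes DFSInputStream fail fast when a requested
offset has no corresponding block (for example, an offset beyond the end of a
file that was truncated after the reader opened it), throwing an EOFException
instead of repeatedly re-requesting block locations from the namenode.

The following standalone sketch is not part of the patch; it only illustrates
how a client might observe the new behavior. It assumes a reachable HDFS
cluster that supports truncate and an existing multi-block file at the
illustrative path /tmp/truncate-demo; the path, offsets, and sizes are
arbitrary.

import java.io.EOFException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateReadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical pre-existing multi-block file; adjust to your environment.
    Path path = new Path("/tmp/truncate-demo");

    // Open the file, then truncate it so later offsets no longer exist.
    FSDataInputStream in = fs.open(path);
    while (!fs.truncate(path, 10)) {
      Thread.sleep(10); // wait for block recovery after truncate
    }

    try {
      // Positional read beyond the truncated length. Before this fix the
      // client could loop indefinitely requesting block locations; with the
      // fix it surfaces an EOFException promptly.
      in.readFully(1024, new byte[4]);
    } catch (EOFException expected) {
      System.out.println("Read past truncated EOF failed fast: " + expected);
    } finally {
      in.close();
      fs.close();
    }
  }
}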
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33c62d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33c62d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33c62d2d

Branch: refs/heads/branch-2
Commit: 33c62d2d19cd80b9c0cb9f46a635f37080dbb27c
Parents: c88ec54
Author: Kihwal Lee
Authored: Fri Feb 10 12:52:14 2017 -0600
Committer: Kihwal Lee
Committed: Fri Feb 10 12:52:14 2017 -0600

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSInputStream.java | 48 +++++++-----------
 .../java/org/apache/hadoop/hdfs/TestPread.java | 52 +++++++++++++++++++-
 2 files changed, 70 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c62d2d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 669485a..0608d6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -511,33 +511,36 @@ public class DFSInputStream extends FSInputStream
       } else {
         // search cached blocks first
-        int targetBlockIdx = locatedBlocks.findBlock(offset);
-        if (targetBlockIdx < 0) { // block is not cached
-          targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-          // fetch more blocks
-          final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-          assert (newBlocks != null) : "Could not find target position " + offset;
-          locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
-        }
-        blk = locatedBlocks.get(targetBlockIdx);
+        blk = fetchBlockAt(offset, 0, true);
       }
       return blk;
     }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+    return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+      throws IOException {
     synchronized(infoLock) {
       int targetBlockIdx = locatedBlocks.findBlock(offset);
       if (targetBlockIdx < 0) { // block is not cached
         targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+        useCache = false;
       }
-      // fetch blocks
-      final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-      if (newBlocks == null) {
-        throw new IOException("Could not find target position " + offset);
+      if (!useCache) { // fetch blocks
+        final LocatedBlocks newBlocks = (length == 0)
+            ? dfsClient.getLocatedBlocks(src, offset)
+            : dfsClient.getLocatedBlocks(src, offset, length);
+        if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+          throw new EOFException("Could not find target position " + offset);
+        }
+        locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
       }
-      locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+      return locatedBlocks.get(targetBlockIdx);
     }
   }
 
@@ -592,28 +595,15 @@ public class DFSInputStream extends FSInputStream
     assert (locatedBlocks != null) : "locatedBlocks is null";
     List<LocatedBlock> blockRange = new ArrayList<>();
     // search cached blocks first
-    int blockIdx = locatedBlocks.findBlock(offset);
-    if (blockIdx < 0) { // block is not cached
-      blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-    }
     long remaining = length;
     long curOff = offset;
     while(remaining > 0) {
-      LocatedBlock blk = null;
-      if(blockIdx < locatedBlocks.locatedBlockCount())
-        blk = locatedBlocks.get(blockIdx);
-      if (blk == null || curOff < blk.getStartOffset()) {
-        LocatedBlocks newBlocks;
-        newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-        locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-        continue;
-      }
+      LocatedBlock blk = fetchBlockAt(curOff, remaining, true);
       assert curOff >= blk.getStartOffset() : "Block not found";
       blockRange.add(blk);
       long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
       remaining -= bytesRead;
       curOff += bytesRead;
-      blockIdx++;
     }
     return blockRange;
   }
 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/33c62d2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index c69f060..458f81c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataOutputStream;
+import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Random;
@@ -29,7 +30,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -494,6 +496,54 @@ public class TestPread {
     }
   }
 
+  @Test
+  public void testTruncateWhileReading() throws Exception {
+    Path path = new Path("/testfile");
+    final int blockSize = 512;
+
+    // prevent initial pre-fetch of multiple block locations
+    Configuration conf = new Configuration();
+    conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, blockSize);
+
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // create multi-block file
+      FSDataOutputStream dos =
+          fs.create(path, true, blockSize, (short)1, blockSize);
+      dos.write(new byte[blockSize*3]);
+      dos.close();
+      // truncate a file while it's open
+      final FSDataInputStream dis = fs.open(path);
+      while (!fs.truncate(path, 10)) {
+        Thread.sleep(10);
+      }
+      // verify that reading bytes outside the initial pre-fetch do
+      // not send the client into an infinite loop querying locations.
+      ExecutorService executor = Executors.newFixedThreadPool(1);
+      Future<Void> future = executor.submit(new Callable<Void>() {
+        @Override
+        public Void call() throws IOException {
+          // read from 2nd block.
+          dis.readFully(blockSize, new byte[4]);
+          return null;
+        }
+      });
+      try {
+        future.get(4, TimeUnit.SECONDS);
+        Assert.fail();
+      } catch (ExecutionException ee) {
+        assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
+      } finally {
+        future.cancel(true);
+        executor.shutdown();
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestPread().testPreadDFS();
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org