From: jing9@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 19 May 2015 05:15:13 -0000
Subject: [09/50] hadoop git commit: HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. Contributed by Kai Sasaki.

HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. Contributed by Kai Sasaki.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8490ffe6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8490ffe6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8490ffe6

Branch: refs/heads/HDFS-7285
Commit: 8490ffe6a2cd253512cf48b50e2d67eb0343a72e
Parents: 0751690
Author: Jing Zhao
Authored: Tue Apr 28 13:42:24 2015 -0700
Committer: Jing Zhao
Committed: Mon May 18 22:11:06 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt             |  5 ++++-
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java  |  7 ++++++-
 .../apache/hadoop/hdfs/DFSStripedInputStream.java    |  5 +++--
 .../hadoop/hdfs/TestDFSStripedInputStream.java       | 16 +++++++---------
 .../org/apache/hadoop/hdfs/TestReadStripedFile.java  | 11 ++++++++---
 5 files changed, 28 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8490ffe6/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 6c5d7ce..9b4bf24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -139,4 +139,7 @@
     commands from standbynode if any (vinayakumarb)
 
     HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated
-    as Idempotent (vinayakumarb)
\ No newline at end of file
+    as Idempotent (vinayakumarb)
+
+    HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open.
+    (Kai Sasaki via jing9)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8490ffe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index db13ae8..5fb23a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1191,7 +1191,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     // Get block info from namenode
     TraceScope scope = getPathTraceScope("newDFSInputStream", src);
     try {
-      return new DFSInputStream(this, src, verifyChecksum);
+      ECInfo info = getErasureCodingInfo(src);
+      if (info != null) {
+        return new DFSStripedInputStream(this, src, verifyChecksum, info);
+      } else {
+        return new DFSInputStream(this, src, verifyChecksum);
+      }
     } finally {
       scope.close();
     }
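The effect of this hunk is that striping becomes transparent to callers: DFSClient#open asks the NameNode for erasure coding info once and returns whichever stream type applies, so a plain FileSystem#open works unchanged on striped files (which is why the test changes below drop direct DFSStripedInputStream construction in favor of fs.open). A minimal usage sketch under that assumption; the path, cluster, and buffer size are illustrative only and not part of this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StripedReadExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS cluster built from the
    // HDFS-7285 branch, and that /striped is an erasure coding zone.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/striped/file");  // illustrative path
    try (FSDataInputStream in = fs.open(file)) {
      // Same call for striped and contiguous files; with this patch the
      // client picks DFSStripedInputStream internally when ECInfo is present.
      byte[] buf = new byte[4096];
      int n = in.read(buf, 0, buf.length);
      System.out.println("read " + n + " bytes");
    }
  }
}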
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8490ffe6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index fe9e101..f6f7ed2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -134,11 +134,12 @@ public class DFSStripedInputStream extends DFSInputStream {
   private final short parityBlkNum;
   private final ECInfo ecInfo;
 
-  DFSStripedInputStream(DFSClient dfsClient, String src, boolean verifyChecksum)
+  DFSStripedInputStream(DFSClient dfsClient, String src, boolean verifyChecksum, ECInfo info)
       throws IOException {
     super(dfsClient, src, verifyChecksum);
     // ECInfo is restored from NN just before reading striped file.
-    ecInfo = dfsClient.getErasureCodingInfo(src);
+    assert info != null;
+    ecInfo = info;
     cellSize = ecInfo.getSchema().getChunkSize();
     dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
     parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8490ffe6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index cf10981..bcfc74b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -167,10 +168,9 @@ public class TestDFSStripedInputStream {
         writeBytes, fileLength);
 
     // pread
-    try (DFSStripedInputStream dis =
-        new DFSStripedInputStream(fs.getClient(), src, true)) {
+    try (FSDataInputStream fsdis = fs.open(new Path(src))) {
       byte[] buf = new byte[writeBytes + 100];
-      int readLen = dis.read(0, buf, 0, buf.length);
+      int readLen = fsdis.read(0, buf, 0, buf.length);
       readLen = readLen >= 0 ? readLen : 0;
       Assert.assertEquals("The length of file should be the same to write size",
           writeBytes, readLen);
@@ -180,13 +180,12 @@ public class TestDFSStripedInputStream {
     }
 
     // stateful read with byte array
-    try (DFSStripedInputStream dis =
-        new DFSStripedInputStream(fs.getClient(), src, true)) {
+    try (FSDataInputStream fsdis = fs.open(new Path(src))) {
       byte[] buf = new byte[writeBytes + 100];
       int readLen = 0;
       int ret;
       do {
-        ret = dis.read(buf, readLen, buf.length - readLen);
+        ret = fsdis.read(buf, readLen, buf.length - readLen);
         if (ret > 0) {
           readLen += ret;
         }
@@ -201,13 +200,12 @@ public class TestDFSStripedInputStream {
     }
 
     // stateful read with ByteBuffer
-    try (DFSStripedInputStream dis =
-        new DFSStripedInputStream(fs.getClient(), src, true)) {
+    try (FSDataInputStream fsdis = fs.open(new Path(src))) {
       ByteBuffer buf = ByteBuffer.allocate(writeBytes + 100);
       int readLen = 0;
       int ret;
       do {
-        ret = dis.read(buf);
+        ret = fsdis.read(buf);
        if (ret > 0) {
          readLen += ret;
        }
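The three try-blocks above exercise the two read flavors the striped stream must support through FSDataInputStream: positional read (read(long, byte[], int, int)), which leaves the stream offset untouched, and stateful reads (read(byte[], int, int) and read(ByteBuffer)), which advance it. A standalone sketch of the drain loop the tests repeat, extracted here only for illustration; the helper class and method names are ours, not the test's:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

final class ReadUtil {
  /**
   * Reads until EOF or until buf is full, mirroring the tests' do/while
   * loop; returns the number of bytes actually read.
   */
  static int readFully(FSDataInputStream in, byte[] buf) throws IOException {
    int readLen = 0;
    int ret;
    do {
      // read() returns -1 at EOF and otherwise blocks until at least one
      // byte is available, so this loop always terminates.
      ret = in.read(buf, readLen, buf.length - readLen);
      if (ret > 0) {
        readLen += ret;
      }
    } while (ret >= 0 && readLen < buf.length);
    return readLen;
  }
}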
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8490ffe6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
index d980bd6..1ad480e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFile.java
@@ -24,6 +24,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -33,6 +34,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.junit.After;
 import org.junit.Before;
@@ -52,6 +54,8 @@ public class TestReadStripedFile {
   private DistributedFileSystem fs;
   private final Path dirPath = new Path("/striped");
   private Path filePath = new Path(dirPath, "file");
+  private ECInfo info = new ECInfo(filePath.toString(),
+      ECSchemaManager.getSystemDefaultSchema());
   private final short DATA_BLK_NUM = HdfsConstants.NUM_DATA_BLOCKS;
   private final short PARITY_BLK_NUM = HdfsConstants.NUM_PARITY_BLOCKS;
   private final int CELLSIZE = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
@@ -89,7 +93,7 @@ public class TestReadStripedFile {
     LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
         filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
     final DFSStripedInputStream in =
-        new DFSStripedInputStream(fs.getClient(), filePath.toString(), false);
+        new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, info);
 
     List<LocatedBlock> lbList = lbs.getLocatedBlocks();
     for (LocatedBlock aLbList : lbList) {
@@ -124,7 +128,8 @@ public class TestReadStripedFile {
           bg.getBlock().getBlockPoolId());
     }
     DFSStripedInputStream in =
-        new DFSStripedInputStream(fs.getClient(), filePath.toString(), false);
+        new DFSStripedInputStream(fs.getClient(),
+            filePath.toString(), false, info);
     int readSize = BLOCK_GROUP_SIZE;
     byte[] readBuffer = new byte[readSize];
     int ret = in.read(0, readBuffer, 0, readSize);
@@ -170,7 +175,7 @@ public class TestReadStripedFile {
 
     DFSStripedInputStream in =
         new DFSStripedInputStream(fs.getClient(), filePath.toString(),
-            false);
+            false, info);
     byte[] expected = new byte[fileSize];
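Tests that still construct the stream directly must now supply the fourth argument themselves; TestReadStripedFile builds one ECInfo from the system default schema and reuses it across cases. A minimal sketch of that pattern, assuming the same scaffolding as the test (a running MiniDFSCluster with /striped as an erasure coding zone); the helper class and method names are ours, and only types that appear in the diff are used:

// The DFSStripedInputStream constructor is package-private, so a helper
// like this has to live in the org.apache.hadoop.hdfs package.
package org.apache.hadoop.hdfs;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.server.namenode.ECSchemaManager;

final class StripedStreamTestHelper {
  /** Opens a striped stream directly, the way TestReadStripedFile now does. */
  static DFSStripedInputStream openStriped(DistributedFileSystem fs, Path filePath)
      throws IOException {
    // The caller supplies ECInfo up front; the system default schema matches
    // what the NameNode reports for a default erasure coding zone.
    ECInfo info = new ECInfo(filePath.toString(),
        ECSchemaManager.getSystemDefaultSchema());
    return new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, info);
  }
}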