From: zhz@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Thu, 20 Aug 2015 06:27:50 -0000
Subject: [26/50] [abbrv] hadoop git commit: HDFS-8804. Erasure Coding: use
 DirectBufferPool in DFSStripedInputStream for buffer allocation. Contributed
 by Jing Zhao.

HDFS-8804. Erasure Coding: use DirectBufferPool in DFSStripedInputStream for
buffer allocation. Contributed by Jing Zhao.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9312b168
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9312b168
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9312b168

Branch: refs/heads/HDFS-7285-merge
Commit: 9312b168e2f152ebfb8a7c7b63e74a819adfc5d2
Parents: ba90c02
Author: Jing Zhao
Authored: Mon Aug 3 17:03:15 2015 -0700
Committer: Jing Zhao
Committed: Mon Aug 3 17:03:15 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt        |  3 ++
 .../hadoop/hdfs/DFSStripedInputStream.java      | 33 ++++++++++++++++++--
 2 files changed, 33 insertions(+), 3 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9312b168/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 673fbab..f087bb4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -382,3 +382,6 @@
     HDFS-8202. Improve end to end stirpping file test to add erasure recovering
     test. (Xinwei Qin via zhz)
+
+    HDFS-8804. Erasure Coding: use DirectBufferPool in DFSStripedInputStream for
+    buffer allocation. (jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9312b168/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 1f64d4e..3612063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.util.DirectBufferPool;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -136,6 +137,8 @@ public class DFSStripedInputStream extends DFSInputStream {
     }
   }
 
+  private static final DirectBufferPool bufferPool = new DirectBufferPool();
+
   private final BlockReaderInfo[] blockReaders;
   private final int cellSize;
   private final short dataBlkNum;
@@ -143,6 +146,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   private final int groupSize;
   /** the buffer for a complete stripe */
   private ByteBuffer curStripeBuf;
+  private ByteBuffer parityBuf;
   private final ECSchema schema;
   private final RawErasureDecoder decoder;
@@ -177,12 +181,20 @@ public class DFSStripedInputStream extends DFSInputStream {
 
   private void resetCurStripeBuffer() {
     if (curStripeBuf == null) {
-      curStripeBuf = ByteBuffer.allocateDirect(cellSize * dataBlkNum);
+      curStripeBuf = bufferPool.getBuffer(cellSize * dataBlkNum);
     }
     curStripeBuf.clear();
     curStripeRange = new StripeRange(0, 0);
   }
 
+  private ByteBuffer getParityBuffer() {
+    if (parityBuf == null) {
+      parityBuf = bufferPool.getBuffer(cellSize * parityBlkNum);
+    }
+    parityBuf.clear();
+    return parityBuf;
+  }
+
   /**
    * When seeking into a new block group, create blockReader for each internal
    * block in the group.
@@ -204,6 +216,19 @@ public class DFSStripedInputStream extends DFSInputStream {
     currentLocatedBlock = targetBlockGroup;
   }
 
+  @Override
+  public synchronized void close() throws IOException {
+    super.close();
+    if (curStripeBuf != null) {
+      bufferPool.returnBuffer(curStripeBuf);
+      curStripeBuf = null;
+    }
+    if (parityBuf != null) {
+      bufferPool.returnBuffer(parityBuf);
+      parityBuf = null;
+    }
+  }
+
   /**
    * Extend the super method with the logic of switching between cells.
    * When reaching the end of a cell, proceed to the next cell and read it
@@ -830,8 +855,10 @@ public class DFSStripedInputStream extends DFSInputStream {
       }
       final int decodeIndex = StripedBlockUtil.convertIndex4Decode(index,
           dataBlkNum, parityBlkNum);
-      decodeInputs[decodeIndex] = ByteBuffer.allocateDirect(
-          (int) alignedStripe.range.spanInBlock);
+      ByteBuffer buf = getParityBuffer().duplicate();
+      buf.position(cellSize * decodeIndex);
+      buf.limit(cellSize * decodeIndex + (int) alignedStripe.range.spanInBlock);
+      decodeInputs[decodeIndex] = buf.slice();
       alignedStripe.chunks[index] = new StripingChunk(decodeInputs[decodeIndex]);
       return true;
     }
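
----------------------------------------------------------------------

The core of the patch is a buffer lifecycle: instead of calling
ByteBuffer.allocateDirect() on every use, DFSStripedInputStream now borrows
its stripe and parity buffers from a shared org.apache.hadoop.util.DirectBufferPool
on first use, clear()s them for reuse, and hands them back in close() so the
direct memory can be recycled by other streams. The stand-alone sketch below
illustrates that borrow/reuse/return pattern in isolation; only
DirectBufferPool.getBuffer()/returnBuffer() are taken from the patch, while
the class and method names here (PooledBufferReader, fill) are hypothetical.

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.util.DirectBufferPool;

/** Borrows a direct buffer from a shared pool and returns it on close. */
public class PooledBufferReader implements Closeable {
  // One pool shared by all instances, like the static bufferPool in the patch.
  private static final DirectBufferPool BUFFER_POOL = new DirectBufferPool();

  private final int bufferSize;
  private ByteBuffer buf; // lazily borrowed from the pool

  public PooledBufferReader(int bufferSize) {
    this.bufferSize = bufferSize;
  }

  /** Borrow (or reuse) the pooled buffer and reset it for the next use. */
  private ByteBuffer getBuffer() {
    if (buf == null) {
      buf = BUFFER_POOL.getBuffer(bufferSize);
    }
    buf.clear();
    return buf;
  }

  /** Copy as much of src as fits; a real reader would fill from a stream. */
  public int fill(byte[] src) {
    ByteBuffer b = getBuffer();
    int n = Math.min(src.length, b.remaining());
    b.put(src, 0, n);
    return n;
  }

  @Override
  public void close() throws IOException {
    // Return the buffer so other streams can reuse the direct memory.
    if (buf != null) {
      BUFFER_POOL.returnBuffer(buf);
      buf = null;
    }
  }
}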
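
The last hunk avoids fresh allocations in the same way: rather than
allocateDirect()-ing a buffer per parity chunk, it takes a duplicate() of the
single pooled parity buffer, sets position/limit to the cell owned by that
decode index, and slice()s an independent zero-based view, so parity cells are
read into non-overlapping views of one shared buffer. A minimal JDK-only
sketch of that slicing idiom follows; the toy cellSize/parityBlkNum values are
assumptions, and allocateDirect() stands in for the pooled buffer.

import java.nio.ByteBuffer;

public class ParitySliceDemo {
  public static void main(String[] args) {
    final int cellSize = 4;      // toy cell size; the real value comes from the EC schema
    final int parityBlkNum = 3;  // e.g. an RS-6-3 layout

    // Stand-in for bufferPool.getBuffer(cellSize * parityBlkNum).
    ByteBuffer parityBuf = ByteBuffer.allocateDirect(cellSize * parityBlkNum);

    // Carve an independent, zero-based view over the cell for decode index 1.
    int decodeIndex = 1;
    ByteBuffer view = parityBuf.duplicate();   // shares content, separate position/limit
    view.position(cellSize * decodeIndex);
    view.limit(cellSize * (decodeIndex + 1));
    ByteBuffer slice = view.slice();           // capacity == cellSize, position == 0

    slice.put((byte) 42);                      // lands at offset cellSize * decodeIndex
    System.out.println(parityBuf.get(cellSize * decodeIndex)); // prints 42
  }
}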