From: cmccabe@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: hadoop git commit: HDFS-9576: HTrace: collect position/length information on read operations (zhz via cmccabe)
Date: Wed, 20 Jan 2016 19:27:00 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 142557869 -> 7905788db

HDFS-9576: HTrace: collect position/length information on read operations (zhz via cmccabe)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7905788d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7905788d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7905788d

Branch: refs/heads/trunk
Commit: 7905788db94d560e6668af0d4bed22b326961aaf
Parents: 1425578
Author: Colin Patrick Mccabe
Authored: Wed Jan 20 11:26:44 2016 -0800
Committer: Colin Patrick Mccabe
Committed: Wed Jan 20 11:26:44 2016 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java | 19 +++++++++++
 .../org/apache/hadoop/hdfs/DFSInputStream.java | 34 ++++++++++++++------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 3 files changed, 47 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7905788d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 2c42cbe..3b9210a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2967,6 +2967,25 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }
 
   /**
+   * Full detailed tracing for read requests: path, position in the file,
+   * and length.
+   *
+   * @param reqLen requested length
+   */
+  TraceScope newReaderTraceScope(String description, String path, long pos,
+      int reqLen) {
+    TraceScope scope = newPathTraceScope(description, path);
+    scope.addKVAnnotation("pos", Long.toString(pos));
+    scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
+    return scope;
+  }
+
+  /** Add the returned length info to the scope. */
+  void addRetLenToReaderScope(TraceScope scope, int retLen) {
+    scope.addKVAnnotation("retLen", Integer.toString(retLen));
+  }
+
+  /**
    * Get the erasure coding policy information for the specified path
    *
    * @param src path to get the information for
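
The two helpers above build on the existing newPathTraceScope and only add HTrace
key/value annotations for the read position and the requested length. A minimal,
self-contained sketch of the same pattern written directly against the
org.apache.htrace.core API (the class, the field, and the "path" annotation key are
illustrative, not part of the patch):

  import org.apache.htrace.core.TraceScope;
  import org.apache.htrace.core.Tracer;

  class ReaderTracingSketch {
    private final Tracer tracer;

    ReaderTracingSketch(Tracer tracer) {
      this.tracer = tracer;
    }

    // Open a scope recording which file is read, from where, and how much was
    // requested; mirrors DFSClient#newReaderTraceScope in the hunk above.
    TraceScope newReaderTraceScope(String description, String path, long pos,
        int reqLen) {
      TraceScope scope = tracer.newScope(description);
      scope.addKVAnnotation("path", path); // stand-in for newPathTraceScope (assumed key)
      scope.addKVAnnotation("pos", Long.toString(pos));
      scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
      return scope;
    }

    // Attach the number of bytes actually returned; mirrors addRetLenToReaderScope.
    void addRetLenToReaderScope(TraceScope scope, int retLen) {
      scope.addKVAnnotation("retLen", Integer.toString(retLen));
    }
  }
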
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7905788d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index b6b11ee..3de60b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -973,18 +973,29 @@ public class DFSInputStream extends FSInputStream
   public synchronized int read(@Nonnull final byte buf[], int off, int len)
       throws IOException {
     ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf);
-    try (TraceScope ignored =
-        dfsClient.newPathTraceScope("DFSInputStream#byteArrayRead", src)) {
-      return readWithStrategy(byteArrayReader, off, len);
+    try (TraceScope scope =
+        dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
+            src, getPos(), len)) {
+      int retLen = readWithStrategy(byteArrayReader, off, len);
+      if (retLen < len) {
+        dfsClient.addRetLenToReaderScope(scope, retLen);
+      }
+      return retLen;
     }
   }
 
   @Override
   public synchronized int read(final ByteBuffer buf) throws IOException {
     ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
-    try (TraceScope ignored =
-        dfsClient.newPathTraceScope("DFSInputStream#byteBufferRead", src)){
-      return readWithStrategy(byteBufferReader, 0, buf.remaining());
+    int reqLen = buf.remaining();
+    try (TraceScope scope =
+        dfsClient.newReaderTraceScope("DFSInputStream#byteBufferRead",
+            src, getPos(), reqLen)){
+      int retLen = readWithStrategy(byteBufferReader, 0, reqLen);
+      if (retLen < reqLen) {
+        dfsClient.addRetLenToReaderScope(scope, retLen);
+      }
+      return retLen;
     }
   }
 
@@ -1433,9 +1444,14 @@ public class DFSInputStream extends FSInputStream
   @Override
   public int read(long position, byte[] buffer, int offset, int length)
       throws IOException {
-    try (TraceScope ignored = dfsClient.
-        newPathTraceScope("DFSInputStream#byteArrayPread", src)) {
-      return pread(position, buffer, offset, length);
+    try (TraceScope scope = dfsClient.
+        newReaderTraceScope("DFSInputStream#byteArrayPread",
+            src, position, length)) {
+      int retLen = pread(position, buffer, offset, length);
+      if (retLen < length) {
+        dfsClient.addRetLenToReaderScope(scope, retLen);
+      }
+      return retLen;
     }
   }
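
All three read paths above follow the same shape: open the scope with the requested
length, run the read, and attach retLen only when fewer bytes came back than were
asked for, so the common full-length read adds no extra annotation. A stand-alone
sketch of that shape (TracedReadSketch, ReadCall, and the span name are hypothetical;
only the annotation keys come from the patch):

  import java.io.IOException;
  import org.apache.htrace.core.TraceScope;
  import org.apache.htrace.core.Tracer;

  class TracedReadSketch {
    @FunctionalInterface
    interface ReadCall {
      int read() throws IOException; // stands in for readWithStrategy(...) or pread(...)
    }

    static int tracedRead(Tracer tracer, String src, long pos, int reqLen,
        ReadCall call) throws IOException {
      try (TraceScope scope = tracer.newScope("ExampleRead")) {
        scope.addKVAnnotation("path", src);
        scope.addKVAnnotation("pos", Long.toString(pos));
        scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
        int retLen = call.read();
        if (retLen < reqLen) {
          // Short read (or -1 at end of stream): record what actually came back.
          scope.addKVAnnotation("retLen", Integer.toString(retLen));
        }
        return retLen;
      }
    }
  }
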
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7905788d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e6074bd..7a5290e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -936,6 +936,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9624. DataNode start slowly due to the initial DU command operations.
     (Lin Yiqun via wang)
 
+    HDFS-9576: HTrace: collect position/length information on read operations
+    (zhz via cmccabe)
+
   OPTIMIZATIONS
 
   BUG FIXES
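
Nothing changes in the public client API; an ordinary read through FileSystem picks up
the richer spans once client-side tracing is enabled. A usage sketch (the namenode URI
and file path below are placeholders):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class PreadTraceExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
           FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
        byte[] buf = new byte[4096];
        // Positional read: with tracing on, this is the "DFSInputStream#byteArrayPread"
        // path from the patch, annotated with pos=128 and reqLen=4096 (plus retLen if
        // the read comes up short).
        int n = in.read(128L, buf, 0, buf.length);
        System.out.println("read " + n + " bytes");
      }
    }
  }
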