From: zhz@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Wed, 30 Sep 2015 15:41:49 -0000
Message-Id: <5b05e7d65324498c9f268a451a756add@git.apache.org>
Subject: [49/58] [abbrv] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5a9a3da/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e245d2a..e122748 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -233,16 +233,12 @@ public class WebHdfsFileSystem extends FileSystem
       // refetch tokens. even if ugi has credentials, don't attempt
       // to get another token to match hdfs/rpc behavior
       if (token != null) {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Using UGI token: {}", token);
-        }
+        LOG.debug("Using UGI token: {}", token);
         canRefreshDelegationToken = false;
       } else {
         token = getDelegationToken(null);
         if (token != null) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("Fetched new token: {}", token);
-          }
+          LOG.debug("Fetched new token: {}", token);
         } else { // security is disabled
           canRefreshDelegationToken = false;
         }
@@ -257,9 +253,7 @@ public class WebHdfsFileSystem extends FileSystem
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Replaced expired token: {}", token);
-      }
+      LOG.debug("Replaced expired token: {}", token);
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -442,9 +436,7 @@ public class WebHdfsFileSystem extends FileSystem
     InetSocketAddress nnAddr = getCurrentNNAddr();
     final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("url={}", url);
-    }
+    LOG.trace("url={}", url);
     return url;
   }
 
@@ -479,9 +471,7 @@ public class WebHdfsFileSystem extends FileSystem
         + Param.toSortedString("&", getAuthParameters(op))
         + Param.toSortedString("&", parameters);
     final URL url = getNamenodeURL(path, query);
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("url={}", url);
-    }
+    LOG.trace("url={}", url);
     return url;
   }
 
@@ -769,9 +759,7 @@ public class WebHdfsFileSystem extends FileSystem
       } catch (Exception e) { // catch json parser errors
         final IOException ioe =
             new IOException("Response decoding failure: "+e.toString(), e);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Response decoding failure: {}", e.toString(), e);
-        }
+        LOG.debug("Response decoding failure.", e);
         throw ioe;
       } finally {
         conn.disconnect();
@@ -1242,9 +1230,7 @@ public class WebHdfsFileSystem extends FileSystem
         cancelDelegationToken(delegationToken);
       }
     } catch (IOException ioe) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Token cancel failed: ", ioe);
-      }
+      LOG.debug("Token cancel failed: ", ioe);
     } finally {
       super.close();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5a9a3da/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b62b97..dfd0b57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1000,6 +1000,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8696. Make the lower and higher watermark in the DN Netty server
     configurable. (Xiaobing Zhou via wheat9)
 
+    HDFS-8971. Remove guards when calling LOG.debug() and LOG.trace() in client
+    package. (Mingliang Liu via wheat9)
+
  OPTIMIZATIONS
 
    HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
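
Context on the logging change folded into this commit (HDFS-8971): with SLF4J-style parameterized logging, an explicit isDebugEnabled()/isTraceEnabled() guard is redundant when the arguments are cheap to pass, because the logger performs the level check internally and the "{}" placeholders are only rendered if the level is enabled. A guard still pays off when computing the argument is itself expensive. The sketch below is illustrative only, not code from this patch; the class, method, and helper names are hypothetical.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogGuardSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LogGuardSketch.class);

  void report(Object token) {
    // Redundant guard: the logger checks the level itself, and the "{}"
    // placeholder defers string formatting until the message is actually logged.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Using UGI token: {}", token);
    }

    // Equivalent without the guard -- the style the patch adopts.
    LOG.debug("Using UGI token: {}", token);

    // A guard still helps when building the argument is costly, since the
    // argument expression is evaluated before the logger sees it.
    if (LOG.isTraceEnabled()) {
      LOG.trace("diagnostics: {}", buildExpensiveDiagnostics());
    }
  }

  private String buildExpensiveDiagnostics() {
    return "...";  // stand-in for expensive work
  }
}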