From: aengineer@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Wed, 30 Sep 2015 18:23:18 -0000
Message-Id: <18caca7fcfe743cf9baf8affeec4841a@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: [44/52] [abbrv] hadoop git commit: Revert "HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai."

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e122748..e245d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -233,12 +233,16 @@ public class WebHdfsFileSystem extends FileSystem
     // refetch tokens. even if ugi has credentials, don't attempt
     // to get another token to match hdfs/rpc behavior
     if (token != null) {
-      LOG.debug("Using UGI token: {}", token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Using UGI token: {}", token);
+      }
       canRefreshDelegationToken = false;
     } else {
       token = getDelegationToken(null);
       if (token != null) {
-        LOG.debug("Fetched new token: {}", token);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Fetched new token: {}", token);
+        }
       } else { // security is disabled
         canRefreshDelegationToken = false;
       }
@@ -253,7 +257,9 @@ public class WebHdfsFileSystem extends FileSystem
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
-      LOG.debug("Replaced expired token: {}", token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Replaced expired token: {}", token);
+      }
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -436,7 +442,9 @@ public class WebHdfsFileSystem extends FileSystem
     InetSocketAddress nnAddr = getCurrentNNAddr();
     final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
         nnAddr.getPort(), path + '?' + query);
-    LOG.trace("url={}", url);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url={}", url);
+    }
     return url;
   }

@@ -471,7 +479,9 @@ public class WebHdfsFileSystem extends FileSystem
         + Param.toSortedString("&", getAuthParameters(op))
         + Param.toSortedString("&", parameters);
     final URL url = getNamenodeURL(path, query);
-    LOG.trace("url={}", url);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url={}", url);
+    }
     return url;
   }

@@ -759,7 +769,9 @@ public class WebHdfsFileSystem extends FileSystem
       } catch (Exception e) { // catch json parser errors
         final IOException ioe =
             new IOException("Response decoding failure: "+e.toString(), e);
-        LOG.debug("Response decoding failure.", e);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Response decoding failure: {}", e.toString(), e);
+        }
         throw ioe;
       } finally {
         conn.disconnect();
@@ -1230,7 +1242,9 @@ public class WebHdfsFileSystem extends FileSystem
           cancelDelegationToken(delegationToken);
         }
       } catch (IOException ioe) {
-        LOG.debug("Token cancel failed: ", ioe);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Token cancel failed: ", ioe);
+        }
       } finally {
         super.close();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dfd0b57..7b62b97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1000,9 +1000,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8696. Make the lower and higher watermark in the DN Netty server
     configurable. (Xiaobing Zhou via wheat9)

-    HDFS-8971. Remove guards when calling LOG.debug() and LOG.trace() in client
-    package. (Mingliang Liu via wheat9)
-
     OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
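
The Java hunks above restore isDebugEnabled()/isTraceEnabled() guards that HDFS-8971 had removed in favor of plain SLF4J parameterized calls, and the CHANGES.txt hunk drops the corresponding entry. For readers unfamiliar with the two styles, a minimal, self-contained sketch follows; the class and method names (GuardedLoggingSketch, logToken) are illustrative only and are not taken from the Hadoop sources.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only; not part of the Hadoop code base.
public class GuardedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoggingSketch.class);

  void logToken(Object token) {
    // Unguarded, parameterized style (what HDFS-8971 switched to):
    // SLF4J only formats the "{}" message when DEBUG is enabled, so the
    // guard is redundant as long as the arguments are cheap to pass.
    LOG.debug("Using UGI token: {}", token);

    // Guarded style (what the hunks above restore): the explicit check
    // also skips evaluating argument expressions, e.g. the e.toString()
    // call in the "Response decoding failure" hunk, which would otherwise
    // run even when DEBUG logging is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Using UGI token: {}", token);
    }
  }
}

Either form logs the same message; the trade-off is conciseness versus avoiding any eager argument evaluation on hot paths.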