X-Original-To: apmail-hadoop-common-commits-archive@www.apache.org
Delivered-To: apmail-hadoop-common-commits-archive@www.apache.org
Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 7FD98172CA for ; Fri, 20 Mar 2015 16:46:14 +0000 (UTC)
Received: (qmail 81949 invoked by uid 500); 20 Mar 2015 16:46:13 -0000
Delivered-To: apmail-hadoop-common-commits-archive@hadoop.apache.org
Received: (qmail 81885 invoked by uid 500); 20 Mar 2015 16:46:12 -0000
Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm
Precedence: bulk
Reply-To: common-dev@hadoop.apache.org
Delivered-To: mailing list common-commits@hadoop.apache.org
Received: (qmail 81866 invoked by uid 99); 20 Mar 2015 16:46:12 -0000
Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 20 Mar 2015 16:46:12 +0000
Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id BF1C8E1102; Fri, 20 Mar 2015 16:46:12 +0000 (UTC)
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: yjzhangal@apache.org
To: common-commits@hadoop.apache.org
Message-Id: <28884d85edde4a5982293aae70d2fe3b@git.apache.org>
X-Mailer: ASF-Git Admin Mailer
Subject: hadoop git commit: HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for DFSClient. Contributed by Zhihai Xu.
Date: Fri, 20 Mar 2015 16:46:12 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ed7f847c9 -> 0a41b1bdc


HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for DFSClient. Contributed by Zhihai Xu.

(cherry-picked from commit 15612313f578a5115f8d03885e9b0c8c376ed56e)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a41b1bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a41b1bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a41b1bd

Branch: refs/heads/branch-2
Commit: 0a41b1bdcadbdfc427348799f89d122a152d41fe
Parents: ed7f847
Author: Yongjun Zhang
Authored: Fri Mar 20 08:59:44 2015 -0700
Committer: Yongjun Zhang
Committed: Fri Mar 20 09:25:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 ++++++++++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 10 +++++----
 .../src/main/resources/hdfs-default.xml         |  7 +++++++
 .../hadoop/hdfs/TestDFSClientRetries.java       | 22 ++++++++++++++++++++
 6 files changed, 52 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9c4258..402504d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -10,6 +10,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
+    HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for
+    DFSClient. (Zhihai Xu via Yongjun Zhang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 658cccf..74c0d78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAUL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
@@ -307,6 +309,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     final int nCachedConnRetry;
     final int nBlockWriteRetry;
     final int nBlockWriteLocateFollowingRetry;
+    final int blockWriteLocateFollowingInitialDelayMs;
     final long defaultBlockSize;
     final long prefetchSize;
     final short defaultReplication;
@@ -418,6 +421,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       nBlockWriteLocateFollowingRetry = conf.getInt(
           DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
           DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+      blockWriteLocateFollowingInitialDelayMs = conf.getInt(
+          DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+          DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT);
       uMask = FsPermission.getUMask(conf);
       connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
           DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
@@ -568,6 +574,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       }
       return dataChecksum;
     }
+
+    @VisibleForTesting
+    public int getBlockWriteLocateFollowingInitialDelayMs() {
+      return blockWriteLocateFollowingInitialDelayMs;
+    }
   }
 
   public Conf getConf() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d1c37df..0894e0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -401,6 +401,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Much code in hdfs is not yet updated to use these keys.
   public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
   public static final int DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+  // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
+  public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY = "dfs.client.block.write.locateFollowingBlock.initial.delay.ms";
+  public static final int DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT = 400;
   public static final String DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
   public static final int DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
   public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 6a403af..4eca7d1 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1432,7 +1432,8 @@ public class DFSOutputStream extends FSOutputSummer
   private LocatedBlock locateFollowingBlock(long start,
       DatanodeInfo[] excludedNodes)  throws IOException {
     int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
-    long sleeptime = 400;
+    long sleeptime = dfsClient.getConf().
+        blockWriteLocateFollowingInitialDelayMs;
     while (true) {
       long localstart = Time.now();
       while (true) {
@@ -2257,7 +2258,8 @@ public class DFSOutputStream extends FSOutputSummer
   // be called during unit tests
   private void completeFile(ExtendedBlock last) throws IOException {
     long localstart = Time.now();
-    long localTimeout = 400;
+    long sleeptime = dfsClient.getConf().
+        blockWriteLocateFollowingInitialDelayMs;
     boolean fileComplete = false;
     int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
     while (!fileComplete) {
@@ -2280,8 +2282,8 @@ public class DFSOutputStream extends FSOutputSummer
               + " does not have enough number of replicas.");
         }
         retries--;
-        Thread.sleep(localTimeout);
-        localTimeout *= 2;
+        Thread.sleep(sleeptime);
+        sleeptime *= 2;
         if (Time.now() - localstart > 5000) {
           DFSClient.LOG.info("Could not complete " + src + " retrying...");
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index a8c2400..8a22a52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2305,4 +2305,11 @@ Whether pin blocks on favored DataNode.
   </description>
 </property>
 
+<property>
+  <name>dfs.client.block.write.locateFollowingBlock.initial.delay.ms</name>
+  <value>400</value>
+  <description>The initial delay (unit is ms) for locateFollowingBlock,
+    the delay time will increase exponentially(double) for each retry.
+  </description>
+</property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 382ad48..45f21df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -1126,4 +1126,26 @@ public class TestDFSClientRetries {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDFSClientConfigurationLocateFollowingBlockInitialDelay()
+      throws Exception {
+    // test if DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY
+    // is not configured, verify DFSClient uses the default value 400.
+    Configuration dfsConf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(dfsConf).build();
+    cluster.waitActive();
+    NamenodeProtocols nn = cluster.getNameNodeRpc();
+    DFSClient client = new DFSClient(null, nn, dfsConf, null);
+    assertEquals(client.getConf().
+        getBlockWriteLocateFollowingInitialDelayMs(), 400);
+
+    // change DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+    // verify DFSClient uses the configured value 1000.
+    dfsConf.setInt(DFSConfigKeys.
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY, 1000);
+    client = new DFSClient(null, nn, dfsConf, null);
+    assertEquals(client.getConf().
+        getBlockWriteLocateFollowingInitialDelayMs(), 1000);
+  }
 }
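
----------------------------------------------------------------------
For context, a minimal sketch (not part of this commit) of how a client
application could override the new setting. The key name and its 400 ms
default come from the diff above; the class name, target path, the 1000 ms
value, and the surrounding FileSystem usage are illustrative assumptions,
and the example assumes fs.defaultFS already points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class LocateFollowingBlockDelayExample {
  public static void main(String[] args) throws Exception {
    // Start from the usual client-side HDFS configuration (hdfs-site.xml etc.).
    Configuration conf = new HdfsConfiguration();

    // Raise the initial locateFollowingBlock/completeFile retry delay from the
    // default 400 ms to 1000 ms. Per the DFSOutputStream change above, the
    // delay doubles on every retry (1000, 2000, 4000, ... ms).
    conf.setInt(
        "dfs.client.block.write.locateFollowingBlock.initial.delay.ms", 1000);

    // Any FileSystem built from this configuration picks up the new delay;
    // closing the output stream triggers completeFile(), where it is used.
    try (FileSystem fs = FileSystem.get(conf)) {
      fs.create(new Path("/tmp/locate-following-block-example.txt")).close();
    }
  }
}

With the unchanged dfs.client.block.write.locateFollowingBlock.retries
default of 5, the doubling sequence bounds the cumulative sleep at roughly
400 + 800 + 1600 + 3200 + 6400 = 12400 ms for the default initial delay,
and proportionally more when the initial delay is raised as sketched above.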