From common-commits-return-98918-archive-asf-public=cust-asf.ponee.io@hadoop.apache.org Sun May 24 12:38:24 2020
Date: Sun, 24 May 2020 12:38:22 +0000
To: "common-commits@hadoop.apache.org"
Subject: [hadoop] branch branch-3.3 updated: HDFS-15369. Refactor method VolumeScanner#runLoop(). Contributed by Yang Yun.
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Message-ID: <159032390254.12596.7152860728479637273@gitbox.apache.org>
From: ayushsaxena@apache.org
X-Git-Host: gitbox.apache.org
X-Git-Repo: hadoop
X-Git-Refname: refs/heads/branch-3.3
X-Git-Reftype: branch
X-Git-Oldrev: c44d8b8bdf98d7e39e49344db5f1127cf4829967
X-Git-Newrev: 718c8a5868680115c0f5f8253a8c9f72e0069082
X-Git-Rev: 718c8a5868680115c0f5f8253a8c9f72e0069082
X-Git-NotificationType: ref_changed_plus_diff
X-Git-Multimail-Version: 1.5.dev
Auto-Submitted: auto-generated

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 718c8a5  HDFS-15369. Refactor method VolumeScanner#runLoop(). Contributed by Yang Yun.
718c8a5 is described below

commit 718c8a5868680115c0f5f8253a8c9f72e0069082
Author: Ayush Saxena
AuthorDate: Sun May 24 18:03:41 2020 +0530

    HDFS-15369. Refactor method VolumeScanner#runLoop(). Contributed by Yang Yun.
---
 .../hadoop/hdfs/server/datanode/VolumeScanner.java | 81 +++++++++++++---------
 1 file changed, 47 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 5f1a1e0..5e3d523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -484,6 +484,50 @@ public class VolumeScanner extends Thread {
   }
 
   /**
+   * Get next block and check if it's needed to scan.
+   *
+   * @return the candidate block.
+   */
+  ExtendedBlock getNextBlockToScan() {
+    ExtendedBlock block;
+    try {
+      block = curBlockIter.nextBlock();
+    } catch (IOException e) {
+      // There was an error listing the next block in the volume. This is a
+      // serious issue.
+ LOG.warn("{}: nextBlock error on {}", this, curBlockIter); + // On the next loop iteration, curBlockIter#eof will be set to true, and + // we will pick a different block iterator. + return null; + } + if (block == null) { + // The BlockIterator is at EOF. + LOG.info("{}: finished scanning block pool {}", + this, curBlockIter.getBlockPoolId()); + saveBlockIterator(curBlockIter); + return null; + } else if (conf.skipRecentAccessed) { + // Check the access time of block file to avoid scanning recently + // changed blocks, reducing disk IO. + try { + BlockLocalPathInfo blockLocalPathInfo = + volume.getDataset().getBlockLocalPathInfo(block); + BasicFileAttributes attr = Files.readAttributes( + new File(blockLocalPathInfo.getBlockPath()).toPath(), + BasicFileAttributes.class); + if (System.currentTimeMillis() - attr.lastAccessTime(). + to(TimeUnit.MILLISECONDS) < conf.scanPeriodMs) { + return null; + } + } catch (IOException ioe) { + LOG.debug("Failed to get access time of block {}", + block, ioe); + } + } + return block; + } + + /** * Run an iteration of the VolumeScanner loop. * * @param suspectBlock A suspect block which we should scan, or null to @@ -507,10 +551,10 @@ public class VolumeScanner extends Thread { return 30000L; } - // Find a usable block pool to scan. if (suspectBlock != null) { block = suspectBlock; } else { + // Find a usable block pool to scan. if ((curBlockIter == null) || curBlockIter.atEnd()) { long timeout = findNextUsableBlockIter(); if (timeout > 0) { @@ -528,40 +572,9 @@ public class VolumeScanner extends Thread { } return 0L; } - try { - block = curBlockIter.nextBlock(); - } catch (IOException e) { - // There was an error listing the next block in the volume. This is a - // serious issue. - LOG.warn("{}: nextBlock error on {}", this, curBlockIter); - // On the next loop iteration, curBlockIter#eof will be set to true, and - // we will pick a different block iterator. - return 0L; - } + block = getNextBlockToScan(); if (block == null) { - // The BlockIterator is at EOF. - LOG.info("{}: finished scanning block pool {}", - this, curBlockIter.getBlockPoolId()); - saveBlockIterator(curBlockIter); - return 0; - } else if (conf.skipRecentAccessed) { - // Check the access time of block file to avoid scanning recently - // changed blocks, reducing disk IO. - try { - BlockLocalPathInfo blockLocalPathInfo = - volume.getDataset().getBlockLocalPathInfo(block); - BasicFileAttributes attr = Files.readAttributes( - new File(blockLocalPathInfo.getBlockPath()).toPath(), - BasicFileAttributes.class); - if (System.currentTimeMillis() - attr.lastAccessTime(). - to(TimeUnit.MILLISECONDS) < conf.scanPeriodMs) { - return 0; - } - - } catch (IOException ioe) { - LOG.debug("Failed to get access time of block {}", - block, ioe); - } + return 0L; } } if (curBlockIter != null) { --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org