From: zhz@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Mon, 02 Mar 2015 17:15:42 -0000
Subject: [26/50] [abbrv] hadoop git commit: HDFS-6753. Initialize checkDisk
 when DirectoryScanner not able to get files list for scanning
 (Contributed by J.Andreina)

HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get files
list for scanning (Contributed by J.Andreina)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54bcb5f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54bcb5f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54bcb5f7

Branch: refs/heads/HDFS-7285
Commit: 54bcb5f7e3fa8c1e4f3f325a4c3b6a24f61e4687
Parents: 04f0dae
Author: Vinayakumar B
Authored: Fri Feb 27 16:36:28 2015 +0530
Committer: Zhe Zhang
Committed: Mon Mar 2 09:13:53 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              |  3 +++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java     |  2 +-
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java    | 12 +++++++++---
 .../hdfs/server/datanode/TestDirectoryScanner.java       |  9 ++++++---
 4 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ba553dc..8556afd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1040,6 +1040,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32
     bit. (Kiran Kumar M R via cnauroth)
 
+    HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get
+    files list for scanning (J.Andreina via vinayakumarb)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
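The change is small, but the pattern it applies is worth spelling out before
the code diffs below: when a scan cannot list a directory, report the failure
to a disk checker instead of silently skipping the volume. What follows is a
minimal, self-contained sketch of that pattern; the names DiskChecker and
Scanner are hypothetical stand-ins for illustration, not the Hadoop classes
touched by this commit.

import java.io.File;

interface DiskChecker {
  // Must not block the calling (scan) thread.
  void checkDiskErrorAsync();
}

class Scanner {
  private final DiskChecker checker;

  Scanner(DiskChecker checker) {
    this.checker = checker;
  }

  // Returns the directory listing, or an empty array after scheduling a
  // disk check when the listing fails.
  File[] listOrReport(File dir) {
    File[] files = dir.listFiles();
    if (files == null) {
      // listFiles() returns null on an I/O error or when dir is not a
      // directory; treat it like the IOException in the real scanner.
      checker.checkDiskErrorAsync();
      return new File[0]; // ignore this directory and proceed
    }
    return files;
  }
}

Injecting the checker through the constructor, as the commit does with the
DataNode reference, keeps the scanner testable with a stub.
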
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f233e02..92ddb7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -815,7 +815,7 @@ public class DataNode extends ReconfigurableBase
       reason = "verifcation is not supported by SimulatedFSDataset";
     }
     if (reason == null) {
-      directoryScanner = new DirectoryScanner(data, conf);
+      directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
       LOG.info("Periodic Directory Tree Verification scan is disabled because " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 09c2914..c7ee21e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -63,6 +63,7 @@ public class DirectoryScanner implements Runnable {
   private final long scanPeriodMsecs;
   private volatile boolean shouldRun = false;
   private boolean retainDiffs = false;
+  private final DataNode datanode;
 
   final ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
   final Map<String, Stats> stats = new HashMap<String, Stats>();
@@ -308,7 +309,8 @@
     }
   }
 
-  DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
+  DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
+    this.datanode = datanode;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
@@ -547,7 +549,7 @@
     for (int i = 0; i < volumes.size(); i++) {
       if (isValid(dataset, volumes.get(i))) {
         ReportCompiler reportCompiler =
-          new ReportCompiler(volumes.get(i));
+          new ReportCompiler(datanode,volumes.get(i));
         Future<ScanInfoPerBlockPool> result =
           reportCompileThreadPool.submit(reportCompiler);
         compilersInProgress.put(i, result);
@@ -585,8 +587,10 @@
   private static class ReportCompiler implements Callable<ScanInfoPerBlockPool> {
     private final FsVolumeSpi volume;
+    private final DataNode datanode;
 
-    public ReportCompiler(FsVolumeSpi volume) {
+    public ReportCompiler(DataNode datanode, FsVolumeSpi volume) {
+      this.datanode = datanode;
       this.volume = volume;
     }
@@ -611,6 +615,8 @@
         files = FileUtil.listFiles(dir);
       } catch (IOException ioe) {
         LOG.warn("Exception occured while compiling report: ", ioe);
+        // Initiate a check on disk failure.
+        datanode.checkDiskErrorAsync();
         // Ignore this directory and proceed.
         return report;
       }
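The committed code delegates to DataNode#checkDiskErrorAsync(), which, as the
name suggests, schedules the disk check rather than running it on the report
compiler's thread. Below is a hedged sketch of how such an asynchronous,
de-duplicated check can be structured; it is an illustration under stated
assumptions, not DataNode's actual implementation, and AsyncDiskChecker and
runDiskCheck() are hypothetical names.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

class AsyncDiskChecker {
  // Single daemon thread so checks never block scans or delay shutdown.
  private final ExecutorService executor =
      Executors.newSingleThreadExecutor(r -> {
        Thread t = new Thread(r, "disk-checker");
        t.setDaemon(true);
        return t;
      });
  // Collapse duplicate requests while a check is already queued or running.
  private final AtomicBoolean pending = new AtomicBoolean(false);

  void checkDiskErrorAsync() {
    if (pending.compareAndSet(false, true)) {
      executor.submit(() -> {
        try {
          runDiskCheck();
        } finally {
          pending.set(false);
        }
      });
    }
  }

  private void runDiskCheck() {
    // Hypothetical: probe each storage directory for read/write access
    // and remove failed volumes; elided in this sketch.
  }
}

The AtomicBoolean guard matters here: a scan over many volumes can hit
several listing failures in one pass, and only one disk check is needed.
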
http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 681768e..0e0e959 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -299,10 +299,11 @@
         .build();
     try {
       cluster.waitActive();
+      DataNode dataNode = cluster.getDataNodes().get(0);
       bpid = cluster.getNamesystem().getBlockPoolId();
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -340,9 +341,10 @@
     try {
       cluster.waitActive();
       bpid = cluster.getNamesystem().getBlockPoolId();
+      DataNode dataNode = cluster.getDataNodes().get(0);
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       client = cluster.getFileSystem().getClient();
-      scanner = new DirectoryScanner(fds, CONF);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
       FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -389,7 +391,8 @@
       client = cluster.getFileSystem().getClient();
       CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
                   parallelism);
-      scanner = new DirectoryScanner(fds, CONF);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
       scanner.setRetainDiffs(true);
 
       // Add files with 100 blocks
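The tests above only adapt to the new constructor; they do not exercise the
failure path itself. To round out the picture, here is a hedged unit-test
sketch (JUnit 4) against the hypothetical Scanner and DiskChecker from the
first sketch, assuming both live in the same package: a directory that cannot
be listed should trigger exactly one disk check.

import static org.junit.Assert.assertEquals;

import java.io.File;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;

public class ScannerDiskCheckTest {
  @Test
  public void listFailureTriggersDiskCheck() {
    AtomicInteger checks = new AtomicInteger();
    // The counter's incrementAndGet doubles as the DiskChecker stub.
    Scanner scanner = new Scanner(checks::incrementAndGet);
    // A nonexistent path makes listFiles() return null, the failure case.
    File missing = new File("no-such-dir");
    assertEquals(0, scanner.listOrReport(missing).length);
    assertEquals(1, checks.get());
  }
}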