From: drankye@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 13 Sep 2016 08:29:19 -0000
Subject: [05/50] [abbrv] hadoop git commit: HDFS-9781. FsDatasetImpl#getBlockReports can occasionally throw NullPointerException. Contributed by Manoj Govindassamy.

HDFS-9781. FsDatasetImpl#getBlockReports can occasionally throw NullPointerException. Contributed by Manoj Govindassamy.
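The NPE is a race in FsDatasetImpl#getBlockReports(): the volume list was snapshotted and the per-volume report builders were created before the dataset lock was taken, so a volume removed in between leaves replicas in volumeMap whose storage ID has no builder, and the later builder lookup dereferences null. The patch moves the snapshot under the dataset lock and skips, with a once-per-volume warning, any replica whose volume is already gone. Below is a minimal, self-contained sketch of that pattern; Volume, Replica, and Report are hypothetical stand-ins, not the real Hadoop classes.

----------------------------------------------------------------------
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch only: Volume, Replica and Report are hypothetical stand-ins. */
class BlockReportSketch {
  static final class Volume { final String storageId; Volume(String id) { storageId = id; } }
  static final class Replica { final Volume volume; Replica(Volume v) { volume = v; } }
  static final class Report { int blocks; }

  private final ReentrantLock datasetLock = new ReentrantLock();

  Map<String, Report> getBlockReports(List<Volume> volumes, List<Replica> replicas) {
    Map<String, Report> builders = new HashMap<>();
    datasetLock.lock();
    try {
      // Snapshot the volumes under the same lock that guards the replica map,
      // so a concurrent volume removal cannot slip in between.
      for (Volume v : volumes) {
        builders.put(v.storageId, new Report());
      }
      Set<String> missingReported = new HashSet<>();
      for (Replica r : replicas) {
        String id = r.volume.storageId;
        Report report = builders.get(id);
        if (report == null) {            // volume vanished mid-flight
          if (missingReported.add(id)) { // warn only once per volume
            System.err.println("Volume " + id + " missing; probably being removed");
          }
          continue;                      // skip the replica instead of NPE-ing
        }
        report.blocks++;
      }
    } finally {
      datasetLock.unlock();
    }
    return builders;
  }

  public static void main(String[] args) {
    Volume v1 = new Volume("vol-1");
    Volume v2 = new Volume("vol-2"); // a removed volume: replicas still reference it
    List<Volume> liveVolumes = Arrays.asList(v1);
    List<Replica> replicas = Arrays.asList(new Replica(v1), new Replica(v2));
    Map<String, Report> reports = new BlockReportSketch().getBlockReports(liveVolumes, replicas);
    System.out.println("Reports built for: " + reports.keySet()); // [vol-1], no NPE
  }
}
----------------------------------------------------------------------

Taking the snapshot and the replica scan under one lock closes the window; the null-builder guard is defense in depth for any volume that still slips out.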
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07650bc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07650bc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07650bc3

Branch: refs/heads/HDFS-10285
Commit: 07650bc37a3c78ecc6566d813778d0954d0b06b0
Parents: f6ea9be
Author: Xiao Chen
Authored: Fri Sep 2 15:26:20 2016 -0700
Committer: Xiao Chen
Committed: Fri Sep 2 15:33:11 2016 -0700

----------------------------------------------------------------------
 .../datanode/fsdataset/impl/FsDatasetImpl.java | 21 +++++++--
 .../fsdataset/impl/TestFsDatasetImpl.java      | 49 ++++++++++++++++----
 2 files changed, 56 insertions(+), 14 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07650bc3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 129024b..e0d2baf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1827,13 +1827,24 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     Map<String, BlockListAsLongs.Builder> builders =
         new HashMap<String, BlockListAsLongs.Builder>();
 
-    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-    for (FsVolumeSpi v : curVolumes) {
-      builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
-    }
-
+    List<FsVolumeImpl> curVolumes = null;
     try (AutoCloseableLock lock = datasetLock.acquire()) {
+      curVolumes = volumes.getVolumes();
+      for (FsVolumeSpi v : curVolumes) {
+        builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
+      }
+
+      Set<String> missingVolumesReported = new HashSet<>();
       for (ReplicaInfo b : volumeMap.replicas(bpid)) {
+        String volStorageID = b.getVolume().getStorageID();
+        if (!builders.containsKey(volStorageID)) {
+          if (!missingVolumesReported.contains(volStorageID)) {
+            LOG.warn("Storage volume: " + volStorageID + " missing for the"
+                + " replica block: " + b + ". Probably being removed!");
+            missingVolumesReported.add(volStorageID);
+          }
+          continue;
+        }
         switch(b.getState()) {
           case FINALIZED:
           case RBW:
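The patched method also relies on Hadoop's try-with-resources locking idiom: datasetLock.acquire() returns an org.apache.hadoop.util.AutoCloseableLock whose close() releases the lock, so it is unlocked on every exit path, including exceptions. A minimal stand-in for that idiom, assuming only java.util.concurrent.locks (this is not the Hadoop class itself):

----------------------------------------------------------------------
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/** Minimal stand-in for the AutoCloseableLock idiom; illustrative only. */
final class AutoCloseableLockSketch implements AutoCloseable {
  private final Lock lock;

  private AutoCloseableLockSketch(Lock lock) { this.lock = lock; }

  static AutoCloseableLockSketch acquire(Lock lock) {
    lock.lock();        // block until the lock is held
    return new AutoCloseableLockSketch(lock);
  }

  @Override
  public void close() { // invoked automatically when the try block exits
    lock.unlock();
  }

  public static void main(String[] args) {
    Lock datasetLock = new ReentrantLock();
    try (AutoCloseableLockSketch guard = acquire(datasetLock)) {
      // critical section: snapshot volumes, build the report
    } // lock released here even if the body throws
  }
}
----------------------------------------------------------------------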
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07650bc3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index e73a612..b946803 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -590,8 +590,15 @@ public class TestFsDatasetImpl {
     final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
     final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
     final CountDownLatch brReceivedLatch = new CountDownLatch(1);
+    final CountDownLatch volRemovedLatch = new CountDownLatch(1);
     class BlockReportThread extends Thread {
       public void run() {
+        // Lets wait for the volume remove process to start
+        try {
+          volRemovedLatch.await();
+        } catch (Exception e) {
+          LOG.info("Unexpected exception when waiting for vol removal:", e);
+        }
         LOG.info("Getting block report");
         dataset.getBlockReports(eb.getBlockPoolId());
         LOG.info("Successfully received block report");
@@ -599,18 +606,27 @@ public class TestFsDatasetImpl {
       }
     }
 
-    final BlockReportThread brt = new BlockReportThread();
     class ResponderThread extends Thread {
       public void run() {
         try (ReplicaHandler replica = dataset
             .createRbw(StorageType.DEFAULT, eb, false)) {
-          LOG.info("createRbw finished");
+          LOG.info("CreateRbw finished");
           startFinalizeLatch.countDown();
-          // Slow down while we're holding the reference to the volume
-          Thread.sleep(1000);
+          // Slow down while we're holding the reference to the volume.
+          // As we finalize a block, the volume is removed in parallel.
+          // Ignore any interrupts coming out of volume shutdown.
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException ie) {
+            LOG.info("Ignoring ", ie);
+          }
+
+          // Lets wait for the other thread finish getting block report
+          brReceivedLatch.await();
+
           dataset.finalizeBlock(eb);
-          LOG.info("finalizeBlock finished");
+          LOG.info("FinalizeBlock finished");
         } catch (Exception e) {
           LOG.warn("Exception caught. This should not affect the test", e);
         }
@@ -621,13 +637,28 @@
     res.start();
     startFinalizeLatch.await();
 
+    // Verify if block report can be received
+    // when volume is being removed
+    final BlockReportThread brt = new BlockReportThread();
+    brt.start();
+
     Set<File> volumesToRemove = new HashSet<>();
     volumesToRemove.add(
         StorageLocation.parse(dataset.getVolume(eb).getBasePath()).getFile());
-    LOG.info("Removing volume " + volumesToRemove);
-    // Verify block report can be received during this
-    brt.start();
-    dataset.removeVolumes(volumesToRemove, true);
+    /**
+     * TODO: {@link FsDatasetImpl#removeVolumes(Set, boolean)} is throwing
+     * IllegalMonitorStateException when there is a parallel reader/writer
+     * to the volume. Remove below try/catch block after fixing HDFS-10830.
+     */
+    try {
+      LOG.info("Removing volume " + volumesToRemove);
+      dataset.removeVolumes(volumesToRemove, true);
+    } catch (Exception e) {
+      LOG.info("Unexpected issue while removing volume: ", e);
+    } finally {
+      volRemovedLatch.countDown();
+    }
+
+    LOG.info("Volumes removed");
     brReceivedLatch.await();
   }
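The test orders its threads purely with CountDownLatch handshakes: the responder creates an RBW replica and pins the volume, the main thread kicks off volume removal and then opens volRemovedLatch, the block-report thread waits on that latch before calling getBlockReports(), and everyone rejoins via brReceivedLatch. A reduced two-thread sketch of the same handshake, purely illustrative:

----------------------------------------------------------------------
import java.util.concurrent.CountDownLatch;

/** Reduced sketch of the latch handshake used by the test; illustrative only. */
public class LatchHandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch removalStarted = new CountDownLatch(1);
    final CountDownLatch reportReceived = new CountDownLatch(1);

    Thread reporter = new Thread(() -> {
      try {
        removalStarted.await();     // gate: do not report until removal begins
        System.out.println("Getting block report");
        // ... the call under test (e.g. getBlockReports) would run here ...
        reportReceived.countDown(); // signal the main thread we are done
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    });
    reporter.start();

    System.out.println("Removing volume");
    removalStarted.countDown();     // unblock the reporter, even if removal threw
    reportReceived.await();         // wait for the report before asserting
    System.out.println("Done");
  }
}
----------------------------------------------------------------------

Counting volRemovedLatch down in a finally block, as the patch does, matters: if removeVolumes() throws, the report thread is still released and the test cannot hang.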