From: cmccabe@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)
Date: Wed, 21 Jan 2015 04:14:07 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 73b72a048 -> a17584936


HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1758493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1758493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1758493

Branch: refs/heads/trunk
Commit: a17584936cc5141e3f5612ac3ecf35e27968e439
Parents: 73b72a0
Author: Colin Patrick Mccabe
Authored: Tue Jan 20 20:11:09 2015 -0800
Committer: Colin Patrick Mccabe
Committed: Tue Jan 20 20:11:09 2015 -0800

----------------------------------------------------------------------
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 16 +++++----
 .../datanode/fsdataset/impl/FsVolumeList.java   |  8 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java       | 37 ++++++++++++++++++--
 3 files changed, 49 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1758493/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 5347323..d8cc287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -342,7 +342,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+        this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
     ArrayList<IOException> exceptions = Lists.newArrayList();

@@ -385,19 +385,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
         LOG.info("Removing " + volume + " from FsDataset.");
         // Disable the volume from the service.
         asyncDiskService.removeVolume(sd.getCurrentDir());
-        this.volumes.removeVolume(volume);
+        this.volumes.removeVolume(sd.getRoot());

         // Removed all replica information for the blocks on the volume. Unlike
         // updating the volumeMap in addVolume(), this operation does not scan
@@ -407,7 +407,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
              it.hasNext(); ) {
           ReplicaInfo block = it.next();
-          if (block.getVolume().getBasePath().equals(volume)) {
+          String absBasePath =
+              new File(block.getVolume().getBasePath()).getAbsolutePath();
+          if (absBasePath.equals(volume)) {
            invalidate(bpid, block);
            blocks.add(block);
            it.remove();
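
The first hunk above changes which directory the dynamically added FsVolumeImpl
is built from: sd.getCurrentDir(), the storage directory's current/ subdirectory,
rather than the raw StorageLocation directory. The later hunks compare a
volume's getBasePath() against sd.getRoot(), so the base path has to resolve
back to the volume root; judging from those comparisons, a volume built from
the root itself would report a base path one level too high and a removal
request for that root could never match it. A small sketch of that layout
relationship using plain java.io.File (the paths are made up for illustration,
and none of this is Hadoop code):

import java.io.File;

// Illustration of the DataNode volume layout assumed by the hunks above: a
// configured data directory contains a current/ subdirectory, and the
// volume's "base path" is expected to be the data directory itself.
public class VolumeLayoutSketch {
  public static void main(String[] args) {
    File volumeRoot = new File("/data/dn1");            // a dfs.datanode.data.dir entry
    File currentDir = new File(volumeRoot, "current");  // what the storage directory tracks

    // A base path derived from current/ lands back on the volume root ...
    System.out.println(currentDir.getParentFile());     // /data/dn1
    // ... whereas deriving it from the root itself points one level up, so a
    // removal request for /data/dn1 would never match.
    System.out.println(volumeRoot.getParentFile());     // /data
  }
}
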

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1758493/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index ba19897..c837593 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

+import java.io.File;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
@@ -322,13 +323,16 @@ class FsVolumeList {
    * Dynamically remove volume in the list.
    * @param volume the volume to be removed.
    */
-  void removeVolume(String volume) {
+  void removeVolume(File volume) {
     // Make a copy of volumes to remove one volume.
     final FsVolumeImpl[] curVolumes = volumes.get();
     final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
     for (Iterator<FsVolumeImpl> it = volumeList.iterator(); it.hasNext(); ) {
       FsVolumeImpl fsVolume = it.next();
-      if (fsVolume.getBasePath().equals(volume)) {
+      String basePath, targetPath;
+      basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
+      targetPath = volume.getAbsolutePath();
+      if (basePath.equals(targetPath)) {
         // Make sure the removed volume is the one in the curVolumes.
         removeVolume(fsVolume);
       }
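
FsVolumeList.removeVolume() now takes a File and normalizes both sides of the
comparison to absolute paths, mirroring the absolute-path Set<String> used by
removeVolumes() above. That normalization is what lets the updated test below
hand it a bare relative name such as new File("data4"). A standalone sketch of
the underlying comparison problem, using only java.io.File (class and variable
names are illustrative, not Hadoop APIs):

import java.io.File;
import java.util.HashSet;
import java.util.Set;

// Standalone illustration: the same directory spelled relatively and
// absolutely does not compare equal as a File (or as a raw string), which is
// how a dynamically added volume could be missed at removal time.
public class AbsolutePathMatchSketch {
  public static void main(String[] args) {
    File relative = new File("data4");
    File absolute = relative.getAbsoluteFile();

    Set<File> byFile = new HashSet<>();
    byFile.add(relative);
    System.out.println(byFile.contains(absolute));   // false

    // Normalizing both sides to absolute paths, as the patch does, matches.
    System.out.println(
        relative.getAbsolutePath().equals(absolute.getAbsolutePath()));  // true
  }
}
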
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1758493/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 0120dfe..ca936b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -46,6 +46,7 @@ import org.mockito.stubbing.Answer;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -70,6 +71,7 @@ public class TestFsDatasetImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_INIT_VOLUMES = 2;
+  private static final String CLUSTER_ID = "cluser-id";
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};

   // Use to generate storageUuid
@@ -136,10 +138,11 @@
     Set<String> expectedVolumes = new HashSet<String>();
     List<NamespaceInfo> nsInfos = Lists.newArrayList();
     for (String bpid : BLOCK_POOL_IDS) {
-      nsInfos.add(new NamespaceInfo(0, "cluster-id", bpid, 1));
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
     }
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
+      expectedVolumes.add(path);
       StorageLocation loc = StorageLocation.parse(path);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
@@ -156,7 +159,8 @@

     Set<String> actualVolumes = new HashSet<String>();
     for (int i = 0; i < numNewVolumes; i++) {
-      dataset.getVolumes().get(numExistingVolumes + i).getBasePath();
+      actualVolumes.add(
+          dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
     }
     assertEquals(actualVolumes, expectedVolumes);
   }
@@ -211,6 +215,33 @@
   }

   @Test(timeout = 5000)
+  public void testRemoveNewlyAddedVolume() throws IOException {
+    final int numExistingVolumes = dataset.getVolumes().size();
+    List<NamespaceInfo> nsInfos = new ArrayList<>();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
+    StorageLocation loc = StorageLocation.parse(newVolumePath);
+
+    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
+    DataStorage.VolumeBuilder builder =
+        new DataStorage.VolumeBuilder(storage, sd);
+    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        anyListOf(NamespaceInfo.class)))
+        .thenReturn(builder);
+
+    dataset.addVolume(loc, nsInfos);
+    assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
+
+    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
+    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
+    List<StorageLocation> volumesToRemove = Arrays.asList(loc);
+    dataset.removeVolumes(volumesToRemove);
+    assertEquals(numExistingVolumes, dataset.getVolumes().size());
+  }
+
+  @Test(timeout = 5000)
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
         new RoundRobinVolumeChoosingPolicy<>();
@@ -234,7 +265,7 @@
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume("data4");
+        volumeList.removeVolume(new File("data4"));
         volumeList.addVolume(newVolume);
         return null;
       }