From: aengineer@apache.org
To: common-commits@hadoop.apache.org
Date: Thu, 29 Sep 2016 18:13:39 -0000
Subject: [17/50] [abbrv] hadoop git commit: HDFS-10828. Fix usage of FsDatasetImpl object lock in ReplicaMap. (Arpit Agarwal)

HDFS-10828. Fix usage of FsDatasetImpl object lock in ReplicaMap.
(Arpit Agarwal)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ae47291
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ae47291
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ae47291

Branch: refs/heads/HDFS-7240
Commit: 8ae4729107d33c6001cf1fdc8837afb71ea6c0d3
Parents: f55eb98
Author: Arpit Agarwal
Authored: Tue Sep 27 09:38:29 2016 -0700
Committer: Arpit Agarwal
Committed: Tue Sep 27 10:02:15 2016 -0700

----------------------------------------------------------------------
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 11 +++---
 .../datanode/fsdataset/impl/ReplicaMap.java     | 41 ++++++++++----------
 .../fsdataset/impl/FsDatasetImplTestUtils.java  |  2 +-
 .../impl/TestInterDatanodeProtocol.java         |  3 +-
 .../datanode/fsdataset/impl/TestReplicaMap.java |  3 +-
 .../fsdataset/impl/TestWriteToReplica.java      |  3 +-
 7 files changed, 36 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
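Every converted call site in this patch has the same shape: a block synchronized on a shared monitor object becomes a try-with-resources acquisition of an AutoCloseableLock, so the lock is released on every exit path, including exceptions and early returns. Schematically, using the getMutex()/getLock() accessors that appear in the diffs below:

    // Before: synchronize on the mutex object exposed by ReplicaMap.
    synchronized (map.getMutex()) {
      // ... critical section ...
    }

    // After: acquire the explicit lock; try-with-resources releases it
    // automatically when the block exits, normally or via an exception.
    try (AutoCloseableLock lock = map.getLock().acquire()) {
      // ... critical section ...
    }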
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index b4384b3..29dbb29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -769,7 +770,7 @@ class BlockPoolSlice {
   private boolean readReplicasFromCache(ReplicaMap volumeMap,
       final RamDiskReplicaTracker lazyWriteReplicaMap) {
-    ReplicaMap tmpReplicaMap = new ReplicaMap(this);
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
     File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
     // Check whether the file exists or not.
     if (!replicaFile.exists()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 54b2ce8..26a2e9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -251,7 +251,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private boolean blockPinningEnabled;
   private final int maxDataLength;
 
-  private final AutoCloseableLock datasetLock;
+  @VisibleForTesting
+  final AutoCloseableLock datasetLock;
   private final Condition datasetLockCondition;
 
   /**
@@ -293,7 +294,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
 
     storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
-    volumeMap = new ReplicaMap(this);
+    volumeMap = new ReplicaMap(datasetLock);
     ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);
 
     @SuppressWarnings("unchecked")
@@ -419,7 +420,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     FsVolumeImpl fsVolume = new FsVolumeImpl(
         this, sd.getStorageUuid(), dir, this.conf, storageType);
     FsVolumeReference ref = fsVolume.obtainReference();
-    ReplicaMap tempVolumeMap = new ReplicaMap(this);
+    ReplicaMap tempVolumeMap = new ReplicaMap(datasetLock);
     fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
 
     activateVolume(tempVolumeMap, sd, storageType, ref);
@@ -453,7 +454,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume = createFsVolume(
         sd.getStorageUuid(), sd.getCurrentDir(), storageType);
-    final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
+    final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
     ArrayList<IOException> exceptions = Lists.newArrayList();
 
     for (final NamespaceInfo nsInfo : nsInfos) {
@@ -2362,7 +2363,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       Block block, long recoveryId, long xceiverStopTimeout) throws IOException {
     while (true) {
       try {
-        synchronized (map.getMutex()) {
+        try (AutoCloseableLock lock = map.getLock().acquire()) {
           return initReplicaRecoveryImpl(bpid, map, block, recoveryId);
         }
       } catch (MustStopExistingWriter e) {
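For context, org.apache.hadoop.util.AutoCloseableLock is essentially a ReentrantLock whose acquire() returns the lock object itself, which is what lets it sit in a try-with-resources header. A minimal sketch of the idea only, not the real implementation (the actual Hadoop class carries additional methods and checks):

    import java.util.concurrent.locks.ReentrantLock;

    // Illustrative sketch; see org.apache.hadoop.util.AutoCloseableLock
    // for the real class.
    class AutoCloseableLockSketch implements AutoCloseable {
      private final ReentrantLock lock = new ReentrantLock();

      // Block until the lock is held, then return this so the caller's
      // try-with-resources statement manages the release.
      AutoCloseableLockSketch acquire() {
        lock.lock();
        return this;
      }

      // Invoked automatically when the try block exits, normally or not.
      @Override
      public void close() {
        lock.unlock();
      }
    }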
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 192702e..d4fb69b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -26,13 +26,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
+import org.apache.hadoop.util.AutoCloseableLock;
 
 /**
  * Maintains the replica map.
  */
 class ReplicaMap {
-  // Object using which this class is synchronized
-  private final Object mutex;
+  // Lock object to synchronize this instance.
+  private final AutoCloseableLock lock;
 
   // Map of block pool Id to a set of ReplicaInfo.
   private final Map<String, FoldedTreeSet<ReplicaInfo>> map = new HashMap<>();
@@ -49,16 +50,16 @@ class ReplicaMap {
     }
   };
 
-  ReplicaMap(Object mutex) {
-    if (mutex == null) {
+  ReplicaMap(AutoCloseableLock lock) {
+    if (lock == null) {
       throw new HadoopIllegalArgumentException(
-          "Object to synchronize on cannot be null");
+          "Lock to synchronize on cannot be null");
     }
-    this.mutex = mutex;
+    this.lock = lock;
   }
 
   String[] getBlockPoolList() {
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       return map.keySet().toArray(new String[map.keySet().size()]);
     }
   }
@@ -103,7 +104,7 @@ class ReplicaMap {
    */
   ReplicaInfo get(String bpid, long blockId) {
     checkBlockPool(bpid);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       if (set == null) {
         return null;
@@ -123,7 +124,7 @@ class ReplicaMap {
   ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
     checkBlockPool(bpid);
     checkBlock(replicaInfo);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       if (set == null) {
         // Add an entry for block pool if it does not exist already
@@ -152,7 +153,7 @@ class ReplicaMap {
   ReplicaInfo remove(String bpid, Block block) {
     checkBlockPool(bpid);
     checkBlock(block);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       if (set != null) {
         ReplicaInfo replicaInfo =
@@ -175,7 +176,7 @@ class ReplicaMap {
    */
   ReplicaInfo remove(String bpid, long blockId) {
     checkBlockPool(bpid);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       if (set != null) {
         return set.removeAndGet(blockId, LONG_AND_BLOCK_COMPARATOR);
@@ -190,7 +191,7 @@ class ReplicaMap {
    * @return the number of replicas in the map
    */
   int size(String bpid) {
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       return set != null ? set.size() : 0;
     }
@@ -199,9 +200,9 @@ class ReplicaMap {
   /**
    * Get a collection of the replicas for given block pool
    * This method is not synchronized. It needs to be synchronized
-   * externally using the mutex, both for getting the replicas
+   * externally using the lock, both for getting the replicas
    * values from the map and iterating over it. Mutex can be accessed using
-   * {@link #getMutext()} method.
+   * {@link #getLock()} method.
    *
    * @param bpid block pool id
    * @return a collection of the replicas belonging to the block pool
    */
@@ -212,7 +213,7 @@ class ReplicaMap {
 
   void initBlockPool(String bpid) {
     checkBlockPool(bpid);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
       if (set == null) {
         // Add an entry for block pool if it does not exist already
@@ -224,16 +225,16 @@ class ReplicaMap {
 
   void cleanUpBlockPool(String bpid) {
     checkBlockPool(bpid);
-    synchronized(mutex) {
+    try (AutoCloseableLock l = lock.acquire()) {
       map.remove(bpid);
     }
   }
 
   /**
-   * Give access to mutex used for synchronizing ReplicasMap
-   * @return object used as lock
+   * Get the lock object used for synchronizing ReplicasMap
+   * @return lock object
    */
-  Object getMutex() {
-    return mutex;
+  AutoCloseableLock getLock() {
+    return lock;
   }
 }
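The retained javadoc above still requires callers to hold the lock while iterating the replica collection; only the handle changed from getMutex() to getLock(). A hypothetical caller, not part of the patch (the helper name and the replicas(bpid) accessor it iterates are assumptions based on that javadoc, and such a helper would have to live in the fsdataset.impl package because ReplicaMap is package-private):

    // Hypothetical helper: sums replica sizes for one block pool while
    // holding the map's lock, since the replica collection is unsynchronized.
    static long totalReplicaBytes(ReplicaMap volumeMap, String bpid) {
      long bytes = 0;
      try (AutoCloseableLock l = volumeMap.getLock().acquire()) {
        for (ReplicaInfo r : volumeMap.replicas(bpid)) {
          bytes += r.getNumBytes(); // no concurrent add/remove can interleave
        }
      }
      return bytes;
    }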
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index e1825f8..a465c05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -407,7 +407,7 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   @Override
   public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
     // Reload replicas from the disk.
-    ReplicaMap replicaMap = new ReplicaMap(dataset);
+    ReplicaMap replicaMap = new ReplicaMap(dataset.datasetLock);
     try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
       for (FsVolumeSpi vol : refs) {
         FsVolumeImpl volume = (FsVolumeImpl) vol;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index c054641..4f6db24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -234,7 +235,7 @@ public class TestInterDatanodeProtocol {
     final long firstblockid = 10000L;
     final long gs = 7777L;
     final long length = 22L;
-    final ReplicaMap map = new ReplicaMap(this);
+    final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
     String bpid = "BP-TEST";
     final Block[] blocks = new Block[5];
     for(int i = 0; i < blocks.length; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index db1cbbc..4fa91b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -30,7 +31,7 @@ import org.junit.Test;
  * Unit test for ReplicasMap class
  */
 public class TestReplicaMap {
-  private final ReplicaMap map = new ReplicaMap(TestReplicaMap.class);
+  private final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
   private final String bpid = "BP-TEST";
   private final Block block = new Block(1234, 1234, 1234);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ae47291/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index 320af7b..da53cae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.Assert;
 import org.junit.Test;
@@ -534,7 +535,7 @@ public class TestWriteToReplica {
         bpList.size() == 2);
 
     createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
-    ReplicaMap oldReplicaMap = new ReplicaMap(this);
+    ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
     oldReplicaMap.addAll(dataSet.volumeMap);
 
     cluster.restartDataNode(0);
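The test changes above all follow the same construction idiom: each test supplies a private AutoCloseableLock instead of passing this or a Class object as the monitor. A hypothetical JUnit fragment exercising only the ReplicaMap methods visible in this patch (the test name and assertions are illustrative, not from the patch):

    // Hypothetical test sketch; assumes the imports shown in the diffs above
    // plus: import static org.junit.Assert.assertEquals;
    @Test
    public void testBlockPoolLifecycle() {
      ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
      map.initBlockPool("BP-TEST");
      // initBlockPool registers an empty replica set for the pool.
      assertEquals(1, map.getBlockPoolList().length);
      assertEquals(0, map.size("BP-TEST"));
      map.cleanUpBlockPool("BP-TEST");
      // cleanUpBlockPool removes the pool's entry entirely.
      assertEquals(0, map.getBlockPoolList().length);
    }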