From: virajith@apache.org
To: common-commits@hadoop.apache.org
Message-Id: <5c7686650b79439987700ef339ccfa63@git.apache.org>
Subject: hadoop git commit: HDFS-12776. [READ] Increasing replication for PROVIDED files should create local replicas
Date: Thu, 9 Nov 2017 21:03:53 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-9806 757ff834a -> a044f5c90


HDFS-12776. [READ] Increasing replication for PROVIDED files should create local replicas

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a044f5c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a044f5c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a044f5c9

Branch: refs/heads/HDFS-9806
Commit: a044f5c90254aa4db220ec3028e7d4902de40e99
Parents: 757ff83
Author: Virajith Jalaparti <virajith@apache.org>
Authored: Thu Nov 9 13:03:41 2017 -0800
Committer: Virajith Jalaparti <virajith@apache.org>
Committed: Thu Nov 9 13:03:41 2017 -0800

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockInfo.java  |  7 ++--
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 25 +++++++++++---
 .../TestNameNodeProvidedImplementation.java     | 36 +++++++++++---------
 3 files changed, 45 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
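
For context, the scenario this change targets, as a minimal client-side
sketch (illustrative only, not part of this commit; the path and the
replication factor of 4 are made up):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class RaiseProvidedReplication {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      // Hypothetical file whose blocks initially reside only on
      // PROVIDED (external) storage.
      Path file = new Path("/provided/data.bin");
      // With this fix, the additional replicas requested here are
      // created on the Datanodes' local (e.g., DISK) storage.
      fs.setReplication(file, (short) 4);
    }
  }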
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a044f5c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index eb09b7b..8f59df6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -187,20 +187,23 @@ public abstract class BlockInfo extends Block
    */
   DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) {
     int len = getCapacity();
+    DatanodeStorageInfo providedStorageInfo = null;
     for(int idx = 0; idx < len; idx++) {
       DatanodeStorageInfo cur = getStorageInfo(idx);
       if(cur != null) {
         if (cur.getStorageType() == StorageType.PROVIDED) {
           //if block resides on provided storage, only match the storage ids
           if (dn.getStorageInfo(cur.getStorageID()) != null) {
-            return cur;
+            // do not return here as we have to check the other
+            // DatanodeStorageInfos for this block which could be local
+            providedStorageInfo = cur;
           }
         } else if (cur.getDatanodeDescriptor() == dn) {
           return cur;
         }
       }
     }
-    return null;
+    return providedStorageInfo;
   }
 
   /**
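
The crux of the BlockInfo change above: a PROVIDED match is recorded but
the scan continues, so a local replica on the queried Datanode takes
precedence and PROVIDED storage is only a fallback. A standalone sketch
of that selection pattern (simplified, made-up types, not Hadoop's):

  enum SketchStorageType { PROVIDED, DISK }

  class SketchStorage {
    final SketchStorageType type;
    final boolean matchesNode; // does this storage belong to the queried node?
    SketchStorage(SketchStorageType type, boolean matchesNode) {
      this.type = type;
      this.matchesNode = matchesNode;
    }
  }

  class SketchSelector {
    // Return a local match if one exists; otherwise fall back to a
    // PROVIDED match; otherwise null.
    static SketchStorage findStorage(SketchStorage[] storages) {
      SketchStorage provided = null;
      for (SketchStorage cur : storages) {
        if (cur == null || !cur.matchesNode) {
          continue;
        }
        if (cur.type == SketchStorageType.PROVIDED) {
          provided = cur; // remember, but keep scanning for a local match
        } else {
          return cur;     // a local replica wins immediately
        }
      }
      return provided;
    }
  }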
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a044f5c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 5a892c0..9cfc9dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1521,6 +1521,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
   }
 
+  private boolean isReplicaProvided(ReplicaInfo replicaInfo) {
+    if (replicaInfo == null) {
+      return false;
+    }
+    return replicaInfo.getVolume().getStorageType() == StorageType.PROVIDED;
+  }
+
   @Override // FsDatasetSpi
   public ReplicaHandler createTemporary(StorageType storageType,
       String storageId, ExtendedBlock b, boolean isTransfer)
@@ -1539,12 +1546,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       isInPipeline = currentReplicaInfo.getState() == ReplicaState.TEMPORARY
           || currentReplicaInfo.getState() == ReplicaState.RBW;
       /*
-       * If the current block is old, reject.
+       * If the current block is not PROVIDED and old, reject.
        * else If transfer request, then accept it.
        * else if state is not RBW/Temporary, then reject
+       * If current block is PROVIDED, ignore the replica.
        */
-      if ((currentReplicaInfo.getGenerationStamp() >= b.getGenerationStamp())
-          || (!isTransfer && !isInPipeline)) {
+      if (((currentReplicaInfo.getGenerationStamp() >= b
+          .getGenerationStamp()) || (!isTransfer && !isInPipeline))
+          && !isReplicaProvided(currentReplicaInfo)) {
         throw new ReplicaAlreadyExistsException("Block " + b
             + " already exists in state " + currentReplicaInfo.getState()
             + " and thus cannot be created.");
@@ -1564,11 +1573,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             + " after " + writerStopMs + " miniseconds.");
       }
 
+      // if lastFoundReplicaInfo is PROVIDED and FINALIZED,
+      // stopWriter isn't required.
+      if (isReplicaProvided(lastFoundReplicaInfo) &&
+          lastFoundReplicaInfo.getState() == ReplicaState.FINALIZED) {
+        continue;
+      }
       // Stop the previous writer
       ((ReplicaInPipeline)lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
     } while (true);
-
-    if (lastFoundReplicaInfo != null) {
+    if (lastFoundReplicaInfo != null
+        && !isReplicaProvided(lastFoundReplicaInfo)) {
       // Old blockfile should be deleted synchronously as it might collide
       // with the new block if allocated in same volume.
       // Do the deletion outside of lock as its DISK IO.
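
Net effect of the FsDatasetImpl changes above: an existing replica on
PROVIDED storage no longer blocks creation of a new local replica.
ReplicaAlreadyExistsException is raised only for non-PROVIDED replicas,
stopWriter is skipped when the PROVIDED replica is FINALIZED, and the old
block file is not deleted if it is PROVIDED. A condensed sketch of the
rejection guard (simplified, made-up types, not Hadoop's):

  enum SketchState { TEMPORARY, RBW, FINALIZED }

  class SketchReplica {
    final boolean provided; // true if the replica is on PROVIDED storage
    final SketchState state;
    final long genStamp;
    SketchReplica(boolean provided, SketchState state, long genStamp) {
      this.provided = provided;
      this.state = state;
      this.genStamp = genStamp;
    }
  }

  class SketchCreateTemporaryGuard {
    // Mirrors the shape of the new condition in createTemporary(): the
    // old rejection rules apply only when the replica is not PROVIDED.
    static void check(SketchReplica current, long requestGenStamp,
        boolean isTransfer) {
      boolean inPipeline = current.state == SketchState.TEMPORARY
          || current.state == SketchState.RBW;
      if ((current.genStamp >= requestGenStamp
          || (!isTransfer && !inPipeline)) && !current.provided) {
        throw new IllegalStateException("replica already exists");
      }
    }
  }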
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a044f5c9/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index f0303b5..1f6aebb 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -401,33 +401,37 @@ public class TestNameNodeProvidedImplementation {
   public void testSetReplicationForProvidedFiles() throws Exception {
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 2, null,
-        new StorageType[][]{
-            {StorageType.PROVIDED},
-            {StorageType.DISK}},
+    // 10 Datanodes with both DISK and PROVIDED storage
+    startCluster(NNDIRPATH, 10,
+        new StorageType[]{
+            StorageType.PROVIDED, StorageType.DISK},
+        null,
         false);
 
     String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
     Path file = new Path(filename);
     FileSystem fs = cluster.getFileSystem();
-    //set the replication to 2, and test that the file has
-    //the required replication.
-    fs.setReplication(file, (short) 2);
+    // set the replication to 4, and test that the file has
+    // the required replication.
+    short newReplication = 4;
+    LOG.info("Setting replication of file {} to {}", filename, newReplication);
+    fs.setReplication(file, newReplication);
     DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
-        file, (short) 2, 10000);
+        file, newReplication, 10000);
     DFSClient client = new DFSClient(new InetSocketAddress("localhost",
         cluster.getNameNodePort()), cluster.getConfiguration(0));
-    getAndCheckBlockLocations(client, filename, 2);
+    getAndCheckBlockLocations(client, filename, newReplication);
 
-    //set the replication back to 1
-    fs.setReplication(file, (short) 1);
+    // set the replication back to 1
+    newReplication = 1;
+    LOG.info("Setting replication of file {} back to {}",
+        filename, newReplication);
+    fs.setReplication(file, newReplication);
     DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
-        file, (short) 1, 10000);
-    //the only replica left should be the PROVIDED datanode
-    DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
-    assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
-        infos[0].getDatanodeUuid());
+        file, newReplication, 10000);
+    // the only replica left should be the PROVIDED datanode
+    getAndCheckBlockLocations(client, filename, newReplication);
   }
 
   @Test