From: virajith@apache.org
To: common-commits@hadoop.apache.org
Date: Fri, 15 Dec 2017 18:38:54 -0000
Subject: [08/50] [abbrv] hadoop git commit: HDFS-11791. [READ] Test for increasing replication of provided files.

HDFS-11791. [READ] Test for increasing replication of provided files.
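In short, the new test raises the replication factor of a file whose only
replica lives on PROVIDED storage, waits for a DISK replica to appear, and
then verifies the block locations reported by the namenode. A condensed
sketch of that flow (not the committed code verbatim; cluster, file, and
baseFileLen are existing fields/locals of TestNameNodeProvidedImplementation,
shown in the diff below):

  // Assumes a running MiniDFSCluster with one PROVIDED and one DISK datanode.
  FileSystem fs = cluster.getFileSystem();

  // Raise replication to 2 and wait until the second (DISK) replica exists.
  fs.setReplication(file, (short) 2);
  DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
      file, (short) 2, 10000);

  // Ask the namenode for the block's locations and check the replica count.
  DFSClient client = new DFSClient(new InetSocketAddress("localhost",
      cluster.getNameNodePort()), cluster.getConfiguration(0));
  LocatedBlocks blocks = client.getLocatedBlocks(
      file.toString(), 0, baseFileLen);
  assertEquals(2, blocks.getLocatedBlocks().get(0).getLocations().length);

  // Drop back to 1; the replica that survives should be the PROVIDED one.
  fs.setReplication(file, (short) 1);

The full commit follows.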
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab62326
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab62326
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab62326

Branch: refs/heads/HDFS-9806
Commit: 8ab623269f9d8a813f1917181759547e2e3ba2b4
Parents: e0793a7
Author: Virajith Jalaparti
Authored: Wed May 31 10:29:53 2017 -0700
Committer: Virajith Jalaparti
Committed: Fri Dec 15 10:18:26 2017 -0800

----------------------------------------------------------------------
 .../TestNameNodeProvidedImplementation.java | 55 ++++++++++++++++++++
 1 file changed, 55 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab62326/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 5062439..e171557 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -23,6 +23,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
+import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
@@ -34,10 +35,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockFormatProvider;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockProvider;
 import org.apache.hadoop.hdfs.server.common.BlockFormat;
@@ -378,4 +384,53 @@ public class TestNameNodeProvidedImplementation {
     assertEquals(1, locations.length);
     assertEquals(2, locations[0].getHosts().length);
   }
+
+  private DatanodeInfo[] getAndCheckBlockLocations(DFSClient client,
+      String filename, int expectedLocations) throws IOException {
+    LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+        filename, 0, baseFileLen);
+    //given the start and length in the above call,
+    //only one LocatedBlock in LocatedBlocks
+    assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+    LocatedBlock locatedBlock = locatedBlocks.getLocatedBlocks().get(0);
+    assertEquals(expectedLocations, locatedBlock.getLocations().length);
+    return locatedBlock.getLocations();
+  }
+
+  /**
+   * Tests setting replication of provided files.
+   * @throws Exception
+   */
+  @Test
+  public void testSetReplicationForProvidedFiles() throws Exception {
+    createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
+        FixedBlockResolver.class);
+    startCluster(NNDIRPATH, 2, null,
+        new StorageType[][] {
+            {StorageType.PROVIDED},
+            {StorageType.DISK}},
+        false);
+
+    String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
+    Path file = new Path(filename);
+    FileSystem fs = cluster.getFileSystem();
+
+    //set the replication to 2, and test that the file has
+    //the required replication.
+    fs.setReplication(file, (short) 2);
+    DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+        file, (short) 2, 10000);
+    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), cluster.getConfiguration(0));
+    getAndCheckBlockLocations(client, filename, 2);
+
+    //set the replication back to 1
+    fs.setReplication(file, (short) 1);
+    DFSTestUtil.waitForReplication((DistributedFileSystem) fs,
+        file, (short) 1, 10000);
+    //the only replica left should be the PROVIDED datanode
+    DatanodeInfo[] infos = getAndCheckBlockLocations(client, filename, 1);
+    assertEquals(cluster.getDataNodes().get(0).getDatanodeUuid(),
+        infos[0].getDatanodeUuid());
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org