From: szetszwo@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Thu, 25 Sep 2014 20:51:41 -0000
Message-Id: <84173ff99d904950b558119aecac8685@git.apache.org>
In-Reply-To: <44182ba831fd40a38f4e74f4e4f1c65e@git.apache.org>
References: <44182ba831fd40a38f4e74f4e4f1c65e@git.apache.org>
Subject: [2/6] Merge branch 'HDFS-6584' into trunk

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index d4af3d9..de392e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -255,12 +255,12 @@ public class TestDFSClientRetries {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null)).when(mockNN).getFileInfo(anyString());
+            1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null))
+            1010, 0, null, (byte) 0))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable) anyObject(), anyBoolean(),
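Context for the change above: the extra (byte) 0 argument threaded through these mocks corresponds to the storage policy ID field that HdfsFileStatus gains on the HDFS-6584 branch, with 0 presumably meaning "no policy explicitly set". A minimal sketch of reading the field back; the getStoragePolicy() accessor on HdfsFileStatus is assumed from its use later in this patch, and the class name below is only illustrative:

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    class StoragePolicyFieldSketch {
      // Mirrors the constructor call mocked above; the trailing (byte) 0 is taken to be
      // the new storage policy ID, where 0 denotes an unspecified policy.
      static byte policyIdOfFreshStatus() {
        HdfsFileStatus status = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
            new FsPermission((short) 777), "owner", "group",
            new byte[0], new byte[0], 1010, 0, null, (byte) 0);
        return status.getStoragePolicy(); // assumed accessor for the new field
      }
    }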
http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 45b588b..a608ba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -64,7 +64,7 @@ public class TestDFSInotifyEventInputStream {
    */
   @Test
   public void testOpcodeCount() {
-    Assert.assertTrue(FSEditLogOpCodes.values().length == 46);
+    Assert.assertTrue(FSEditLogOpCodes.values().length == 47);
   }
 
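The count bump from 46 to 47 reflects the single new edit log operation introduced on this branch for persisting storage policy changes. A hedged illustration of resolving it by name; the constant name OP_SET_STORAGE_POLICY is an assumption, not taken from this diff:

    import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;

    class NewOpcodeSketch {
      static FSEditLogOpCodes lookupNewOpcode() {
        // Enum.valueOf throws IllegalArgumentException if no such constant exists,
        // so this doubles as a presence check for the assumed new opcode.
        return FSEditLogOpCodes.valueOf("OP_SET_STORAGE_POLICY");
      }
    }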
http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 4360052..6119b6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyShort;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -341,12 +343,12 @@ public class TestLease {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0, null)).when(mcp).getFileInfo(anyString());
+            1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0, null))
+                1010, 0, null, (byte) 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable) anyObject(), anyBoolean(),
"/sub1/sub1sub3", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn0", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn1", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn0", 0, "/sub1/.snapshot/sn0/sub1sub1", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn0", 0, "/sub1/.snapshot/sn0/sub1sub2", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn1", 0, "/sub1/.snapshot/sn1/sub1sub1", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn1", 0, "/sub1/.snapshot/sn1/sub1sub3", conf); } @Test public void testMkdirUsingReservedName() throws Exception { // test can not create dir with reserved name: .snapshot - FsShellRun("-ls /"); - FsShellRun("-mkdir /.snapshot", 1, "File exists"); - FsShellRun("-mkdir /sub1/.snapshot", 1, "File exists"); + DFSTestUtil.FsShellRun("-ls /", conf); + DFSTestUtil.FsShellRun("-mkdir /.snapshot", 1, "File exists", conf); + DFSTestUtil.FsShellRun("-mkdir /sub1/.snapshot", 1, "File exists", conf); // mkdir -p ignore reserved name check if dir already exists - FsShellRun("-mkdir -p /sub1/.snapshot"); - FsShellRun("-mkdir -p /sub1/sub1sub1/.snapshot", 1, "mkdir: \".snapshot\" is a reserved name."); + DFSTestUtil.FsShellRun("-mkdir -p /sub1/.snapshot", conf); + DFSTestUtil.FsShellRun("-mkdir -p /sub1/sub1sub1/.snapshot", 1, "mkdir: \".snapshot\" is a reserved name.", conf); } @Test public void testRenameSnapshot() throws Exception { - FsShellRun("-createSnapshot /sub1 sn.orig"); - FsShellRun("-renameSnapshot /sub1 sn.orig sn.rename"); - FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn.rename"); - FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub1"); - FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub2"); + DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn.orig", conf); + DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.orig sn.rename", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn.rename", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub1", conf); + DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub2", conf); //try renaming from a non-existing snapshot - FsShellRun("-renameSnapshot /sub1 sn.nonexist sn.rename", 1, - "renameSnapshot: The snapshot sn.nonexist does not exist for directory /sub1"); + DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.nonexist sn.rename", 1, + "renameSnapshot: The snapshot sn.nonexist does not exist for directory /sub1", conf); //try renaming to existing snapshots - FsShellRun("-createSnapshot /sub1 sn.new"); - FsShellRun("-renameSnapshot /sub1 sn.new sn.rename", 1, - "renameSnapshot: The snapshot sn.rename already exists for directory /sub1"); - FsShellRun("-renameSnapshot /sub1 sn.rename sn.new", 1, - "renameSnapshot: The snapshot sn.new already exists for directory /sub1"); + DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn.new", conf); + DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.new sn.rename", 1, + "renameSnapshot: The snapshot sn.rename already exists for directory /sub1", conf); + DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.rename sn.new", 1, + "renameSnapshot: The snapshot sn.new already exists for directory /sub1", conf); } @Test public void testDeleteSnapshot() throws Exception { - FsShellRun("-createSnapshot /sub1 sn1"); - FsShellRun("-deleteSnapshot /sub1 sn1"); - FsShellRun("-deleteSnapshot /sub1 sn1", 1, - "deleteSnapshot: Cannot delete snapshot 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
new file mode 100644
index 0000000..d6ead09
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyCommands.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.hdfs; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Test storage policy related DFSAdmin commands + */ +public class TestStoragePolicyCommands { + private static final short REPL = 1; + private static final int SIZE = 128; + + private static Configuration conf; + private static MiniDFSCluster cluster; + private static DistributedFileSystem fs; + + @Before + public void clusterSetUp() throws IOException { + conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + } + + @After + public void clusterShutdown() throws IOException{ + if(fs != null){ + fs.close(); + } + if(cluster != null){ + cluster.shutdown(); + } + } + + @Test + public void testSetAndGetStoragePolicy() throws Exception { + final Path foo = new Path("/foo"); + final Path bar = new Path(foo, "bar"); + DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0); + + DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo WARM", 0, + "Set storage policy WARM on " + foo.toString(), conf); + DFSTestUtil.DFSAdminRun("-setStoragePolicy /foo/bar COLD", 0, + "Set storage policy COLD on " + bar.toString(), conf); + DFSTestUtil.DFSAdminRun("-setStoragePolicy /fooz WARM", -1, + "File/Directory does not exist: /fooz", conf); + + final BlockStoragePolicy.Suite suite = BlockStoragePolicy + .readBlockStorageSuite(conf); + final BlockStoragePolicy warm = suite.getPolicy("WARM"); + final BlockStoragePolicy cold = suite.getPolicy("COLD"); + DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo", 0, + "The storage policy of " + foo.toString() + ":\n" + warm, conf); + DFSTestUtil.DFSAdminRun("-getStoragePolicy /foo/bar", 0, + "The storage policy of " + bar.toString() + ":\n" + cold, conf); + DFSTestUtil.DFSAdminRun("-getStoragePolicy /fooz", -1, + "File/Directory does not exist: /fooz", conf); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index f270b32..1df31ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -89,7 +89,14 @@ public class TestBalancer { private static final Random r = new Random(); static { + initTestSetup(); + } + + public static void initTestSetup() { Dispatcher.setBlockMoveWaitTime(1000L) ; + + // do not create id file since it occupies the disk space + NameNodeConnector.setWrite2IdFile(false); } static void initConf(Configuration conf) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index d9d70d1..bd91366 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -44,7 +44,7 @@ public class TestBalancerWithHANameNodes { ClientProtocol client; static { - Dispatcher.setBlockMoveWaitTime(1000L); + TestBalancer.initTestSetup(); } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index a16a979..6ee6e54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -73,7 +73,7 @@ public class TestBalancerWithMultipleNameNodes { private static final Random RANDOM = new Random(); static { - Dispatcher.setBlockMoveWaitTime(1000L) ; + TestBalancer.initTestSetup(); } /** Common objects used in various methods. */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index 9961a2e..7af3a0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -75,7 +75,7 @@ public class TestBalancerWithNodeGroup { static final int DEFAULT_BLOCK_SIZE = 100; static { - Dispatcher.setBlockMoveWaitTime(1000L) ; + TestBalancer.initTestSetup(); } static Configuration createConf() { http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 1a8262f..7c0623c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import 
org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; @@ -52,6 +53,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetworkTopology; +import org.junit.Assert; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Before; import org.junit.Test; @@ -607,7 +609,6 @@ public class TestBlockManager { assertEquals(1, ds.getBlockReportCount()); } - /** * Tests that a namenode doesn't choose a datanode with full disks to * store blocks. @@ -654,5 +655,20 @@ public class TestBlockManager { cluster.shutdown(); } } -} + @Test + public void testUseDelHint() { + DatanodeStorageInfo delHint = new DatanodeStorageInfo( + DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id")); + List moreThan1Racks = Arrays.asList(delHint); + List excessTypes = new ArrayList(); + + excessTypes.add(StorageType.DEFAULT); + Assert.assertTrue(BlockManager.useDelHint(true, delHint, null, + moreThan1Racks, excessTypes)); + excessTypes.remove(0); + excessTypes.add(StorageType.SSD); + Assert.assertFalse(BlockManager.useDelHint(true, delHint, null, + moreThan1Racks, excessTypes)); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 7390b5a..b7ffe74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; @@ -45,10 +46,15 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import 
org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; @@ -222,7 +228,7 @@ public class TestReplicationPolicy { List chosenNodes, Set excludedNodes) { return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes, - false, excludedNodes, BLOCK_SIZE, StorageType.DEFAULT); + false, excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); } /** @@ -289,7 +295,7 @@ public class TestReplicationPolicy { excludedNodes.add(dataNodes[1]); chosenNodes.add(storages[2]); targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true, - excludedNodes, BLOCK_SIZE, StorageType.DEFAULT); + excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); System.out.println("targets=" + Arrays.asList(targets)); assertEquals(2, targets.length); //make sure that the chosen node is in the target. @@ -624,7 +630,7 @@ public class TestReplicationPolicy { .getNamesystem().getBlockManager().getBlockPlacementPolicy(); DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3, staleNodeInfo, new ArrayList(), false, null, - BLOCK_SIZE, StorageType.DEFAULT); + BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); assertEquals(targets.length, 3); assertFalse(isOnSameRack(targets[0], staleNodeInfo)); @@ -650,7 +656,7 @@ public class TestReplicationPolicy { // Call chooseTarget targets = replicator.chooseTarget(filename, 3, staleNodeInfo, new ArrayList(), false, null, BLOCK_SIZE, - StorageType.DEFAULT); + TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); assertEquals(targets.length, 3); assertTrue(isOnSameRack(targets[0], staleNodeInfo)); @@ -927,8 +933,16 @@ public class TestReplicationPolicy { // replica nodes, while storages[2] and dataNodes[5] are in second set. assertEquals(2, first.size()); assertEquals(2, second.size()); + List excessTypes = new ArrayList(); + { + // test returning null + excessTypes.add(StorageType.SSD); + assertNull(replicator.chooseReplicaToDelete( + null, null, (short)3, first, second, excessTypes)); + } + excessTypes.add(StorageType.DEFAULT); DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete( - null, null, (short)3, first, second); + null, null, (short)3, first, second, excessTypes); // Within first set, storages[1] with less free space assertEquals(chosen, storages[1]); @@ -936,11 +950,12 @@ public class TestReplicationPolicy { assertEquals(0, first.size()); assertEquals(3, second.size()); // Within second set, storages[5] with less free space + excessTypes.add(StorageType.DEFAULT); chosen = replicator.chooseReplicaToDelete( - null, null, (short)2, first, second); + null, null, (short)2, first, second, excessTypes); assertEquals(chosen, storages[5]); } - + /** * This testcase tests whether the default value returned by * DFSUtil.getInvalidateWorkPctPerIteration() is positive, http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java index 1b003fb..4a199dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java @@ -33,13 +33,12 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -147,7 +146,7 @@ public class TestReplicationPolicyConsiderLoad { DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager() .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3, dataNodes[0], new ArrayList(), false, null, - 1024, StorageType.DEFAULT); + 1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); assertEquals(3, targets.length); Set targetSet = new HashSet( http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index b615876..526c490 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; @@ -258,7 +259,7 @@ public class TestReplicationPolicyWithNodeGroup { List chosenNodes, Set excludedNodes) { return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes, - false, excludedNodes, BLOCK_SIZE, StorageType.DEFAULT); + false, excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); } /** @@ -340,7 +341,7 @@ public class TestReplicationPolicyWithNodeGroup { Set excludedNodes = new HashSet(); excludedNodes.add(dataNodes[1]); targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false, - excludedNodes, BLOCK_SIZE, StorageType.DEFAULT); + excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); assertEquals(targets.length, 4); assertEquals(storages[0], targets[0]); @@ -358,7 +359,7 @@ public class TestReplicationPolicyWithNodeGroup { excludedNodes.add(dataNodes[1]); chosenNodes.add(storages[2]); targets = 
repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true, - excludedNodes, BLOCK_SIZE, StorageType.DEFAULT); + excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY); System.out.println("targets=" + Arrays.asList(targets)); assertEquals(2, targets.length); //make sure that the chosen node is in the target. @@ -612,8 +613,10 @@ public class TestReplicationPolicyWithNodeGroup { replicaList, rackMap, first, second); assertEquals(3, first.size()); assertEquals(1, second.size()); + List excessTypes = new ArrayList(); + excessTypes.add(StorageType.DEFAULT); DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete( - null, null, (short)3, first, second); + null, null, (short)3, first, second, excessTypes); // Within first set {dataNodes[0], dataNodes[1], dataNodes[2]}, // dataNodes[0] and dataNodes[1] are in the same nodegroup, // but dataNodes[1] is chosen as less free space @@ -624,16 +627,18 @@ public class TestReplicationPolicyWithNodeGroup { assertEquals(1, second.size()); // Within first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen // as less free space + excessTypes.add(StorageType.DEFAULT); chosen = replicator.chooseReplicaToDelete( - null, null, (short)2, first, second); + null, null, (short)2, first, second, excessTypes); assertEquals(chosen, storages[2]); replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen); assertEquals(0, first.size()); assertEquals(2, second.size()); // Within second set, dataNodes[5] with less free space + excessTypes.add(StorageType.DEFAULT); chosen = replicator.chooseReplicaToDelete( - null, null, (short)1, first, second); + null, null, (short)1, first, second, excessTypes); assertEquals(chosen, storages[5]); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java new file mode 100644 index 0000000..5866c7f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.mover; + +import java.io.IOException; +import java.net.URI; +import java.util.*; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.*; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock; +import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector; +import org.apache.hadoop.hdfs.server.mover.Mover.MLocation; +import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.GenericOptionsParser; +import org.junit.Assert; +import org.junit.Test; + +public class TestMover { + static Mover newMover(Configuration conf) throws IOException { + final Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(1, namenodes.size()); + + final List nncs = NameNodeConnector.newNameNodeConnectors( + namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf); + return new Mover(nncs.get(0), conf); + } + + @Test + public void testScheduleSameBlock() throws IOException { + final Configuration conf = new HdfsConfiguration(); + final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(4).build(); + try { + cluster.waitActive(); + final DistributedFileSystem dfs = cluster.getFileSystem(); + final String file = "/testScheduleSameBlock/file"; + + { + final FSDataOutputStream out = dfs.create(new Path(file)); + out.writeChars("testScheduleSameBlock"); + out.close(); + } + + final Mover mover = newMover(conf); + mover.init(); + final Mover.Processor processor = mover.new Processor(); + + final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0); + final List locations = MLocation.toLocations(lb); + final MLocation ml = locations.get(0); + final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations); + + final List storageTypes = new ArrayList( + Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT)); + Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes)); + Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes)); + } finally { + cluster.shutdown(); + } + } + + private void checkMovePaths(List actual, Path... expected) { + Assert.assertEquals(expected.length, actual.size()); + for (Path p : expected) { + Assert.assertTrue(actual.contains(p)); + } + } + + /** + * Test Mover Cli by specifying a list of files/directories using option "-p". + * There is only one namenode (and hence name service) specified in the conf. 
+ */ + @Test + public void testMoverCli() throws Exception { + final MiniDFSCluster cluster = new MiniDFSCluster + .Builder(new HdfsConfiguration()).numDataNodes(0).build(); + try { + final Configuration conf = cluster.getConfiguration(0); + try { + Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar"); + Assert.fail("Expected exception for illegal path bar"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("bar is not absolute", e); + } + + Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf); + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(1, namenodes.size()); + Assert.assertEquals(1, movePaths.size()); + URI nn = namenodes.iterator().next(); + Assert.assertTrue(movePaths.containsKey(nn)); + Assert.assertNull(movePaths.get(nn)); + + movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar"); + namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(1, movePaths.size()); + nn = namenodes.iterator().next(); + Assert.assertTrue(movePaths.containsKey(nn)); + checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar")); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testMoverCliWithHAConf() throws Exception { + final Configuration conf = new HdfsConfiguration(); + final MiniDFSCluster cluster = new MiniDFSCluster + .Builder(new HdfsConfiguration()) + .nnTopology(MiniDFSNNTopology.simpleHATopology()) + .numDataNodes(0).build(); + HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster"); + try { + Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf, + "-p", "/foo", "/bar"); + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(1, namenodes.size()); + Assert.assertEquals(1, movePaths.size()); + URI nn = namenodes.iterator().next(); + Assert.assertEquals(new URI("hdfs://MyCluster"), nn); + Assert.assertTrue(movePaths.containsKey(nn)); + checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar")); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testMoverCliWithFederation() throws Exception { + final MiniDFSCluster cluster = new MiniDFSCluster + .Builder(new HdfsConfiguration()) + .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3)) + .numDataNodes(0).build(); + final Configuration conf = new HdfsConfiguration(); + DFSTestUtil.setFederatedConfiguration(cluster, conf); + try { + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(3, namenodes.size()); + + try { + Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo"); + Assert.fail("Expect exception for missing authority information"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "does not contain scheme and authority", e); + } + + try { + Mover.Cli.getNameNodePathsToMove(conf, "-p", "hdfs:///foo"); + Assert.fail("Expect exception for missing authority information"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "does not contain scheme and authority", e); + } + + try { + Mover.Cli.getNameNodePathsToMove(conf, "-p", "wrong-hdfs://ns1/foo"); + Assert.fail("Expect exception for wrong scheme"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Cannot resolve the path", e); + } + + Iterator iter = namenodes.iterator(); + URI nn1 = iter.next(); + URI nn2 = iter.next(); + Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf, + "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar"); + Assert.assertEquals(2, 
movePaths.size()); + checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar")); + checkMovePaths(movePaths.get(nn2), new Path("/foo/bar")); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testMoverCliWithFederationHA() throws Exception { + final MiniDFSCluster cluster = new MiniDFSCluster + .Builder(new HdfsConfiguration()) + .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3)) + .numDataNodes(0).build(); + final Configuration conf = new HdfsConfiguration(); + DFSTestUtil.setFederatedHAConfiguration(cluster, conf); + try { + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Assert.assertEquals(3, namenodes.size()); + + Iterator iter = namenodes.iterator(); + URI nn1 = iter.next(); + URI nn2 = iter.next(); + URI nn3 = iter.next(); + Map> movePaths = Mover.Cli.getNameNodePathsToMove(conf, + "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar"); + Assert.assertEquals(3, movePaths.size()); + checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar")); + checkMovePaths(movePaths.get(nn2), new Path("/foo/bar")); + checkMovePaths(movePaths.get(nn3), new Path("/foobar")); + } finally { + cluster.shutdown(); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java new file mode 100644 index 0000000..ad813cb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -0,0 +1,766 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.mover; + +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.BlockStoragePolicy; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSOutputStream; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; +import org.apache.hadoop.hdfs.server.balancer.Dispatcher; +import org.apache.hadoop.hdfs.server.balancer.ExitStatus; +import org.apache.hadoop.hdfs.server.balancer.TestBalancer; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; +import org.apache.hadoop.io.IOUtils; +import org.apache.log4j.Level; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; + +/** + * Test the data migration tool (for Archival Storage) + */ +public class TestStorageMover { + static final Log LOG = LogFactory.getLog(TestStorageMover.class); + static { + ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class) + ).getLogger().setLevel(Level.ALL); + ((Log4JLogger)LogFactory.getLog(Dispatcher.class) + ).getLogger().setLevel(Level.ALL); + ((Log4JLogger)LogFactory.getLog(DataTransferProtocol.class)).getLogger() + .setLevel(Level.ALL); + } + + private static final int BLOCK_SIZE = 1024; + private static final short REPL = 3; + private static final int NUM_DATANODES = 6; + private static final Configuration DEFAULT_CONF = new HdfsConfiguration(); + private static final BlockStoragePolicy.Suite DEFAULT_POLICIES; + private static final BlockStoragePolicy HOT; + private static final BlockStoragePolicy WARM; + private static final BlockStoragePolicy COLD; + + static { + DEFAULT_CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + DEFAULT_CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); + DEFAULT_CONF.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + 2L); + DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L); + + DEFAULT_POLICIES = BlockStoragePolicy.readBlockStorageSuite(DEFAULT_CONF); + HOT = DEFAULT_POLICIES.getPolicy("HOT"); + WARM = DEFAULT_POLICIES.getPolicy("WARM"); + COLD = DEFAULT_POLICIES.getPolicy("COLD"); + TestBalancer.initTestSetup(); + 
Dispatcher.setDelayAfterErrors(1000L); + } + + /** + * This scheme defines files/directories and their block storage policies. It + * also defines snapshots. + */ + static class NamespaceScheme { + final List dirs; + final List files; + final long fileSize; + final Map> snapshotMap; + final Map policyMap; + + NamespaceScheme(List dirs, List files, long fileSize, + Map> snapshotMap, + Map policyMap) { + this.dirs = dirs == null? Collections.emptyList(): dirs; + this.files = files == null? Collections.emptyList(): files; + this.fileSize = fileSize; + this.snapshotMap = snapshotMap == null ? + Collections.>emptyMap() : snapshotMap; + this.policyMap = policyMap; + } + + /** + * Create files/directories/snapshots. + */ + void prepare(DistributedFileSystem dfs, short repl) throws Exception { + for (Path d : dirs) { + dfs.mkdirs(d); + } + for (Path file : files) { + DFSTestUtil.createFile(dfs, file, fileSize, repl, 0L); + } + for (Map.Entry> entry : snapshotMap.entrySet()) { + for (String snapshot : entry.getValue()) { + SnapshotTestHelper.createSnapshot(dfs, entry.getKey(), snapshot); + } + } + } + + /** + * Set storage policies according to the corresponding scheme. + */ + void setStoragePolicy(DistributedFileSystem dfs) throws Exception { + for (Map.Entry entry : policyMap.entrySet()) { + dfs.setStoragePolicy(entry.getKey(), entry.getValue().getName()); + } + } + } + + /** + * This scheme defines DataNodes and their storage, including storage types + * and remaining capacities. + */ + static class ClusterScheme { + final Configuration conf; + final int numDataNodes; + final short repl; + final StorageType[][] storageTypes; + final long[][] storageCapacities; + + ClusterScheme() { + this(DEFAULT_CONF, NUM_DATANODES, REPL, + genStorageTypes(NUM_DATANODES), null); + } + + ClusterScheme(Configuration conf, int numDataNodes, short repl, + StorageType[][] types, long[][] capacities) { + Preconditions.checkArgument(types == null || types.length == numDataNodes); + Preconditions.checkArgument(capacities == null || capacities.length == + numDataNodes); + this.conf = conf; + this.numDataNodes = numDataNodes; + this.repl = repl; + this.storageTypes = types; + this.storageCapacities = capacities; + } + } + + class MigrationTest { + private final ClusterScheme clusterScheme; + private final NamespaceScheme nsScheme; + private final Configuration conf; + + private MiniDFSCluster cluster; + private DistributedFileSystem dfs; + private final BlockStoragePolicy.Suite policies; + + MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme) { + this.clusterScheme = cScheme; + this.nsScheme = nsScheme; + this.conf = clusterScheme.conf; + this.policies = BlockStoragePolicy.readBlockStorageSuite(conf); + } + + /** + * Set up the cluster and start NameNode and DataNodes according to the + * corresponding scheme. 
+     */
+    void setupCluster() throws Exception {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(clusterScheme
+          .numDataNodes).storageTypes(clusterScheme.storageTypes)
+          .storageCapacities(clusterScheme.storageCapacities).build();
+      cluster.waitActive();
+      dfs = cluster.getFileSystem();
+    }
+
+    private void runBasicTest(boolean shutdown) throws Exception {
+      setupCluster();
+      try {
+        prepareNamespace();
+        verify(true);
+
+        setStoragePolicy();
+        migrate();
+        verify(true);
+      } finally {
+        if (shutdown) {
+          shutdownCluster();
+        }
+      }
+    }
+
+    void shutdownCluster() throws Exception {
+      IOUtils.cleanup(null, dfs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    /**
+     * Create files/directories and set their storage policies according to the
+     * corresponding scheme.
+     */
+    void prepareNamespace() throws Exception {
+      nsScheme.prepare(dfs, clusterScheme.repl);
+    }
+
+    void setStoragePolicy() throws Exception {
+      nsScheme.setStoragePolicy(dfs);
+    }
+
+    /**
+     * Run the migration tool.
+     */
+    void migrate() throws Exception {
+      runMover();
+      Thread.sleep(5000); // let the NN finish deletion
+    }
+
+    /**
+     * Verify block locations after running the migration tool.
+     */
+    void verify(boolean verifyAll) throws Exception {
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.triggerBlockReport(dn);
+      }
+      if (verifyAll) {
+        verifyNamespace();
+      } else {
+        // TODO verify according to the given path list
+
+      }
+    }
+
+    private void runMover() throws Exception {
+      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Map<URI, List<Path>> nnMap = Maps.newHashMap();
+      for (URI nn : namenodes) {
+        nnMap.put(nn, null);
+      }
+      int result = Mover.run(nnMap, conf);
+      Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
+    }
+
+    private void verifyNamespace() throws Exception {
+      HdfsFileStatus status = dfs.getClient().getFileInfo("/");
+      verifyRecursively(null, status);
+    }
+
+    private void verifyRecursively(final Path parent,
+        final HdfsFileStatus status) throws Exception {
+      if (status.isDir()) {
+        Path fullPath = parent == null ?
+            new Path("/") : status.getFullPath(parent);
+        DirectoryListing children = dfs.getClient().listPaths(
+            fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
+        for (HdfsFileStatus child : children.getPartialListing()) {
+          verifyRecursively(fullPath, child);
+        }
+      } else if (!status.isSymlink()) { // is file
+        verifyFile(parent, status, null);
+      }
+    }
+
+    void verifyFile(final Path file, final Byte expectedPolicyId)
+        throws Exception {
+      final Path parent = file.getParent();
+      DirectoryListing children = dfs.getClient().listPaths(
+          parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
+      for (HdfsFileStatus child : children.getPartialListing()) {
+        if (child.getLocalName().equals(file.getName())) {
+          verifyFile(parent, child, expectedPolicyId);
+          return;
+        }
+      }
+      Assert.fail("File " + file + " not found.");
+    }
+
+    private void verifyFile(final Path parent, final HdfsFileStatus status,
+        final Byte expectedPolicyId) throws Exception {
+      HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
+      byte policyId = fileStatus.getStoragePolicy();
+      BlockStoragePolicy policy = policies.getPolicy(policyId);
+      if (expectedPolicyId != null) {
+        Assert.assertEquals((byte)expectedPolicyId, policy.getId());
+      }
+      final List<StorageType> types = policy.chooseStorageTypes(
+          status.getReplication());
+      for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
+        final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
+            lb.getStorageTypes());
+        Assert.assertTrue(fileStatus.getFullName(parent.toString())
+            + " with policy " + policy + " has non-empty overlap: " + diff
+            + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
+            diff.removeOverlap());
+      }
+    }
+
+    Replication getReplication(Path file) throws IOException {
+      return getOrVerifyReplication(file, null);
+    }
+
+    Replication verifyReplication(Path file, int expectedDiskCount,
+        int expectedArchiveCount) throws IOException {
+      final Replication r = new Replication();
+      r.disk = expectedDiskCount;
+      r.archive = expectedArchiveCount;
+      return getOrVerifyReplication(file, r);
+    }
+
+    private Replication getOrVerifyReplication(Path file, Replication expected)
+        throws IOException {
+      final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
+          file.toString(), 0).getLocatedBlocks();
+      Assert.assertEquals(1, lbs.size());
+
+      LocatedBlock lb = lbs.get(0);
+      StringBuilder types = new StringBuilder();
+      final Replication r = new Replication();
+      for(StorageType t : lb.getStorageTypes()) {
+        types.append(t).append(", ");
+        if (t == StorageType.DISK) {
+          r.disk++;
+        } else if (t == StorageType.ARCHIVE) {
+          r.archive++;
+        } else {
+          Assert.fail("Unexpected storage type " + t);
+        }
+      }
+
+      if (expected != null) {
+        final String s = "file = " + file + "\n types = [" + types + "]";
+        Assert.assertEquals(s, expected, r);
+      }
+      return r;
+    }
+  }
+
+  static class Replication {
+    int disk;
+    int archive;
+
+    @Override
+    public int hashCode() {
+      return disk ^ archive;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == this) {
+        return true;
+      } else if (obj == null || !(obj instanceof Replication)) {
+        return false;
+      }
+      final Replication that = (Replication)obj;
+      return this.disk == that.disk && this.archive == that.archive;
+    }
+
+    @Override
+    public String toString() {
+      return "[disk=" + disk + ", archive=" + archive + "]";
+    }
+  }
+
+  private static StorageType[][] genStorageTypes(int numDataNodes) {
+    return genStorageTypes(numDataNodes, 0, 0);
+  }
+
+  private static StorageType[][] genStorageTypes(int numDataNodes,
+      int numAllDisk, int numAllArchive) {
+    StorageType[][] types = new StorageType[numDataNodes][];
+    int i = 0;
+    for (; i < numAllDisk; i++) {
+      types[i] = new StorageType[]{StorageType.DISK, StorageType.DISK};
+    }
+    for (; i < numAllDisk + numAllArchive; i++) {
+      types[i] = new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE};
+    }
+    for (; i < types.length; i++) {
+      types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
+    }
+    return types;
+  }
+
+  private static long[][] genCapacities(int nDatanodes, int numAllDisk,
+      int numAllArchive, long diskCapacity, long archiveCapacity) {
+    final long[][] capacities = new long[nDatanodes][];
+    int i = 0;
+    for (; i < numAllDisk; i++) {
+      capacities[i] = new long[]{diskCapacity, diskCapacity};
+    }
+    for (; i < numAllDisk + numAllArchive; i++) {
+      capacities[i] = new long[]{archiveCapacity, archiveCapacity};
+    }
+    for(; i < capacities.length; i++) {
+      capacities[i] = new long[]{diskCapacity, archiveCapacity};
+    }
+    return capacities;
+  }
+
+  private static class PathPolicyMap {
+    final Map<Path, BlockStoragePolicy> map = Maps.newHashMap();
+    final Path hot = new Path("/hot");
+    final Path warm = new Path("/warm");
+    final Path cold = new Path("/cold");
+    final List<Path> files;
+
+    PathPolicyMap(int filesPerDir){
+      map.put(hot, HOT);
+      map.put(warm, WARM);
+      map.put(cold, COLD);
+      files = new ArrayList<Path>();
+      for(Path dir : map.keySet()) {
+        for(int i = 0; i < filesPerDir; i++) {
+          files.add(new Path(dir, "file" + i));
+        }
+      }
+    }
+
+    NamespaceScheme newNamespaceScheme() {
+      return new NamespaceScheme(Arrays.asList(hot, warm, cold),
+          files, BLOCK_SIZE/2, null, map);
+    }
+
+    /**
+     * Move hot files to warm and cold, warm files to hot and cold,
+     * and cold files to hot and warm.
+     */
+    void moveAround(DistributedFileSystem dfs) throws Exception {
+      for(Path srcDir : map.keySet()) {
+        int i = 0;
+        for(Path dstDir : map.keySet()) {
+          if (!srcDir.equals(dstDir)) {
+            final Path src = new Path(srcDir, "file" + i++);
+            final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
+            LOG.info("rename " + src + " to " + dst);
+            dfs.rename(src, dst);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * A normal case for Mover: move a file into archival storage.
+   */
+  @Test
+  public void testMigrateFileToArchival() throws Exception {
+    LOG.info("testMigrateFileToArchival");
+    final Path foo = new Path("/foo");
+    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
+    policyMap.put(foo, COLD);
+    NamespaceScheme nsScheme = new NamespaceScheme(null, Arrays.asList(foo),
+        2*BLOCK_SIZE, null, policyMap);
+    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
+        NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
+    new MigrationTest(clusterScheme, nsScheme).runBasicTest(true);
+  }
+
+  /**
+   * Print a big banner in the test log to make debugging easier.
+   */
+  static void banner(String string) {
+    LOG.info("\n\n\n\n================================================\n"
+        + string + "\n"
+        + "==================================================\n\n");
+  }
+
+  /**
+   * Move an open file into archival storage.
+   */
+  @Test
+  public void testMigrateOpenFileToArchival() throws Exception {
+    LOG.info("testMigrateOpenFileToArchival");
+    final Path fooDir = new Path("/foo");
+    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
+    policyMap.put(fooDir, COLD);
+    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
+        BLOCK_SIZE, null, policyMap);
+    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
+        NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
+    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+    test.setupCluster();
+
+    // create an open file
+    banner("writing to file /foo/bar");
+    final Path barFile = new Path(fooDir, "bar");
+    DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
+    FSDataOutputStream out = test.dfs.append(barFile);
+    out.writeBytes("hello, ");
+    ((DFSOutputStream) out.getWrappedStream()).hsync();
+
+    try {
+      banner("start data migration");
+      test.setStoragePolicy(); // set /foo to COLD
+      test.migrate();
+
+      // make sure the under-construction block has not been migrated
+      LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
+          barFile.toString(), BLOCK_SIZE);
+      LOG.info("Locations: " + lbs);
+      List<LocatedBlock> blks = lbs.getLocatedBlocks();
+      Assert.assertEquals(1, blks.size());
+      Assert.assertEquals(1, blks.get(0).getLocations().length);
+
+      banner("finish the migration, continue writing");
+      // make sure the writing can continue
+      out.writeBytes("world!");
+      ((DFSOutputStream) out.getWrappedStream()).hsync();
+      IOUtils.cleanup(LOG, out);
+
+      lbs = test.dfs.getClient().getLocatedBlocks(
+          barFile.toString(), BLOCK_SIZE);
+      LOG.info("Locations: " + lbs);
+      blks = lbs.getLocatedBlocks();
+      Assert.assertEquals(1, blks.size());
+      Assert.assertEquals(1, blks.get(0).getLocations().length);
+
+      banner("finish writing, starting reading");
+      // check the content of /foo/bar
+      FSDataInputStream in = test.dfs.open(barFile);
+      byte[] buf = new byte[13];
+      // read from offset 1024
+      in.readFully(BLOCK_SIZE, buf, 0, buf.length);
+      IOUtils.cleanup(LOG, in);
+      Assert.assertEquals("hello, world!", new String(buf));
+    } finally {
+      test.shutdownCluster();
+    }
+  }
+
+  /**
+   * Test directories with Hot, Warm and Cold policies.
+   */
+  @Test
+  public void testHotWarmColdDirs() throws Exception {
+    LOG.info("testHotWarmColdDirs");
+    PathPolicyMap pathPolicyMap = new PathPolicyMap(3);
+    NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
+    ClusterScheme clusterScheme = new ClusterScheme();
+    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+
+    try {
+      test.runBasicTest(false);
+      pathPolicyMap.moveAround(test.dfs);
+      test.migrate();
+
+      test.verify(true);
+    } finally {
+      test.shutdownCluster();
+    }
+  }
+
+  private void waitForAllReplicas(int expectedReplicaNum, Path file,
+      DistributedFileSystem dfs) throws Exception {
+    for (int i = 0; i < 5; i++) {
+      LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
+          BLOCK_SIZE);
+      LocatedBlock lb = lbs.get(0);
+      if (lb.getLocations().length >= expectedReplicaNum) {
+        return;
+      } else {
+        Thread.sleep(1000);
+      }
+    }
+  }
+
+  /**
+   * Test the case where DISK is running out of space.
+   */
+  @Test
+  public void testNoSpaceDisk() throws Exception {
+    LOG.info("testNoSpaceDisk");
+    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
+    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
+
+    final long diskCapacity = (6 + HdfsConstants.MIN_BLOCKS_FOR_WRITE)
+        * BLOCK_SIZE;
+    final long archiveCapacity = 100 * BLOCK_SIZE;
+    final long[][] capacities = genCapacities(NUM_DATANODES, 1, 1,
+        diskCapacity, archiveCapacity);
+    Configuration conf = new Configuration(DEFAULT_CONF);
+    final ClusterScheme clusterScheme = new ClusterScheme(conf,
+        NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES, 1, 1), capacities);
+    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+
+    try {
+      test.runBasicTest(false);
+
+      // create hot files with replication 3 until there is no more space
+      final short replication = 3;
+      {
+        int hotFileCount = 0;
+        try {
+          for (; ; hotFileCount++) {
+            final Path p = new Path(pathPolicyMap.hot, "file" + hotFileCount);
+            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+            waitForAllReplicas(replication, p, test.dfs);
+          }
+        } catch (IOException e) {
+          LOG.info("Expected: hotFileCount=" + hotFileCount, e);
+        }
+        Assert.assertTrue(hotFileCount >= 1);
+      }
+
+      // create hot files with replication 1 to use up all remaining space
+      {
+        int hotFileCount_r1 = 0;
+        try {
+          for (; ; hotFileCount_r1++) {
+            final Path p = new Path(pathPolicyMap.hot, "file_r1_" + hotFileCount_r1);
+            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 1, 0L);
+            waitForAllReplicas(1, p, test.dfs);
+          }
+        } catch (IOException e) {
+          LOG.info("Expected: hotFileCount_r1=" + hotFileCount_r1, e);
+        }
+      }
+
+      { // test increasing replication. Since DISK is full,
+        // new replicas should be stored in ARCHIVE as fallback storage.
+        final Path file0 = new Path(pathPolicyMap.hot, "file0");
+        final Replication r = test.getReplication(file0);
+        final short newReplication = (short) 5;
+        test.dfs.setReplication(file0, newReplication);
+        Thread.sleep(10000);
+        test.verifyReplication(file0, r.disk, newReplication - r.disk);
+      }
+
+      { // test creating a cold file and then increasing its replication
+        final Path p = new Path(pathPolicyMap.cold, "foo");
+        DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+        test.verifyReplication(p, 0, replication);
+
+        final short newReplication = 5;
+        test.dfs.setReplication(p, newReplication);
+        Thread.sleep(10000);
+        test.verifyReplication(p, 0, newReplication);
+      }
+
+      { // test moving a hot file to warm
+        final Path file1 = new Path(pathPolicyMap.hot, "file1");
+        test.dfs.rename(file1, pathPolicyMap.warm);
+        test.migrate();
+        test.verifyFile(new Path(pathPolicyMap.warm, "file1"), WARM.getId());
+      }
+    } finally {
+      test.shutdownCluster();
+    }
+  }
+
+  /**
+   * Test the case where ARCHIVE is running out of space.
+   */
+  @Test
+  public void testNoSpaceArchive() throws Exception {
+    LOG.info("testNoSpaceArchive");
+    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
+    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
+
+    final long diskCapacity = 100 * BLOCK_SIZE;
+    final long archiveCapacity = (6 + HdfsConstants.MIN_BLOCKS_FOR_WRITE)
+        * BLOCK_SIZE;
+    final long[][] capacities = genCapacities(NUM_DATANODES, 1, 1,
+        diskCapacity, archiveCapacity);
+    final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
+        NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES, 1, 1), capacities);
+    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
+
+    try {
+      test.runBasicTest(false);
+
+      // create cold files with replication 3 until there is no more space
+      final short replication = 3;
+      {
+        int coldFileCount = 0;
+        try {
+          for (; ; coldFileCount++) {
+            final Path p = new Path(pathPolicyMap.cold, "file" + coldFileCount);
+            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+            waitForAllReplicas(replication, p, test.dfs);
+          }
+        } catch (IOException e) {
+          LOG.info("Expected: coldFileCount=" + coldFileCount, e);
+        }
+        Assert.assertTrue(coldFileCount >= 1);
+      }
+
+      // create cold files with replication 1 to use up all remaining space
+      {
+        int coldFileCount_r1 = 0;
+        try {
+          for (; ; coldFileCount_r1++) {
+            final Path p = new Path(pathPolicyMap.cold, "file_r1_" + coldFileCount_r1);
+            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 1, 0L);
+            waitForAllReplicas(1, p, test.dfs);
+          }
+        } catch (IOException e) {
+          LOG.info("Expected: coldFileCount_r1=" + coldFileCount_r1, e);
+        }
+      }
+
+      { // test increasing replication; new replicas cannot be created
+        // since there is no more ARCHIVE space.
+        final Path file0 = new Path(pathPolicyMap.cold, "file0");
+        final Replication r = test.getReplication(file0);
+        LOG.info("XXX " + file0 + ": replication=" + r);
+        Assert.assertEquals(0, r.disk);
+
+        final short newReplication = (short) 5;
+        test.dfs.setReplication(file0, newReplication);
+        Thread.sleep(10000);
+
+        test.verifyReplication(file0, 0, r.archive);
+      }
+
+      { // test creating a hot file
+        final Path p = new Path(pathPolicyMap.hot, "foo");
+        DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 3, 0L);
+      }
+
+      { // test moving a cold file to warm
+        final Path file1 = new Path(pathPolicyMap.cold, "file1");
+        test.dfs.rename(file1, pathPolicyMap.warm);
+        test.migrate();
+        test.verify(true);
+      }
+    } finally {
+      test.shutdownCluster();
+    }
+  }
+}
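
The TestStorageMover tests above exercise the end-to-end archival-storage flow added by HDFS-6584: tag a path with a storage policy, run the Mover, and let it re-replicate blocks onto the storage types the policy prescribes. The snippet below is a minimal illustrative sketch of that flow outside the test harness, not part of this patch; it assumes the DistributedFileSystem#setStoragePolicy(Path, String) API added on this branch and reuses the Mover.run(...) pattern from MigrationTest.runMover() above (Mover, ExitStatus, DFSUtil, Maps, Assert and the usual Path/URI/List/Map imports are assumed).

  // Illustrative sketch, not part of the patch.
  void archivePath(DistributedFileSystem dfs, Configuration conf, Path path)
      throws Exception {
    // Tag the path with the COLD (ARCHIVE-only) policy. Existing replicas are
    // not moved until the Mover runs.
    dfs.setStoragePolicy(path, "COLD");   // assumed API from this branch

    // Run the Mover against every namenode with no path restriction,
    // mirroring MigrationTest.runMover() above.
    Map<URI, List<Path>> nnMap = Maps.newHashMap();
    for (URI nn : DFSUtil.getNsServiceRpcUris(conf)) {
      nnMap.put(nn, null);
    }
    int exitCode = Mover.run(nnMap, conf);
    Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), exitCode);
  }
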

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index 3f96c0c..94b139b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -82,7 +82,7 @@ public class CreateEditsLog {
     }
 
     final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
-        p, 0L, 0L, blocks, replication, blockSize);
+        p, 0L, 0L, blocks, replication, blockSize, (byte)0);
     inode.toUnderConstruction("", "");
 
     // Append path to filename with information about blockIDs
@@ -97,7 +97,7 @@ public class CreateEditsLog {
       editLog.logMkDir(currentDir, dirInode);
     }
     INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
-        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
+        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
     fileUc.toUnderConstruction("", "");
     editLog.logOpenFile(filePath, fileUc, false, false);
     editLog.logCloseFile(filePath, inode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 5153e76..cf37a54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -120,9 +120,9 @@ public class TestAddBlockRetry {
         }
         return ret;
       }
-    }).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
+    }).when(spyBM).chooseTarget4NewBlock(Mockito.anyString(), Mockito.anyInt(),
         Mockito.any(), Mockito.>any(),
-        Mockito.anyLong(), Mockito.>any());
+        Mockito.anyLong(), Mockito.>any(), Mockito.anyByte());
 
     // create file
     nn.create(src, FsPermission.getFileDefault(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 4cdd809..b0f6b6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -29,13 +29,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -121,10 +121,10 @@ public class TestDeleteRace {
         boolean returnChosenNodes,
         Set excludedNodes,
         long blocksize,
-        StorageType storageType) {
+        final BlockStoragePolicy storagePolicy) {
       DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
           numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
-          blocksize, storageType);
+          blocksize, storagePolicy);
       try {
         Thread.sleep(3000);
       } catch (InterruptedException e) {}
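
The TestDeleteRace hunk above shows the shape of the new placement API: chooseTarget now receives a BlockStoragePolicy and derives the per-replica storage types from it, rather than taking a single StorageType. The lines below are a short illustrative sketch of that expansion, not part of the patch, using only calls already visible in MigrationTest.verifyFile() above (policies.getPolicy(), chooseStorageTypes(), WARM.getId()); the WARM layout described in the comment is the expected behavior, not something this hunk asserts.

  // Illustrative sketch, not part of the patch: expand a policy into the
  // storage type expected for each replica of a block.
  BlockStoragePolicy warm = policies.getPolicy(WARM.getId());
  List<StorageType> expected = warm.chooseStorageTypes((short) 3);
  // For WARM with replication 3 the expectation is one DISK replica with the
  // remaining replicas on ARCHIVE; the Mover reconciles this list against the
  // storage types actually holding the block (see Mover.StorageTypeDiff).
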

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 377eb84..89ef215 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -194,7 +194,7 @@ public class TestEditLog {
 
     for (int i = 0; i < numTransactions; i++) {
       INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
-          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
+          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
       inode.toUnderConstruction("", "");
 
       editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index b1c5ca7..9bee4a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -17,32 +17,41 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.fs.permission.AclEntryScope.*;
-import static org.apache.hadoop.fs.permission.AclEntryType.*;
-import static org.apache.hadoop.fs.permission.FsAction.*;
-import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.MASK;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.NONE;
+import static org.apache.hadoop.fs.permission.FsAction.READ;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
+import static org.apache.hadoop.fs.permission.FsAction.WRITE;
+import static org.apache.hadoop.fs.permission.FsAction.WRITE_EXECUTE;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
 import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
-import org.junit.Before;
-import org.junit.Test;
-
 import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Before;
+import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-
-import static org.mockito.Mockito.*;
 
 /**
  * Unit tests covering FSPermissionChecker. All tests in this suite have been
 * cross-validated against Linux setfacl/getfacl to check for consistency of the
@@ -423,7 +432,7 @@ public class TestFSPermissionChecker {
       FsPermission.createImmutable(perm));
     INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
       name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
-      PREFERRED_BLOCK_SIZE);
+      PREFERRED_BLOCK_SIZE, (byte)0);
     parent.addChild(inodeFile);
     return inodeFile;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/022474c8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4cddd60..8d298ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1015,10 +1015,11 @@ public class TestFsck {
     path = DFSUtil.string2Bytes(pathString);
     long fileId = 312321L;
     int numChildren = 1;
+    byte storagePolicy = 0;
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink, path,
-        fileId, numChildren, null);
+        fileId, numChildren, null, storagePolicy);
     Result res = new Result(conf);
     try {