Return-Path: X-Original-To: apmail-hadoop-common-commits-archive@www.apache.org Delivered-To: apmail-hadoop-common-commits-archive@www.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id 1EAE81842E for ; Thu, 10 Sep 2015 06:08:59 +0000 (UTC) Received: (qmail 1567 invoked by uid 500); 10 Sep 2015 06:08:58 -0000 Delivered-To: apmail-hadoop-common-commits-archive@hadoop.apache.org Received: (qmail 1499 invoked by uid 500); 10 Sep 2015 06:08:58 -0000 Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: common-dev@hadoop.apache.org Delivered-To: mailing list common-commits@hadoop.apache.org Received: (qmail 1490 invoked by uid 99); 10 Sep 2015 06:08:58 -0000 Received: from git1-us-west.apache.org (HELO git1-us-west.apache.org) (140.211.11.23) by apache.org (qpsmtpd/0.29) with ESMTP; Thu, 10 Sep 2015 06:08:58 +0000 Received: by git1-us-west.apache.org (ASF Mail Server at git1-us-west.apache.org, from userid 33) id 9C6C3E0A9B; Thu, 10 Sep 2015 06:08:58 +0000 (UTC) Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit From: zhz@apache.org To: common-commits@hadoop.apache.org Date: Thu, 10 Sep 2015 06:08:58 -0000 Message-Id: <8221db3419b64d2fb7532a7c27921ba1@git.apache.org> X-Mailer: ASF-Git Admin Mailer Subject: [1/2] hadoop git commit: HDFS-8833. Erasure coding: store EC schema and cell size in INodeFile and eliminate notion of EC zones. 
Repository: hadoop Updated Branches: refs/heads/HDFS-7285 60bd765ac -> f62237bc2 http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java index f3260da..b857d3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFactory; import org.apache.hadoop.fs.shell.PathData; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.ErasureCodingZone; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.util.StringUtils; @@ -45,9 +44,9 @@ public abstract class ECCommand extends Command { public static void registerCommands(CommandFactory factory) { // Register all commands of Erasure CLI, with a '-' at the beginning in name // of the command. - factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME); - factory.addClass(GetECZoneCommand.class, "-" - + GetECZoneCommand.NAME); + factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME); + factory.addClass(GetECPolicyCommand.class, "-" + + GetECPolicyCommand.NAME); factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME); } @@ -76,17 +75,18 @@ public abstract class ECCommand extends Command { } /** - * A command to create an EC zone for a path, with a erasure coding policy name. 
+ * A command to set the erasure coding policy for a directory, with the name + * of the policy. */ - static class CreateECZoneCommand extends ECCommand { - public static final String NAME = "createZone"; + static class SetECPolicyCommand extends ECCommand { + public static final String NAME = "setPolicy"; public static final String USAGE = "[-s ] "; public static final String DESCRIPTION = - "Create a zone to encode files using a specified policy\n" + "Set a specified erasure coding policy to a directory\n" + "Options :\n" + " -s : erasure coding policy name to encode files. " + "If not passed the default policy will be used\n" - + " : Path to an empty directory. Under this directory " + + " : Path to a directory. Under this directory " + "files will be encoded using specified erasure coding policy"; private String ecPolicyName; private ErasureCodingPolicy ecPolicy = null; @@ -129,23 +129,23 @@ public abstract class ECCommand extends Command { throw new HadoopIllegalArgumentException(sb.toString()); } } - dfs.createErasureCodingZone(item.path, ecPolicy); - out.println("EC Zone created successfully at " + item.path); + dfs.setErasureCodingPolicy(item.path, ecPolicy); + out.println("EC policy set successfully at " + item.path); } catch (IOException e) { - throw new IOException("Unable to create EC zone for the path " + throw new IOException("Unable to set EC policy for the path " + item.path + ". 
" + e.getMessage()); } } } /** - * Get the information about the zone + * Get the erasure coding policy of a file or directory */ - static class GetECZoneCommand extends ECCommand { - public static final String NAME = "getZone"; + static class GetECPolicyCommand extends ECCommand { + public static final String NAME = "getPolicy"; public static final String USAGE = ""; public static final String DESCRIPTION = - "Get information about the EC zone at specified path\n"; + "Get erasure coding policy information about at specified path\n"; @Override protected void processOptions(LinkedList args) throws IOException { @@ -162,14 +162,14 @@ public abstract class ECCommand extends Command { super.processPath(item); DistributedFileSystem dfs = (DistributedFileSystem) item.fs; try { - ErasureCodingZone ecZone = dfs.getErasureCodingZone(item.path); - if (ecZone != null) { - out.println(ecZone.toString()); + ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(item.path); + if (ecPolicy != null) { + out.println(ecPolicy.toString()); } else { - out.println("Path " + item.path + " is not in EC zone"); + out.println("Path " + item.path + " is not erasure coded."); } } catch (IOException e) { - throw new IOException("Unable to get EC zone for the path " + throw new IOException("Unable to get EC policy for the path " + item.path + ". 
" + e.getMessage()); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 6500b96..3551055 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1892,12 +1892,12 @@ public class DFSTestUtil { public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir) throws Exception { DistributedFileSystem dfs = cluster.getFileSystem(); - // If outer test already created EC zone, dir should be left as null + // If outer test already set EC policy, dir should be left as null if (toMkdir) { assert dir != null; dfs.mkdirs(dir); try { - dfs.getClient().createErasureCodingZone(dir.toString(), null); + dfs.getClient().setErasureCodingPolicy(dir.toString(), null); } catch (IOException e) { if (!e.getMessage().contains("non-empty directory")) { throw e; http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java index a09f0f0..6fcf644 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java @@ -80,7 +80,7 @@ public class TestDFSStripedInputStream { } fs = cluster.getFileSystem(); fs.mkdirs(dirPath); - fs.getClient().createErasureCodingZone(dirPath.toString(), null); + fs.getClient().setErasureCodingPolicy(dirPath.toString(), null); } @After http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java index a467f40..4263ffa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java @@ -68,7 +68,7 @@ public class TestDFSStripedOutputStream { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java index 2aa8484..afb7f95 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java @@ -118,7 +118,7 @@ public class TestDFSStripedOutputStreamWithFailure { cluster.waitActive(); dfs = cluster.getFileSystem(); dfs.mkdirs(dir); - dfs.createErasureCodingZone(dir, null); + dfs.setErasureCodingPolicy(dir, null); } private void tearDown() { http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java new file mode 100644 index 0000000..f60d77d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -0,0 +1,211 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; + +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.junit.Assert.*; + +public class TestErasureCodingPolicies { + private Configuration conf; + private MiniDFSCluster cluster; + private DistributedFileSystem fs; + private static final int BLOCK_SIZE = 1024; + private FSNamesystem namesystem; + + @Before + public void setupCluster() throws IOException { + conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + cluster = new MiniDFSCluster.Builder(conf). 
+ numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + namesystem = cluster.getNamesystem(); + } + + @After + public void shutdownCluster() throws IOException { + cluster.shutdown(); + } + + @Test + public void testBasicSetECPolicy() + throws IOException, InterruptedException { + final Path testDir = new Path("/ec"); + fs.mkdir(testDir, FsPermission.getDirDefault()); + + /* Normal creation of an erasure coding directory */ + fs.getClient().setErasureCodingPolicy(testDir.toString(), null); + + /* Verify files under the directory are striped */ + final Path ECFilePath = new Path(testDir, "foo"); + fs.create(ECFilePath); + INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString()); + assertTrue(inode.asFile().isStriped()); + + /** + * Verify that setting EC policy on non-empty directory only affects + * newly created files under the directory. + */ + final Path notEmpty = new Path("/nonEmpty"); + fs.mkdir(notEmpty, FsPermission.getDirDefault()); + final Path oldFile = new Path(notEmpty, "old"); + fs.create(oldFile); + fs.getClient().setErasureCodingPolicy(notEmpty.toString(), null); + final Path newFile = new Path(notEmpty, "new"); + fs.create(newFile); + INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString()); + assertFalse(oldInode.asFile().isStriped()); + INode newInode = namesystem.getFSDirectory().getINode(newFile.toString()); + assertTrue(newInode.asFile().isStriped()); + + /* Verify that nested EC policies not supported */ + final Path dir1 = new Path("/dir1"); + final Path dir2 = new Path(dir1, "dir2"); + fs.mkdir(dir1, FsPermission.getDirDefault()); + fs.getClient().setErasureCodingPolicy(dir1.toString(), null); + fs.mkdir(dir2, FsPermission.getDirDefault()); + try { + fs.getClient().setErasureCodingPolicy(dir2.toString(), null); + fail("Nested erasure coding policies"); + } catch (IOException e) { + assertExceptionContains("already has an erasure coding policy", e); + } + + /* Verify that EC 
policy cannot be set on a file */ + final Path fPath = new Path("/file"); + fs.create(fPath); + try { + fs.getClient().setErasureCodingPolicy(fPath.toString(), null); + fail("Erasure coding policy on file"); + } catch (IOException e) { + assertExceptionContains("erasure coding policy for a file", e); + } + } + + @Test + public void testMoveValidity() throws IOException, InterruptedException { + final Path srcECDir = new Path("/srcEC"); + final Path dstECDir = new Path("/dstEC"); + fs.mkdir(srcECDir, FsPermission.getDirDefault()); + fs.mkdir(dstECDir, FsPermission.getDirDefault()); + fs.getClient().setErasureCodingPolicy(srcECDir.toString(), null); + fs.getClient().setErasureCodingPolicy(dstECDir.toString(), null); + final Path srcFile = new Path(srcECDir, "foo"); + fs.create(srcFile); + + // Test move dir + // Move EC dir under non-EC dir + final Path newDir = new Path("/srcEC_new"); + fs.rename(srcECDir, newDir); + fs.rename(newDir, srcECDir); // move back + + // Move EC dir under another EC dir + fs.rename(srcECDir, dstECDir); + fs.rename(new Path("/dstEC/srcEC"), srcECDir); // move back + + // Test move file + /* Verify that a file can be moved between 2 EC dirs */ + fs.rename(srcFile, dstECDir); + fs.rename(new Path(dstECDir, "foo"), srcECDir); // move back + + /* Verify that a file can be moved from a non-EC dir to an EC dir */ + final Path nonECDir = new Path("/nonEC"); + fs.mkdir(nonECDir, FsPermission.getDirDefault()); + fs.rename(srcFile, nonECDir); + + /* Verify that a file can be moved from an EC dir to a non-EC dir */ + final Path nonECFile = new Path(nonECDir, "nonECFile"); + fs.create(nonECFile); + fs.rename(nonECFile, dstECDir); + } + + @Test + public void testReplication() throws IOException { + final Path testDir = new Path("/ec"); + fs.mkdir(testDir, FsPermission.getDirDefault()); + fs.setErasureCodingPolicy(testDir, null); + final Path fooFile = new Path(testDir, "foo"); + // create ec file with replication=0 + fs.create(fooFile, 
FsPermission.getFileDefault(), true, + conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), + (short)0, fs.getDefaultBlockSize(fooFile), null); + // set replication should be a no-op + fs.setReplication(fooFile, (short) 3); + } + + @Test + public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Exception { + String src = "/ec"; + final Path ecDir = new Path(src); + fs.mkdir(ecDir, FsPermission.getDirDefault()); + // dir EC policy should be null + assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); + // dir EC policy after setting + fs.getClient().setErasureCodingPolicy(src, null); //Default one will be used. + ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy(); + verifyErasureCodingInfo(src, sysDefaultECPolicy); + fs.create(new Path(ecDir, "child1")).close(); + // verify for the files in ec dir + verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy); + } + + @Test + public void testGetErasureCodingPolicy() throws Exception { + ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices(); + assertTrue("System ecPolicies should be of only 1 for now", + sysECPolicies.length == 1); + + ErasureCodingPolicy usingECPolicy = sysECPolicies[0]; + String src = "/ec2"; + final Path ecDir = new Path(src); + fs.mkdir(ecDir, FsPermission.getDirDefault()); + // dir ECInfo before being set + assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); + // dir ECInfo after set + fs.getClient().setErasureCodingPolicy(src, usingECPolicy); + verifyErasureCodingInfo(src, usingECPolicy); + fs.create(new Path(ecDir, "child1")).close(); + // verify for the files in ec dir + verifyErasureCodingInfo(src + "/child1", usingECPolicy); + } + + private void verifyErasureCodingInfo( + String src, ErasureCodingPolicy usingECPolicy) throws IOException { + HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src); + ErasureCodingPolicy ecPolicy = 
hdfsFileStatus.getErasureCodingPolicy(); + assertNotNull(ecPolicy); + assertEquals("Actually used ecPolicy should be equal with target ecPolicy", + usingECPolicy, ecPolicy); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java deleted file mode 100644 index b68aab9..0000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java +++ /dev/null @@ -1,217 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; -import static org.junit.Assert.*; - -public class TestErasureCodingZones { - private Configuration conf; - private MiniDFSCluster cluster; - private DistributedFileSystem fs; - private static final int BLOCK_SIZE = 1024; - private FSNamesystem namesystem; - - @Before - public void setupCluster() throws IOException { - conf = new HdfsConfiguration(); - conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - cluster = new MiniDFSCluster.Builder(conf). 
- numDataNodes(1).build(); - cluster.waitActive(); - fs = cluster.getFileSystem(); - namesystem = cluster.getNamesystem(); - } - - @After - public void shutdownCluster() throws IOException { - cluster.shutdown(); - } - - @Test - public void testCreateECZone() - throws IOException, InterruptedException { - final Path testDir = new Path("/ec"); - fs.mkdir(testDir, FsPermission.getDirDefault()); - - /* Normal creation of an erasure coding zone */ - fs.getClient().createErasureCodingZone(testDir.toString(), null); - - /* Verify files under the zone are striped */ - final Path ECFilePath = new Path(testDir, "foo"); - fs.create(ECFilePath); - INode inode = namesystem.getFSDirectory().getINode(ECFilePath.toString()); - assertTrue(inode.asFile().isStriped()); - - /* Verify that EC zone cannot be created on non-empty dir */ - final Path notEmpty = new Path("/nonEmpty"); - fs.mkdir(notEmpty, FsPermission.getDirDefault()); - fs.create(new Path(notEmpty, "foo")); - try { - fs.getClient().createErasureCodingZone(notEmpty.toString(), null); - fail("Erasure coding zone on non-empty dir"); - } catch (IOException e) { - assertExceptionContains("erasure coding zone for a non-empty directory", e); - } - - /* Verify that nested EC zones cannot be created */ - final Path zone1 = new Path("/zone1"); - final Path zone2 = new Path(zone1, "zone2"); - fs.mkdir(zone1, FsPermission.getDirDefault()); - fs.getClient().createErasureCodingZone(zone1.toString(), null); - fs.mkdir(zone2, FsPermission.getDirDefault()); - try { - fs.getClient().createErasureCodingZone(zone2.toString(), null); - fail("Nested erasure coding zones"); - } catch (IOException e) { - assertExceptionContains("already in an erasure coding zone", e); - } - - /* Verify that EC zone cannot be created on a file */ - final Path fPath = new Path("/file"); - fs.create(fPath); - try { - fs.getClient().createErasureCodingZone(fPath.toString(), null); - fail("Erasure coding zone on file"); - } catch (IOException e) { - 
assertExceptionContains("erasure coding zone for a file", e); - } - } - - @Test - public void testMoveValidity() throws IOException, InterruptedException { - final Path srcECDir = new Path("/srcEC"); - final Path dstECDir = new Path("/dstEC"); - fs.mkdir(srcECDir, FsPermission.getDirDefault()); - fs.mkdir(dstECDir, FsPermission.getDirDefault()); - fs.getClient().createErasureCodingZone(srcECDir.toString(), null); - fs.getClient().createErasureCodingZone(dstECDir.toString(), null); - final Path srcFile = new Path(srcECDir, "foo"); - fs.create(srcFile); - - // Test move dir - // Move EC dir under non-EC dir - final Path newDir = new Path("/srcEC_new"); - fs.rename(srcECDir, newDir); - fs.rename(newDir, srcECDir); // move back - - // Move EC dir under another EC dir - fs.rename(srcECDir, dstECDir); - fs.rename(new Path("/dstEC/srcEC"), srcECDir); // move back - - // Test move file - /* Verify that a file can be moved between 2 EC zones */ - fs.rename(srcFile, dstECDir); - fs.rename(new Path(dstECDir, "foo"), srcECDir); // move back - - /* Verify that a file cannot be moved from a non-EC dir to an EC zone */ - final Path nonECDir = new Path("/nonEC"); - fs.mkdir(nonECDir, FsPermission.getDirDefault()); - try { - fs.rename(srcFile, nonECDir); - fail("A file shouldn't be able to move from a non-EC dir to an EC zone"); - } catch (IOException e) { - assertExceptionContains("can't be moved because the source and " + - "destination have different erasure coding policies", e); - } - - /* Verify that a file cannot be moved from an EC zone to a non-EC dir */ - final Path nonECFile = new Path(nonECDir, "nonECFile"); - fs.create(nonECFile); - try { - fs.rename(nonECFile, dstECDir); - } catch (IOException e) { - assertExceptionContains("can't be moved because the source and " + - "destination have different erasure coding policies", e); - } - } - - @Test - public void testReplication() throws IOException { - final Path testDir = new Path("/ec"); - fs.mkdir(testDir, 
FsPermission.getDirDefault()); - fs.createErasureCodingZone(testDir, null); - final Path fooFile = new Path(testDir, "foo"); - // create ec file with replication=0 - fs.create(fooFile, FsPermission.getFileDefault(), true, - conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), - (short)0, fs.getDefaultBlockSize(fooFile), null); - // set replication should be a no-op - fs.setReplication(fooFile, (short) 3); - } - - @Test - public void testGetErasureCodingInfoWithSystemDefaultECPolicy() throws Exception { - String src = "/ec"; - final Path ecDir = new Path(src); - fs.mkdir(ecDir, FsPermission.getDirDefault()); - // dir ECInfo before creating ec zone - assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); - // dir ECInfo after creating ec zone - fs.getClient().createErasureCodingZone(src, null); //Default one will be used. - ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy(); - verifyErasureCodingInfo(src, sysDefaultECPolicy); - fs.create(new Path(ecDir, "child1")).close(); - // verify for the files in ec zone - verifyErasureCodingInfo(src + "/child1", sysDefaultECPolicy); - } - - @Test - public void testGetErasureCodingInfo() throws Exception { - ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager.getSystemPolices(); - assertTrue("System ecPolicies should be of only 1 for now", - sysECPolicies.length == 1); - - ErasureCodingPolicy usingECPolicy = sysECPolicies[0]; - String src = "/ec2"; - final Path ecDir = new Path(src); - fs.mkdir(ecDir, FsPermission.getDirDefault()); - // dir ECInfo before creating ec zone - assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy()); - // dir ECInfo after creating ec zone - fs.getClient().createErasureCodingZone(src, usingECPolicy); - verifyErasureCodingInfo(src, usingECPolicy); - fs.create(new Path(ecDir, "child1")).close(); - // verify for the files in ec zone - verifyErasureCodingInfo(src + "/child1", usingECPolicy); - } - - private 
void verifyErasureCodingInfo( - String src, ErasureCodingPolicy usingECPolicy) throws IOException { - HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src); - ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy(); - assertNotNull(ecPolicy); - assertEquals("Actually used ecPolicy should be equal with target ecPolicy", - usingECPolicy, ecPolicy); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java index 4610ced..64063d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java @@ -38,24 +38,24 @@ public class TestFileStatusWithECPolicy { @Test public void testFileStatusWithECPolicy() throws Exception { - // test directory not in EC zone + // test directory doesn't have an EC policy final Path dir = new Path("/foo"); assertTrue(fs.mkdir(dir, FsPermission.getDirDefault())); assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy()); - // test file not in EC zone + // test file doesn't have an EC policy final Path file = new Path(dir, "foo"); fs.create(file).close(); assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy()); fs.delete(file, true); final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy(); - // create EC zone on dir - fs.createErasureCodingZone(dir, ecPolicy1); + // set EC policy on dir + fs.setErasureCodingPolicy(dir, ecPolicy1); final ErasureCodingPolicy ecPolicy2 = 
client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy(); assertNotNull(ecPolicy2); assertTrue(ecPolicy1.equals(ecPolicy2)); - // test file in EC zone + // test file with an EC policy fs.create(file).close(); final ErasureCodingPolicy ecPolicy3 = fs.getClient().getFileInfo(file.toUri().getPath()).getErasureCodingPolicy(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java index 9048fbd..68cd25e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java @@ -68,7 +68,7 @@ public class TestReadStripedFileWithDecoding { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java index 08a5f58..ca376af 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java @@ -53,7 +53,7 @@ public class TestReadStripedFileWithMissingBlocks { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java index 7a0851f..b581845 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java @@ -78,7 +78,7 @@ public class TestRecoverStripedFile { cluster.waitActive(); fs = cluster.getFileSystem(); - fs.getClient().createErasureCodingZone("/", null); + fs.getClient().setErasureCodingPolicy("/", null); List datanodes = cluster.getDataNodes(); for (int i = 0; i < dnNum; i++) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index f577ddb..318eb9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -54,7 +54,7 @@ public class TestSafeModeWithStripedFile { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); cluster.waitActive(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index b383c85..5381fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -57,7 +57,7 @@ public class TestWriteReadStripedFile { public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java index deffbcc..50f98a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize; -import static org.apache.hadoop.hdfs.StripedFileTestUtil.cellSize; import static org.apache.hadoop.hdfs.StripedFileTestUtil.dataBlocks; import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs; import static org.apache.hadoop.hdfs.StripedFileTestUtil.parityBlocks; @@ -48,7 +47,7 @@ public class TestWriteStripedFileWithFailure { public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); } @@ -158,4 +157,4 @@ public class TestWriteStripedFileWithFailure { throw new IOException("Failed at i=" + i, e); } } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java ---------------------------------------------------------------------- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index eb24fb0..21e60c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -82,7 +82,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; @@ -1702,7 +1701,7 @@ public class TestBalancer { cluster.waitActive(); client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); - client.createErasureCodingZone("/", null); + client.setErasureCodingPolicy("/", null); long totalCapacity = sum(capacities); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 2202b34..9dc537c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -56,7 +56,7 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { conf = getConf(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient() - .createErasureCodingZone("/", null); + .setErasureCodingPolicy("/", null); try { cluster.waitActive(); doTestRead(conf, cluster, true); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java index 2e084fc..9f4c47d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java @@ -71,7 +71,7 @@ public class TestSequentialBlockGroupId { private MiniDFSCluster cluster; private FileSystem fs; private SequentialBlockGroupIdGenerator blockGrpIdGenerator; - private Path eczone = new Path("/eczone"); + private Path ecDir = new Path("/ecDir"); @Before public void setup() throws Exception { @@ -84,9 +84,9 @@ public class TestSequentialBlockGroupId { fs = cluster.getFileSystem(); blockGrpIdGenerator = cluster.getNamesystem().getBlockIdManager() .getBlockGroupIdGenerator(); - fs.mkdirs(eczone); + fs.mkdirs(ecDir); cluster.getFileSystem().getClient() - .createErasureCodingZone("/eczone", null); + .setErasureCodingPolicy("/ecDir", null); } @After @@ -104,7 +104,7 @@ public class 
TestSequentialBlockGroupId { long blockGroupIdInitialValue = blockGrpIdGenerator.getCurrentValue(); // Create a file that is 4 blocks long. - Path path = new Path(eczone, "testBlockGrpIdGeneration.dat"); + Path path = new Path(ecDir, "testBlockGrpIdGeneration.dat"); DFSTestUtil.createFile(fs, path, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks = DFSTestUtil.getAllBlocks(fs, path); @@ -134,7 +134,7 @@ public class TestSequentialBlockGroupId { // Create a file with a few blocks to rev up the global block ID // counter. - Path path1 = new Path(eczone, "testBlockGrpIdCollisionDetection_file1.dat"); + Path path1 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file1.dat"); DFSTestUtil.createFile(fs, path1, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks1 = DFSTestUtil.getAllBlocks(fs, path1); @@ -145,7 +145,7 @@ public class TestSequentialBlockGroupId { blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue); // Trigger collisions by creating a new file. - Path path2 = new Path(eczone, "testBlockGrpIdCollisionDetection_file2.dat"); + Path path2 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file2.dat"); DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks2 = DFSTestUtil.getAllBlocks(fs, path2); @@ -204,7 +204,7 @@ public class TestSequentialBlockGroupId { // Reset back to the initial value to trigger collision blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue); // Trigger collisions by creating a new file. 
- Path path2 = new Path(eczone, "testCollisionWithLegacyBlock_file2.dat"); + Path path2 = new Path(ecDir, "testCollisionWithLegacyBlock_file2.dat"); DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize, REPLICATION, SEED); List blocks2 = DFSTestUtil.getAllBlocks(fs, path2); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 2598fa8..7794f04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -470,8 +470,8 @@ public class TestMover { client.mkdirs(barDir, new FsPermission((short) 777), true); client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); - // set "/bar" directory with EC zone. 
- client.createErasureCodingZone(barDir, null); + // set an EC policy on "/bar" directory + client.setErasureCodingPolicy(barDir, null); // write file to barDir final String fooFile = "/bar/foo"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java index 7d06a9b..ae2cbab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.junit.After; @@ -39,7 +38,6 @@ import org.junit.Test; import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.List; import static org.junit.Assert.assertEquals; @@ -70,7 +68,7 @@ public class TestAddOverReplicatedStripedBlocks { cluster.waitActive(); fs = cluster.getFileSystem(); fs.mkdirs(dirPath); - fs.getClient().createErasureCodingZone(dirPath.toString(), null); + fs.getClient().setErasureCodingPolicy(dirPath.toString(), null); } @After 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index c472cd8..4351fb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -74,7 +74,7 @@ public class TestAddStripedBlocks { .numDataNodes(GROUP_SIZE).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); - dfs.getClient().createErasureCodingZone("/", null); + dfs.getClient().setErasureCodingPolicy("/", null); } @After http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 694411f..40572f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -452,7 +452,7 @@ public class TestFSEditLogLoader { //set the storage policy of the directory fs.mkdir(new Path(testDir), new FsPermission("755")); - fs.getClient().getNamenode().createErasureCodingZone(testDir, null); + 
fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null); // Create a file with striped block Path p = new Path(testFilePath); @@ -524,7 +524,7 @@ public class TestFSEditLogLoader { //set the storage policy of the directory fs.mkdir(new Path(testDir), new FsPermission("755")); - fs.getClient().getNamenode().createErasureCodingZone(testDir, null); + fs.getClient().getNamenode().setErasureCodingPolicy(testDir, null); //create a file with striped blocks Path p = new Path(testFilePath); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index d3689f3..7565e86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -141,7 +141,7 @@ public class TestFSImage { private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf, boolean isUC) throws IOException{ // contruct a INode with StripedBlock for saving and loading - fsn.createErasureCodingZone("/", null, false); + fsn.setErasureCodingPolicy("/", null, false); long id = 123456789; byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes(); PermissionStatus permissionStatus = new PermissionStatus("testuser_a", @@ -425,7 +425,7 @@ public class TestFSImage { .build(); cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); - fs.getClient().getNamenode().createErasureCodingZone("/", null); + fs.getClient().getNamenode().setErasureCodingPolicy("/", null); Path file = new Path("/striped"); FSDataOutputStream out = 
fs.create(file); byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index de29997..efa5027 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import org.junit.After; import org.junit.Assert; @@ -66,7 +65,7 @@ public class TestQuotaWithStripedBlocks { dfs = cluster.getFileSystem(); dfs.mkdirs(ecDir); - dfs.getClient().createErasureCodingZone(ecDir.toString(), ecPolicy); + dfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy); dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA); dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA); dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java ---------------------------------------------------------------------- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index 48b22c0..2a593d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -217,8 +217,8 @@ public class TestStripedINodeFile { try { final int len = 1024; final Path parentDir = new Path("/parentDir"); - final Path zone = new Path(parentDir, "zone"); - final Path zoneFile = new Path(zone, "zoneFile"); + final Path ecDir = new Path(parentDir, "ecDir"); + final Path ecFile = new Path(ecDir, "ecFile"); final Path contiguousFile = new Path(parentDir, "someFile"); final DistributedFileSystem dfs; final Configuration conf = new Configuration(); @@ -232,18 +232,18 @@ public class TestStripedINodeFile { FSNamesystem fsn = cluster.getNamesystem(); dfs = cluster.getFileSystem(); - dfs.mkdirs(zone); + dfs.mkdirs(ecDir); - // create erasure zone - dfs.createErasureCodingZone(zone, null); - DFSTestUtil.createFile(dfs, zoneFile, len, (short) 1, 0xFEED); + // set erasure coding policy + dfs.setErasureCodingPolicy(ecDir, null); + DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED); DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED); final FSDirectory fsd = fsn.getFSDirectory(); // Case-1: Verify the behavior of striped blocks // Get blocks of striped file - INode inodeStriped = fsd.getINode("/parentDir/zone/zoneFile"); - assertTrue("Failed to get INodeFile for /parentDir/zone/zoneFile", + INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile"); + assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile", inodeStriped instanceof INodeFile); INodeFile inodeStripedFile = (INodeFile) inodeStriped; BlockInfo[] stripedBlks = 
inodeStripedFile.getBlocks(); @@ -252,8 +252,8 @@ public class TestStripedINodeFile { blockInfo.isDeleted()); } - // delete erasure zone directory - dfs.delete(zone, true); + // delete directory with erasure coding policy + dfs.delete(ecDir, true); for (BlockInfo blockInfo : stripedBlks) { assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted()); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java index 7bfaab6..1e2326a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java @@ -60,7 +60,7 @@ public class TestOfflineImageViewerWithStripedBlocks { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); - cluster.getFileSystem().getClient().createErasureCodingZone("/", null); + cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); Path eczone = new Path("/eczone"); fs.mkdirs(eczone); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62237bc/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml ---------------------------------------------------------------------- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml index 5e60658..3a10a50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml @@ -48,39 +48,39 @@ - help: createZone command + help: setPolicy command - -fs NAMENODE -help createZone + -fs NAMENODE -help setPolicy RegexpComparator - ^[ \t]*Create a zone to encode files using a specified policy( )* + ^[ \t]*Set a specified erasure coding policy to a directory( )* RegexpComparator - ^-createZone \[-s <policyName>\] <path>(.)* + ^-setPolicy \[-s <policyName>\] <path>(.)* - help: getZone command + help: getPolicy command - -fs NAMENODE -help getZone + -fs NAMENODE -help getPolicy SubstringComparator - Get information about the EC zone at specified path + Get erasure coding policy information about at specified path RegexpComparator - ^-getZone <path>(.)* + ^-getPolicy <path>(.)* @@ -106,63 +106,63 @@ - createZone : create a zone to encode files + setPolicy : set erasure coding policy on a directory to encode files - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s RS-6-3-64k /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - EC Zone created successfully at NAMENODE/eczone + EC policy set successfully at NAMENODE/ecdir - createZone : create a zone twice + setPolicy : set a policy twice - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone /eczone - -fs NAMENODE -createZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -setPolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - Directory /eczone is already in an erasure coding zone + Directory /ecdir already has an erasure coding policy - createZone 
: default policy + setPolicy : default policy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone /eczone - -fs NAMENODE -getZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir + -fs NAMENODE -getPolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k + ErasureCodingPolicy=[Name=RS-6-3-64k - getZone : get information about the EC zone at specified path not in zone + getPolicy : get EC policy information at specified path, which doesn't have an EC policy -fs NAMENODE -mkdir /noec - -fs NAMENODE -getZone /noec + -fs NAMENODE -getPolicy /noec -fs NAMENODE -rmdir /noec @@ -170,45 +170,45 @@ SubstringComparator - Path NAMENODE/noec is not in EC zone + Path NAMENODE/noec is not erasure coded - getZone : get information about the EC zone at specified path + getPolicy : get EC policy information at specified path, which has an EC policy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s RS-6-3-64k /eczone - -fs NAMENODE -getZone /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir + -fs NAMENODE -getPolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - Dir: /eczone, Policy: ErasureCodingPolicy=[Name=RS-6-3-64k + ErasureCodingPolicy=[Name=RS-6-3-64k - getZone : get EC zone at specified file path + getPolicy : get EC policy information at specified file path, which has an EC policy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s RS-6-3-64k /eczone - -fs NAMENODE -touchz /eczone/ecfile - -fs NAMENODE -getZone /eczone/ecfile + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s RS-6-3-64k /ecdir + -fs NAMENODE -touchz /ecdir/ecfile + -fs NAMENODE -getPolicy /ecdir/ecfile - -fs NAMENODE -rm /eczone/ecfile - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rm /ecdir/ecfile + -fs NAMENODE -rmdir /ecdir SubstringComparator - Dir: /eczone, Policy: 
ErasureCodingPolicy=[Name=RS-6-3-64k + ErasureCodingPolicy=[Name=RS-6-3-64k @@ -230,64 +230,64 @@ - createZone : illegal parameters - path is missing + setPolicy : illegal parameters - path is missing - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir RegexpComparator - ^-createZone: <path> is missing(.)* + ^-setPolicy: <path> is missing(.)* - createZone : illegal parameters - policy name is missing + setPolicy : illegal parameters - policy name is missing - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir RegexpComparator - ^-createZone: option -s requires 1 argument(.)* + ^-setPolicy: option -s requires 1 argument(.)* - createZone : illegal parameters - too many arguments + setPolicy : illegal parameters - too many arguments - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone /eczone1 /eczone2 + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy /ecdir1 /ecdir2 - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir SubstringComparator - -createZone: Too many arguments + -setPolicy: Too many arguments - createZone : illegal parameters - invalidpolicy + setPolicy : illegal parameters - invalidpolicy - -fs NAMENODE -mkdir /eczone - -fs NAMENODE -createZone -s invalidpolicy /eczone + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -s invalidpolicy /ecdir - -fs NAMENODE -rmdir /eczone + -fs NAMENODE -rmdir /ecdir @@ -298,62 +298,62 @@ - createZone : illegal parameters - no such file + setPolicy : illegal parameters - no such file - -fs NAMENODE -createZone /eczone + -fs NAMENODE -setPolicy /ecdir RegexpComparator - ^createZone: `/eczone': No such file or directory(.)* + ^setPolicy: `/ecdir': No such file or directory(.)* - getZone : illegal parameters - path is missing + getPolicy : illegal parameters - path is 
missing - -fs NAMENODE -getZone + -fs NAMENODE -getPolicy RegexpComparator - ^-getZone: <path> is missing(.)* + ^-getPolicy: <path> is missing(.)* - getZone : illegal parameters - too many arguments + getPolicy : illegal parameters - too many arguments - -fs NAMENODE -getZone /eczone /eczone + -fs NAMENODE -getPolicy /ecdir /ecdir - -fs NAMENODE -rm /eczone + -fs NAMENODE -rm /ecdir SubstringComparator - -getZone: Too many arguments + -getPolicy: Too many arguments - getZone : illegal parameters - no such file + getPolicy : illegal parameters - no such file - -fs NAMENODE -getZone /eczone + -fs NAMENODE -getPolicy /ecdir RegexpComparator - ^getZone: `/eczone': No such file or directory(.)* + ^getPolicy: `/ecdir': No such file or directory(.)* @@ -361,7 +361,7 @@ listPolicies : illegal parameters - too many parameters - -fs NAMENODE -listPolicies /eczone + -fs NAMENODE -listPolicies /ecdir