From: zjshen@apache.org
To: common-commits@hadoop.apache.org
Date: Thu, 18 Jun 2015 18:24:56 -0000
Subject: [18/50] [abbrv] hadoop git commit: HDFS-8361. Choose SSD over DISK in block placement.

HDFS-8361. Choose SSD over DISK in block placement.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e24417a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e24417a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e24417a

Branch: refs/heads/YARN-2928
Commit: 5e24417a5c01654e818e59940a73aa960c3d5f0d
Parents: b181b87
Author: Tsz-Wo Nicholas Sze
Authored: Mon Jun 15 17:12:01 2015 -0700
Committer: Zhijie Shen
Committed: Thu Jun 18 11:10:06 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/StorageType.java |  7 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  2 +
 .../hadoop/hdfs/TestBlockStoragePolicy.java    | 75 +++++++++++++++++++-
 3 files changed, 80 insertions(+), 4 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 68069d7..0948801 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -33,10 +33,11 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
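----------------------------------------------------------------------
Note on the reordering above: EnumSet and EnumMap iterate in enum
declaration order, so any code that walks StorageType values now sees
the faster media first. A minimal standalone sketch of that behavior,
for illustration only (this demo class mirrors the reordered enum and
is not part of the commit):

    import java.util.EnumSet;

    public class StorageOrderDemo {
      // Mirrors the declaration order introduced above, fast to slow.
      enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

      public static void main(String[] args) {
        // Prints RAM_DISK(0), SSD(1), DISK(2), ARCHIVE(3).
        for (StorageType t : EnumSet.allOf(StorageType.class)) {
          System.out.println(t + "(" + t.ordinal() + ")");
        }
      }
    }
----------------------------------------------------------------------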
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9822575..79e7820 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -920,6 +920,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8521. Add VisibleForTesting annotation to
     BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+    HDFS-8361. Choose SSD over DISK in block placement. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e24417a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index ea69f97..0d59ded 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@ import java.util.*;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,7 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -1153,6 +1154,50 @@ public class TestBlockStoragePolicy {
   }
 
   @Test
+  public void testChooseSsdOverDisk() throws Exception {
+    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
+        new StorageType[]{StorageType.SSD, StorageType.DISK,
+            StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
+
+    final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
+    final String[] hosts = {"host1", "host2", "host3"};
+    final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
+
+    final DatanodeStorageInfo[] diskStorages
+        = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
+    final DatanodeDescriptor[] dataNodes
+        = DFSTestUtil.toDatanodeDescriptor(diskStorages);
+    for(int i = 0; i < dataNodes.length; i++) {
+      BlockManagerTestUtil.updateStorage(dataNodes[i],
+          new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
+              StorageType.SSD));
+    }
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+    DFSTestUtil.formatNameNode(conf);
+    NameNode namenode = new NameNode(conf);
+
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+    NetworkTopology cluster =
+        bm.getDatanodeManager().getNetworkTopology();
+    for (DatanodeDescriptor datanode : dataNodes) {
+      cluster.add(datanode);
+    }
+
+    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
+        new HashSet<Node>(), 0, policy);
+    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+    Assert.assertEquals(2, targets.length);
+    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  }
+
+  @Test
   public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
     //HDFS8219
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -1233,4 +1278,32 @@ public class TestBlockStoragePolicy {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testStorageType() {
+    final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
+
+    // put the storage types in reversed order
+    map.put(StorageType.ARCHIVE, 1);
+    map.put(StorageType.DISK, 1);
+    map.put(StorageType.SSD, 1);
+    map.put(StorageType.RAM_DISK, 1);
+
+    {
+      final Iterator<StorageType> i = map.keySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.SSD, i.next());
+      Assert.assertEquals(StorageType.DISK, i.next());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next());
+    }
+
+    {
+      final Iterator<Map.Entry<StorageType, Integer>> i
+          = map.entrySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.SSD, i.next().getKey());
+      Assert.assertEquals(StorageType.DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+    }
+  }
 }
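----------------------------------------------------------------------
Context for testChooseSsdOverDisk above: every datanode in the test
exposes both a DISK and an SSD volume, and the TEST1 policy prefers
SSD, then DISK, then ARCHIVE, so with the reordered enum the chosen
targets come back SSD first. A simplified, self-contained sketch of
that selection idea (a hypothetical helper, not the real
BlockPlacementPolicy API):

    import java.util.Arrays;
    import java.util.EnumSet;
    import java.util.List;

    public class PlacementSketch {
      // Illustration only; mirrors org.apache.hadoop.fs.StorageType.
      enum StorageType { RAM_DISK, SSD, DISK, ARCHIVE }

      // Hypothetical helper: return the first type from the policy's
      // preference list that the node actually offers.
      static StorageType choose(List<StorageType> preferred,
                                EnumSet<StorageType> onNode) {
        for (StorageType t : preferred) {
          if (onNode.contains(t)) {
            return t;
          }
        }
        return null; // no match; real placement code would fall back
      }

      public static void main(String[] args) {
        List<StorageType> policy = Arrays.asList(
            StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE);
        EnumSet<StorageType> node =
            EnumSet.of(StorageType.DISK, StorageType.SSD);
        // Prints SSD: with both media present, the policy prefers SSD.
        System.out.println(choose(policy, node));
      }
    }
----------------------------------------------------------------------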