hadoop-common-commits mailing list archives

From weic...@apache.org
Subject [hadoop] branch branch-2.8 updated: HDFS-14512. ONE_SSD policy will be violated while write data with DistributedFileSystem.create(....favoredNodes). Contributed by Ayush Saxena.
Date Thu, 30 May 2019 04:47:48 GMT
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
     new bb6b41c  HDFS-14512. ONE_SSD policy will be violated while write data with DistributedFileSystem.create(....favoredNodes). Contributed by Ayush Saxena.
bb6b41c is described below

commit bb6b41cde3b59e742e8a0d2df31211b19067b48b
Author: Ayush Saxena <ayushsaxena@apache.org>
AuthorDate: Wed May 29 20:52:58 2019 -0700

    HDFS-14512. ONE_SSD policy will be violated while write data with DistributedFileSystem.create(....favoredNodes). Contributed by Ayush Saxena.
    
    Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
    (cherry picked from commit c1caab40f27e3e4f58ff1b5ef3e93efc56bbecbe)
    (cherry picked from commit 2cd7577a5a58db42a14a659b85fbd475091d0cb2)
    (cherry picked from commit 90cc5ddd4957fcb27ef5a1e7ee1a67c4eca9a3f5)
    (cherry picked from commit 942c5b04f2f7bc33595c4df5c66104ce3c4c2c69)
    
    Conflicts:
    	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    
    (cherry picked from commit c63049dfd413dd790e9cc6cca13c310140edc260)
    
    Conflicts:
    	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
---
 .../blockmanagement/BlockPlacementPolicy.java      | 12 ++++++++
 .../BlockPlacementPolicyDefault.java               | 35 +++++++++++++++-------
 .../hadoop/hdfs/TestDistributedFileSystem.java     | 28 +++++++++++++++++
 3 files changed, 65 insertions(+), 10 deletions(-)
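
For context: the bug surfaces when a client writes under the ONE_SSD storage policy while passing favored nodes to create(). The sketch below is a minimal illustration, not code from this patch: the favored-node hostname and port are hypothetical (50010 is the default 2.x DataNode transfer port), and it assumes fs.defaultFS points at an HDFS cluster whose DataNodes carry both SSD and DISK volumes.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

    public class OneSsdFavoredNodesRepro {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

        // ONE_SSD places one replica on SSD and the rest on DISK.
        fs.mkdirs(new Path("/tmp"));
        fs.setStoragePolicy(new Path("/tmp"), "ONE_SSD");

        // Hypothetical favored DataNode (its data transfer address).
        InetSocketAddress[] favoredNodes =
            { new InetSocketAddress("datanode-1.example.com", 50010) };

        // Before this fix, the favored-node placement path dropped the
        // storage-type accounting, so the block's replicas could violate
        // ONE_SSD (e.g. land on two SSD storages).
        try (HdfsDataOutputStream out = fs.create(new Path("/tmp/file1"),
            FsPermission.getDefault(), false, 4096, (short) 3,
            128 * 1024 * 1024L, null, favoredNodes)) {
          out.write("Some Bytes".getBytes());
        }
      }
    }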

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 1b614f4..4062598 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -106,6 +107,17 @@ public abstract class BlockPlacementPolicy {
   }
 
   /**
+   * @param storageTypes storage types that should be used as targets.
+   */
+  public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
+      Node writer, List<DatanodeStorageInfo> chosen, boolean returnChosenNodes,
+      Set<Node> excludedNodes, long blocksize, BlockStoragePolicy storagePolicy,
+      EnumSet<AddBlockFlag> flags, EnumMap<StorageType, Integer> storageTypes) {
+    return chooseTarget(srcPath, numOfReplicas, writer, chosen,
+        returnChosenNodes, excludedNodes, blocksize, storagePolicy, flags);
+  }
+
+  /**
    * Verify if the block's placement meets requirement of placement policy,
    * i.e. replicas are placed on no less than minRacks racks in the system.
    * 
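
The new base-class overload above is a compatibility shim: its default implementation ignores the storageTypes argument and delegates to the existing nine-argument chooseTarget, so placement policies that predate the parameter behave exactly as before. A policy that wants the shared storage-type accounting overrides the ten-argument variant, roughly as follows (a hypothetical subclass for illustration; in this patch only BlockPlacementPolicyDefault overrides it):

    package org.apache.hadoop.hdfs.server.blockmanagement;

    import java.util.EnumMap;
    import java.util.EnumSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.AddBlockFlag;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
    import org.apache.hadoop.net.Node;

    // Hypothetical policy opting in to the storage-type-aware overload.
    public class StorageTypeAwarePolicy extends BlockPlacementPolicyDefault {
      @Override
      public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
          Node writer, List<DatanodeStorageInfo> chosen, boolean returnChosenNodes,
          Set<Node> excludedNodes, long blocksize, BlockStoragePolicy storagePolicy,
          EnumSet<AddBlockFlag> flags, EnumMap<StorageType, Integer> storageTypes) {
        // storageTypes holds the replica count still required per storage
        // type; a policy can consult it before delegating to the default.
        return super.chooseTarget(srcPath, numOfReplicas, writer, chosen,
            returnChosenNodes, excludedNodes, blocksize, storagePolicy, flags,
            storageTypes);
      }
    }
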
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d688ee1..f5205c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -117,7 +117,16 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                     final BlockStoragePolicy storagePolicy,
                                     EnumSet<AddBlockFlag> flags) {
     return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
-        excludedNodes, blocksize, storagePolicy, flags);
+        excludedNodes, blocksize, storagePolicy, flags, null);
+  }
+
+  @Override
+  public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
+      Node writer, List<DatanodeStorageInfo> chosen, boolean returnChosenNodes,
+      Set<Node> excludedNodes, long blocksize, BlockStoragePolicy storagePolicy,
+      EnumSet<AddBlockFlag> flags, EnumMap<StorageType, Integer> storageTypes) {
+    return chooseTarget(numOfReplicas, writer, chosen, returnChosenNodes,
+        excludedNodes, blocksize, storagePolicy, flags, storageTypes);
   }
 
   @Override
@@ -169,7 +178,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         DatanodeStorageInfo[] remainingTargets =
             chooseTarget(src, numOfReplicas, writer,
                 new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-                favoriteAndExcludedNodes, blocksize, storagePolicy, flags);
+                favoriteAndExcludedNodes, blocksize, storagePolicy, flags,
+                storageTypes);
         for (int i = 0; i < remainingTargets.length; i++) {
           results.add(remainingTargets[i]);
         }
@@ -219,7 +229,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                     Set<Node> excludedNodes,
                                     long blocksize,
                                     final BlockStoragePolicy storagePolicy,
-                                    EnumSet<AddBlockFlag> addBlockFlags) {
+                                    EnumSet<AddBlockFlag> addBlockFlags,
+                                    EnumMap<StorageType, Integer> sTypes) {
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return DatanodeStorageInfo.EMPTY_ARRAY;
     }
@@ -255,7 +266,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       localNode = chooseTarget(numOfReplicas, writer,
           excludedNodeCopy, blocksize, maxNodesPerRack, results,
           avoidStaleNodes, storagePolicy,
-          EnumSet.noneOf(StorageType.class), results.isEmpty());
+          EnumSet.noneOf(StorageType.class), results.isEmpty(), sTypes);
       if (results.size() < numOfReplicas) {
         // not enough nodes; discard results and fall back
         results = null;
@@ -265,7 +276,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       results = new ArrayList<>(chosenStorage);
       localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
           blocksize, maxNodesPerRack, results, avoidStaleNodes,
-          storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty());
+          storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty(),
+          sTypes);
     }
 
     if (!returnChosenNodes) {  
@@ -345,6 +357,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * @param maxNodesPerRack max nodes allowed per rack
    * @param results the target nodes already chosen
    * @param avoidStaleNodes avoid stale nodes in replica choosing
+   * @param storageTypes storage type to be considered for target
    * @return local node of writer (not chosen node)
    */
   private Node chooseTarget(int numOfReplicas,
@@ -356,7 +369,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                             final boolean avoidStaleNodes,
                             final BlockStoragePolicy storagePolicy,
                             final EnumSet<StorageType> unavailableStorages,
-                            final boolean newBlock) {
+                            final boolean newBlock,
+                            EnumMap<StorageType, Integer> storageTypes) {
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return (writer instanceof DatanodeDescriptor) ? writer : null;
     }
@@ -374,8 +388,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         .chooseStorageTypes((short) totalReplicasExpected,
             DatanodeStorageInfo.toStorageTypes(results),
             unavailableStorages, newBlock);
-    final EnumMap<StorageType, Integer> storageTypes =
-        getRequiredStorageTypes(requiredStorageTypes);
+    if (storageTypes == null) {
+      storageTypes = getRequiredStorageTypes(requiredStorageTypes);
+    }
     if (LOG.isTraceEnabled()) {
       LOG.trace("storageTypes=" + storageTypes);
     }
@@ -418,7 +433,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         numOfReplicas = totalReplicasExpected - results.size();
         return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize,
             maxNodesPerRack, results, false, storagePolicy, unavailableStorages,
-            newBlock);
+            newBlock, null);
       }
 
       boolean retry = false;
@@ -438,7 +453,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         numOfReplicas = totalReplicasExpected - results.size();
         return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize,
             maxNodesPerRack, results, false, storagePolicy, unavailableStorages,
-            newBlock);
+            newBlock, null);
       }
     }
     return writer;
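
The crux of the fix is in the hunks above: the favored-node path now threads its storage-type requirements (an EnumMap of replicas still needed per StorageType) into the fallback chooseTarget call, and the inner chooseTarget recomputes the map only when none was passed in (storageTypes == null); the retry paths pass null so the requirements are recomputed against the updated results. Once a favored node has consumed the single SSD slot that ONE_SSD allows, the shared map tells the fallback pass that only DISK targets remain. A standalone toy illustration of that accounting (not HDFS code):

    import java.util.EnumMap;

    import org.apache.hadoop.fs.StorageType;

    public class StorageTypeAccounting {
      public static void main(String[] args) {
        // ONE_SSD with replication 3: one SSD target, two DISK targets.
        EnumMap<StorageType, Integer> remaining =
            new EnumMap<>(StorageType.class);
        remaining.put(StorageType.SSD, 1);
        remaining.put(StorageType.DISK, 2);

        // The favored-node pass places one replica on an SSD storage...
        remaining.put(StorageType.SSD, remaining.get(StorageType.SSD) - 1);

        // ...so a fallback pass sharing this map picks DISK targets only.
        System.out.println("SSD still needed: "
            + remaining.get(StorageType.SSD));   // 0
        System.out.println("DISK still needed: "
            + remaining.get(StorageType.DISK));  // 2
      }
    }
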
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index d4894d5..25b4e54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -49,6 +49,7 @@ import java.util.Set;
 import org.apache.commons.lang.ArrayUtils;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +77,7 @@ import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -1433,4 +1435,30 @@ public class TestDistributedFileSystem {
       }
     }
   }
+
+  @Test
+  public void testStorageFavouredNodes()
+      throws IOException, InterruptedException, TimeoutException {
+    Configuration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .storageTypes(new StorageType[] {StorageType.SSD, StorageType.DISK})
+        .numDataNodes(3).storagesPerDatanode(2).build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path file1 = new Path("/tmp/file1");
+      fs.mkdirs(new Path("/tmp"));
+      fs.setStoragePolicy(new Path("/tmp"), "ONE_SSD");
+      InetSocketAddress[] addrs =
+          {cluster.getDataNodes().get(0).getXferAddress()};
+      HdfsDataOutputStream stream = fs.create(file1, FsPermission.getDefault(),
+          false, 1024, (short) 3, 1024, null, addrs);
+      stream.write("Some Bytes".getBytes());
+      stream.close();
+      DFSTestUtil.waitReplication(fs, file1, (short) 3);
+      BlockLocation[] locations = fs.getClient()
+          .getBlockLocations(file1.toUri().getPath(), 0, Long.MAX_VALUE);
+      int numSSD = Collections.frequency(
+          Arrays.asList(locations[0].getStorageTypes()), StorageType.SSD);
+      assertEquals("Number of SSD should be 1 but was : " + numSSD, 1, numSSD);
+    }
+  }
 }
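
The regression test above writes a three-replica file under ONE_SSD with one favored node and asserts that exactly one replica lands on SSD. Assuming a standard source checkout, it can be run on its own with something like "mvn test -Dtest=TestDistributedFileSystem#testStorageFavouredNodes" from hadoop-hdfs-project/hadoop-hdfs.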


