hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From a..@apache.org
Subject [1/2] hadoop git commit: HDFS-10660. Expose storage policy apis via HDFSAdmin interface. (Contributed by Rakesh R)
Date Fri, 22 Jul 2016 19:34:22 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ea10e1384 -> 2f1e35291
  refs/heads/trunk 77ac04efe -> c6e3a0020


HDFS-10660. Expose storage policy apis via HDFSAdmin interface. (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f1e3529
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f1e3529
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f1e3529

Branch: refs/heads/branch-2
Commit: 2f1e35291289c8231344f8f6beb2fe6fbc5d7a37
Parents: ea10e13
Author: Arpit Agarwal <arp@apache.org>
Authored: Fri Jul 22 11:38:33 2016 -0700
Committer: Arpit Agarwal <arp@apache.org>
Committed: Fri Jul 22 12:12:29 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 35 +++++++++
 .../org/apache/hadoop/hdfs/TestHdfsAdmin.java   | 82 +++++++++++++++++++-
 2 files changed, 115 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f1e3529/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index c92e99f..2ee7eea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -20,12 +20,14 @@ package org.apache.hadoop.hdfs.client;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.util.Collection;
 import java.util.EnumSet;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -412,6 +414,39 @@ public class HdfsAdmin {
     dfs.setStoragePolicy(src, policyName);
   }
 
+  /**
+   * Unset the storage policy set for a given file or directory.
+   *
+   * @param src file or directory path.
+   * @throws IOException
+   */
+  public void unsetStoragePolicy(final Path src) throws IOException {
+    dfs.unsetStoragePolicy(src);
+  }
+
+  /**
+   * Query the effective storage policy for the given file or directory.
+   *
+   * @param src file or directory path.
+   * @return storage policy for the given file or directory.
+   * @throws IOException
+   */
+  public BlockStoragePolicySpi getStoragePolicy(final Path src)
+      throws IOException {
+    return dfs.getStoragePolicy(src);
+  }
+
+  /**
+   * Retrieve all the storage policies supported by the HDFS file system.
+   *
+   * @return all storage policies supported by the HDFS file system.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return dfs.getAllStoragePolicies();
+  }
+
   private void provisionEZTrash(Path path) throws IOException {
     // make sure the path is an EZ
     EncryptionZone ez = dfs.getEZForPath(path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f1e3529/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index 0f5bdf5..717d79e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -23,24 +23,35 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Sets;
+
 public class TestHdfsAdmin {
   
   private static final Path TEST_PATH = new Path("/test");
+  private static final short REPL = 1;
+  private static final int SIZE = 128;
   private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
-  
+
   @Before
   public void setUpCluster() throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
   }
   
   @After
@@ -94,4 +105,71 @@ public class TestHdfsAdmin {
   public void testHdfsAdminWithBadUri() throws IOException, URISyntaxException {
     new HdfsAdmin(new URI("file:///bad-scheme"), conf);
   }
+
+  /**
+   * Test that we can set, get, unset storage policies via {@link HdfsAdmin}.
+   */
+  @Test
+  public void testHdfsAdminStoragePolicies() throws Exception {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    FileSystem fs = FileSystem.get(conf);
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final Path wow = new Path(bar, "wow");
+    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+    final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+        .createDefaultSuite();
+    final BlockStoragePolicy warm = suite.getPolicy("WARM");
+    final BlockStoragePolicy cold = suite.getPolicy("COLD");
+    final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+    /*
+     * test: set storage policy
+     */
+    hdfsAdmin.setStoragePolicy(foo, warm.getName());
+    hdfsAdmin.setStoragePolicy(bar, cold.getName());
+    hdfsAdmin.setStoragePolicy(wow, hot.getName());
+
+    /*
+     * test: get storage policy after set
+     */
+    assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
+    assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
+    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
+
+    /*
+     * test: unset storage policy
+     */
+    hdfsAdmin.unsetStoragePolicy(foo);
+    hdfsAdmin.unsetStoragePolicy(bar);
+    hdfsAdmin.unsetStoragePolicy(wow);
+
+    /*
+     * test: get storage policy after unset. HOT by default.
+     */
+    assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
+    assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
+    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
+
+    /*
+     * test: get all storage policies
+     */
+    // Get policies via HdfsAdmin
+    Set<String> policyNamesSet1 = new HashSet<>();
+    for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
+      policyNamesSet1.add(policy.getName());
+    }
+
+    // Get policies via BlockStoragePolicySuite
+    Set<String> policyNamesSet2 = new HashSet<>();
+    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
+      policyNamesSet2.add(policy.getName());
+    }
+    // Ensure that we got the same set of policies in both cases.
+    Assert.assertTrue(
+        Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+    Assert.assertTrue(
+        Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message