hadoop-common-commits mailing list archives

From tasan...@apache.org
Subject [hadoop] branch trunk updated: HDFS-15063. HttpFS: getFileStatus doesn't return ecPolicy. Contributed by hemanthboyina.
Date Wed, 01 Jan 2020 02:28:04 GMT
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 074050c  HDFS-15063. HttpFS: getFileStatus doesn't return ecPolicy. Contributed by hemanthboyina.
074050c is described below

commit 074050ca595a81927c867951e48cef132a0284be
Author: Takanobu Asanuma <tasanuma@apache.org>
AuthorDate: Wed Jan 1 11:26:38 2020 +0900

    HDFS-15063. HttpFS: getFileStatus doesn't return ecPolicy. Contributed by hemanthboyina.
---
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java | 11 ++---
 .../hadoop/fs/http/client/HttpFSFileSystem.java    |  8 ++--
 .../apache/hadoop/fs/http/server/FSOperations.java |  7 +++
 .../hadoop/fs/http/server/TestHttpFSServer.java    | 50 ++++++++++++++++++++++
 .../java/org/apache/hadoop/hdfs/web/JsonUtil.java  |  2 +-
 5 files changed, 69 insertions(+), 9 deletions(-)
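
This change makes the HttpFS GETFILESTATUS response carry the erasure coding policy, matching WebHDFS: FSOperations now writes the policy into the FileStatus JSON, and HttpFSFileSystem parses it back through JsonUtilClient. A minimal client-side sketch of the end result, modeled on the new test further down; the endpoint URI and file path are placeholders, not part of the commit:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class EcPolicyStatusSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint: HttpFS speaks the WebHDFS REST API, so a
        // webhdfs:// URI pointed at the HttpFS server is enough.
        FileSystem fs = FileSystem.get(
            new URI("webhdfs://httpfs.example.com:14000"), new Configuration());

        // For a file inside an erasure-coded directory, the returned status
        // now carries the policy instead of dropping it.
        HdfsFileStatus status =
            (HdfsFileStatus) fs.getFileStatus(new Path("/ec/ec_file.txt"));
        System.out.println(status.getErasureCodingPolicy().getName());
      }
    }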

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index f9b847c..6f2f3b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -115,7 +115,7 @@ public class JsonUtilClient {
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  static HdfsFileStatus toFileStatus(final Map<?, ?> json,
+  public static HdfsFileStatus toFileStatus(final Map<?, ?> json,
       boolean includesType) {
     if (json == null) {
       return null;
@@ -158,11 +158,12 @@ public class JsonUtilClient {
     if (ecPolicyObj != null) {
       Map<String, String> extraOptions = (Map) ecPolicyObj.get("extraOptions");
       ECSchema ecSchema = new ECSchema((String) ecPolicyObj.get("codecName"),
-          (int) ecPolicyObj.get("numDataUnits"),
-          (int) ecPolicyObj.get("numParityUnits"), extraOptions);
+          (int) ((Number) ecPolicyObj.get("numDataUnits")).longValue(),
+          (int) ((Number) ecPolicyObj.get("numParityUnits")).longValue(),
+          extraOptions);
       ecPolicy = new ErasureCodingPolicy((String) ecPolicyObj.get("name"),
-          ecSchema, (int) ecPolicyObj.get("cellSize"),
-          (byte) (int) ecPolicyObj.get("id"));
+          ecSchema, (int) ((Number) ecPolicyObj.get("cellSize")).longValue(),
+          (byte) (int) ((Number) ecPolicyObj.get("id")).longValue());
 
     }
 
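The hunk above replaces direct (int) casts on the parsed JSON values with casts through Number. The likely reason is that the JSON parser on this path boxes integer values as Long, so unboxing them straight to int fails at runtime with a ClassCastException. A standalone illustration of the difference; the map below is hand-built for the example, not actual HttpFS output:

    import java.util.HashMap;
    import java.util.Map;

    public class NumberCastSketch {
      public static void main(String[] args) {
        Map<String, Object> ecPolicyObj = new HashMap<>();
        // Parsers such as json-simple typically box JSON integers as Long.
        ecPolicyObj.put("numDataUnits", 3L);

        // int bad = (int) ecPolicyObj.get("numDataUnits");
        // ...throws ClassCastException: Long cannot be cast to Integer.

        // Widening to Number first, then narrowing explicitly, is safe:
        int ok = (int) ((Number) ecPolicyObj.get("numDataUnits")).longValue();
        System.out.println(ok);  // 3
      }
    }
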
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 8066847..a168479 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtilClient;
@@ -188,10 +189,12 @@ public class HttpFSFileSystem extends FileSystem
   public static final String FILE_ID_JSON = "fileId";
   public static final String REPLICATION_JSON = "replication";
   public static final String STORAGEPOLICY_JSON = "storagePolicy";
+  public static final String ECPOLICYNAME_JSON = "ecPolicy";
   public static final String XATTRS_JSON = "XAttrs";
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String ECPOLICY_JSON = "ecPolicyObj";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -846,9 +849,8 @@ public class HttpFSFileSystem extends FileSystem
                                            params, f, true);
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
-    json = (JSONObject) json.get(FILE_STATUS_JSON);
-    f = makeQualified(f);
-    return createFileStatus(f, json);
+    HdfsFileStatus status = JsonUtilClient.toFileStatus(json, true);
+    return status.makeQualified(getUri(), f);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index bd1f3ad..6ef5a9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -130,6 +130,13 @@ public class FSOperations {
           hdfsFileStatus.getFileId());
       json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
           hdfsFileStatus.getStoragePolicy());
+      if (hdfsFileStatus.getErasureCodingPolicy() != null) {
+        json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
+            hdfsFileStatus.getErasureCodingPolicy().getName());
+        json.put(HttpFSFileSystem.ECPOLICY_JSON,
+            JsonUtil.getEcPolicyAsMap(
+                hdfsFileStatus.getErasureCodingPolicy()));
+      }
     }
     if (fileStatus.getPermission().getAclBit()) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
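
With this hunk, the server-side FileStatus JSON for an erasure-coded file gains two fields: the flat policy name under ecPolicy (ECPOLICYNAME_JSON) and the full policy map under ecPolicyObj (ECPOLICY_JSON). A sketch of reading both from a raw GETFILESTATUS response, following the pattern in the new test below; the response string is a hand-written example with illustrative values, not captured server output:

    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class EcPolicyFieldsSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative GETFILESTATUS body for a file under an RS-3-2 policy.
        String body = "{\"FileStatus\":{\"ecPolicy\":\"RS-3-2-1024k\","
            + "\"ecPolicyObj\":{\"name\":\"RS-3-2-1024k\",\"codecName\":\"rs\","
            + "\"numDataUnits\":3,\"numParityUnits\":2,"
            + "\"cellSize\":1048576,\"id\":2}}}";

        JSONObject json = (JSONObject) new JSONParser().parse(body);
        JSONObject status = (JSONObject) json.get("FileStatus");
        System.out.println(status.get("ecPolicy"));          // flat policy name
        JSONObject policyObj = (JSONObject) status.get("ecPolicyObj");
        System.out.println(policyObj.get("numDataUnits"));   // 3 (boxed as Long)
      }
    }
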
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 0a27e2b..c6e6611 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -17,11 +17,18 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
@@ -42,6 +49,7 @@ import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.Writer;
 import java.net.HttpURLConnection;
+import java.net.URI;
 import java.net.URL;
 import java.nio.charset.Charset;
 import java.text.MessageFormat;
@@ -65,6 +73,7 @@ import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.lib.server.Service;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.Groups;
@@ -1688,4 +1697,45 @@ public class TestHttpFSServer extends HFSTestCase {
         sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=-w-");
     Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
   }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testECPolicy() throws Exception {
+    createHttpFSServer(false, false);
+    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    final String ecPolicyName = ecPolicy.getName();
+    // Create an EC dir and write a test file in it
+    final Path ecDir = new Path("/ec");
+
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+        .get(ecDir.toUri(), TestHdfsHelper.getHdfsConf());
+    Path ecFile = new Path(ecDir, "ec_file.txt");
+    dfs.mkdirs(ecDir);
+    dfs.enableErasureCodingPolicy(ecPolicyName);
+    dfs.setErasureCodingPolicy(ecDir, ecPolicyName);
+    // Create a EC file
+    DFSTestUtil.createFile(dfs, ecFile, 1024, (short) 1, 0);
+
+    // Verify that ecPolicy is set in getFileStatus response for ecFile
+    String getFileStatusResponse =
+        getStatus(ecFile.toString(), "GETFILESTATUS");
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(getFileStatusResponse);
+    JSONObject details = (JSONObject) jsonObject.get("FileStatus");
+    String ecpolicyForECfile = (String) details.get("ecPolicy");
+    assertEquals("EC policy for ecFile should match the set EC policy",
+        ecpolicyForECfile, ecPolicyName);
+
+    // Verify httpFs getFileStatus with WEBHDFS REST API
+    WebHdfsFileSystem httpfsWebHdfs = (WebHdfsFileSystem) FileSystem.get(
+        new URI("webhdfs://"
+            + TestJettyHelper.getJettyURL().toURI().getAuthority()),
+        TestHdfsHelper.getHdfsConf());
+    HdfsFileStatus httpfsFileStatus =
+        (HdfsFileStatus) httpfsWebHdfs.getFileStatus(ecFile);
+    assertNotNull(httpfsFileStatus.getErasureCodingPolicy());
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 0e8638d..4a5b757 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -157,7 +157,7 @@ public class JsonUtil {
     return m;
   }
 
-  private static Map<String, Object> getEcPolicyAsMap(
+  public static Map<String, Object> getEcPolicyAsMap(
       final ErasureCodingPolicy ecPolicy) {
     /** Convert an ErasureCodingPolicy to a map. */
     ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();



