hadoop-common-commits mailing list archives

From aengin...@apache.org
Subject [42/50] hadoop git commit: HDFS-11565. Use compact identifiers for built-in ECPolicies in HdfsFileStatus.
Date Mon, 17 Apr 2017 17:28:41 GMT
HDFS-11565. Use compact identifiers for built-in ECPolicies in HdfsFileStatus.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/966b1b5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/966b1b5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/966b1b5b

Branch: refs/heads/HDFS-7240
Commit: 966b1b5b44103f3e3952da45da264d76fb3ac384
Parents: abce613
Author: Andrew Wang <wang@apache.org>
Authored: Wed Apr 12 12:24:32 2017 -0700
Committer: Andrew Wang <wang@apache.org>
Committed: Wed Apr 12 12:24:32 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 32 ++++++--
 .../src/main/proto/hdfs.proto                   |  6 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    | 86 +++++++++++++++++++-
 3 files changed, 113 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
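In brief: built-in (system) erasure coding policies are now serialized
as just their numeric ID, and the receiving side reconstructs the full
policy via SystemErasureCodingPolicies.getByID(). The name, schema, and
cellSize fields of ErasureCodingPolicyProto become optional and are only
populated for policies that are not built in. A rough sketch of the
resulting round-trip (illustrative only, not part of the commit):

    // Built-in policies round-trip through PB carrying only their ID.
    for (ErasureCodingPolicy builtIn :
        SystemErasureCodingPolicies.getPolicies()) {
      HdfsProtos.ErasureCodingPolicyProto proto =
          PBHelperClient.convertErasureCodingPolicy(builtIn);
      assert !proto.hasName() && !proto.hasSchema() && !proto.hasCellSize();
      // Deserialization looks the policy up by ID rather than rebuilding it.
      assert builtIn.equals(PBHelperClient.convertErasureCodingPolicy(proto));
    }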


http://git-wip-us.apache.org/repos/asf/hadoop/blob/966b1b5b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 98d7ef9..e703a94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
@@ -2641,20 +2642,37 @@ public class PBHelperClient {
   }
 
   public static ErasureCodingPolicy convertErasureCodingPolicy(
-      ErasureCodingPolicyProto policy) {
-    return new ErasureCodingPolicy(policy.getName(),
-        convertECSchema(policy.getSchema()),
-        policy.getCellSize(), (byte) policy.getId());
+      ErasureCodingPolicyProto proto) {
+    final byte id = (byte) (proto.getId() & 0xFF);
+    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
+    if (policy == null) {
+      // If it's not a built-in policy, populate from the optional PB fields.
+      // The optional fields are required in this case.
+      Preconditions.checkArgument(proto.hasName(),
+          "Missing name field in ErasureCodingPolicy proto");
+      Preconditions.checkArgument(proto.hasSchema(),
+          "Missing schema field in ErasureCodingPolicy proto");
+      Preconditions.checkArgument(proto.hasCellSize(),
+          "Missing cellsize field in ErasureCodingPolicy proto");
+
+      return new ErasureCodingPolicy(proto.getName(),
+          convertECSchema(proto.getSchema()),
+          proto.getCellSize(), id);
+    }
+    return policy;
   }
 
   public static ErasureCodingPolicyProto convertErasureCodingPolicy(
       ErasureCodingPolicy policy) {
     ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
         .newBuilder()
-        .setName(policy.getName())
-        .setSchema(convertECSchema(policy.getSchema()))
-        .setCellSize(policy.getCellSize())
         .setId(policy.getId());
+    // If it's not a built-in policy, need to set the optional fields.
+    if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
+      builder.setName(policy.getName())
+          .setSchema(convertECSchema(policy.getSchema()))
+          .setCellSize(policy.getCellSize());
+    }
     return builder.build();
   }
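A note on the (byte) (proto.getId() & 0xFF) narrowing above: the proto
field is a uint32 but only 8 bits are used, and Java bytes are signed.
The mask keeps just the low 8 bits and the cast reinterprets them as a
signed byte; widening back with & 0xFF recovers the original unsigned
value. Illustrative arithmetic (not from the commit):

    int wireId = 254;                  // as carried in the uint32 proto field
    byte id = (byte) (wireId & 0xFF);  // stored as -2, bit pattern 0xFE
    int restored = id & 0xFF;          // widens back to 254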
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/966b1b5b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 99a9e68..3e3994c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -352,9 +352,9 @@ message ECSchemaProto {
 }
 
 message ErasureCodingPolicyProto {
-  required string name = 1;
-  required ECSchemaProto schema = 2;
-  required uint32 cellSize = 3;
+  optional string name = 1;
+  optional ECSchemaProto schema = 2;
+  optional uint32 cellSize = 3;
   required uint32 id = 4; // Actually a byte - only 8 bits used
 }
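With proto2 semantics, optional fields gain hasX() accessors, while
omitting a still-required field makes build() throw. Both behaviors are
exercised by the new tests below; a minimal sketch (illustrative, not
in the commit):

    // Only the (still-required) id is set; optional fields read as absent.
    HdfsProtos.ErasureCodingPolicyProto p =
        HdfsProtos.ErasureCodingPolicyProto.newBuilder().setId(1).build();
    assert !p.hasName() && !p.hasSchema() && !p.hasCellSize();

    // Omitting the required id fails at build() time with
    // UninitializedMessageException (see testErasureCodingPolicyMissingId).
    HdfsProtos.ErasureCodingPolicyProto.newBuilder().build(); // throws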
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/966b1b5b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index d0363c5..7647ac4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
+
+import com.google.protobuf.UninitializedMessageException;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
+
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
@@ -100,8 +105,10 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.junit.Assert;
 import org.junit.Test;
@@ -892,7 +899,7 @@ public class TestPBHelper {
         DataChecksum.Type.valueOf(DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT).id));
     HdfsProtos.FsServerDefaultsProto proto = b.build();
 
-    Assert.assertFalse("KeyProvider uri is not supported",
+    assertFalse("KeyProvider uri is not supported",
         proto.hasKeyProviderUri());
     FsServerDefaults fsServerDefaults = PBHelperClient.convert(proto);
     Assert.assertNotNull("FsServerDefaults is null", fsServerDefaults);
@@ -900,4 +907,81 @@ public class TestPBHelper {
         fsServerDefaults.getKeyProviderUri());
   }
 
+  @Test
+  public void testConvertErasureCodingPolicy() throws Exception {
+    // Check conversion of the built-in policies.
+    for (ErasureCodingPolicy policy :
+        SystemErasureCodingPolicies.getPolicies()) {
+      HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
+          .convertErasureCodingPolicy(policy);
+      // Optional fields should not be set.
+      assertFalse("Unnecessary field is set.", proto.hasName());
+      assertFalse("Unnecessary field is set.", proto.hasSchema());
+      assertFalse("Unnecessary field is set.", proto.hasCellSize());
+      // Convert proto back to an object and check for equality.
+      ErasureCodingPolicy convertedPolicy = PBHelperClient
+          .convertErasureCodingPolicy(proto);
+      assertEquals("Converted policy not equal", policy, convertedPolicy);
+    }
+    // Check conversion of a non-built-in policy.
+    ECSchema newSchema = new ECSchema("testcodec", 3, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(newSchema, 128 * 1024, (byte) 254);
+    HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
+        .convertErasureCodingPolicy(newPolicy);
+    // Optional fields should be set.
+    assertTrue("Optional field not set", proto.hasName());
+    assertTrue("Optional field not set", proto.hasSchema());
+    assertTrue("Optional field not set", proto.hasCellSize());
+    ErasureCodingPolicy convertedPolicy = PBHelperClient
+        .convertErasureCodingPolicy(proto);
+    // Converted policy should be equal.
+    assertEquals("Converted policy not equal", newPolicy, convertedPolicy);
+  }
+
+  @Test(expected = UninitializedMessageException.class)
+  public void testErasureCodingPolicyMissingId() throws Exception {
+    HdfsProtos.ErasureCodingPolicyProto.Builder builder =
+        HdfsProtos.ErasureCodingPolicyProto.newBuilder();
+    PBHelperClient.convertErasureCodingPolicy(builder.build());
+  }
+
+  @Test
+  public void testErasureCodingPolicyMissingOptionalFields() throws Exception {
+    // For non-built-in policies, the optional fields are required
+    // when parsing an ErasureCodingPolicyProto.
+    HdfsProtos.ECSchemaProto schemaProto =
+        PBHelperClient.convertECSchema(
+            StripedFileTestUtil.getDefaultECPolicy().getSchema());
+    try {
+      PBHelperClient.convertErasureCodingPolicy(
+          HdfsProtos.ErasureCodingPolicyProto.newBuilder()
+              .setId(14)
+              .setSchema(schemaProto)
+              .setCellSize(123)
+              .build());
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Missing", e);
+    }
+    try {
+      PBHelperClient.convertErasureCodingPolicy(
+          HdfsProtos.ErasureCodingPolicyProto.newBuilder()
+              .setId(14)
+              .setName("testpolicy")
+              .setCellSize(123)
+              .build());
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Missing", e);
+    }
+    try {
+      PBHelperClient.convertErasureCodingPolicy(
+          HdfsProtos.ErasureCodingPolicyProto.newBuilder()
+              .setId(14)
+              .setName("testpolicy")
+              .setSchema(schemaProto)
+              .build());
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Missing", e);
+    }
+  }
 }
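One caveat on testErasureCodingPolicyMissingOptionalFields as written:
if convertErasureCodingPolicy were ever to return normally, each try
block would pass silently. A stricter variant (a sketch, not part of
this commit) would fail explicitly when no exception is thrown:

    try {
      PBHelperClient.convertErasureCodingPolicy(
          HdfsProtos.ErasureCodingPolicyProto.newBuilder()
              .setId(14)
              .setSchema(schemaProto)
              .setCellSize(123)
              .build());
      Assert.fail("Expected IllegalArgumentException for the missing name");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Missing", e);
    }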


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

